Dataset schema (column: type, value range or number of classes):
- id: int64, 0 to 755k
- file_name: string, lengths 3 to 109
- file_path: string, lengths 13 to 185
- content: string, lengths 31 to 9.38M
- size: int64, 31 to 9.38M
- language: string, 1 class
- extension: string, 11 classes
- total_lines: int64, 1 to 340k
- avg_line_length: float64, 2.18 to 149k
- max_line_length: int64, 7 to 2.22M
- alphanum_fraction: float64, 0 to 1
- repo_name: string, lengths 6 to 65
- repo_stars: int64, 100 to 47.3k
- repo_forks: int64, 0 to 12k
- repo_open_issues: int64, 0 to 3.4k
- repo_license: string, 9 classes
- repo_extraction_date: string, 92 classes
- exact_duplicates_redpajama: bool, 2 classes
- near_duplicates_redpajama: bool, 2 classes
- exact_duplicates_githubcode: bool, 2 classes
- exact_duplicates_stackv2: bool, 1 class
- exact_duplicates_stackv1: bool, 2 classes
- near_duplicates_githubcode: bool, 2 classes
- near_duplicates_stackv1: bool, 2 classes
- near_duplicates_stackv2: bool, 1 class
id: 3,652 | file_name: EVMVersion.h | file_path: ethereum_solidity/liblangutil/EVMVersion.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* EVM versioning.
*/
#pragma once
#include <libsolutil/Assertions.h>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include <boost/operators.hpp>
namespace solidity::evmasm
{
/// Virtual machine bytecode instruction. Forward declared from libevmasm/Instruction.h
enum class Instruction: uint8_t;
}
namespace solidity::langutil
{
/**
* A version specifier of the EVM we want to compile to.
* Defaults to the latest version deployed on Ethereum Mainnet at the time of compiler release.
*/
class EVMVersion:
boost::less_than_comparable<EVMVersion>,
boost::equality_comparable<EVMVersion>
{
public:
EVMVersion() = default;
static EVMVersion homestead() { return {Version::Homestead}; }
static EVMVersion tangerineWhistle() { return {Version::TangerineWhistle}; }
static EVMVersion spuriousDragon() { return {Version::SpuriousDragon}; }
static EVMVersion byzantium() { return {Version::Byzantium}; }
static EVMVersion constantinople() { return {Version::Constantinople}; }
static EVMVersion petersburg() { return {Version::Petersburg}; }
static EVMVersion istanbul() { return {Version::Istanbul}; }
static EVMVersion berlin() { return {Version::Berlin}; }
static EVMVersion london() { return {Version::London}; }
static EVMVersion paris() { return {Version::Paris}; }
static EVMVersion shanghai() { return {Version::Shanghai}; }
static EVMVersion cancun() { return {Version::Cancun}; }
static EVMVersion prague() { return {Version::Prague}; }
static std::vector<EVMVersion> allVersions() {
return {
homestead(),
tangerineWhistle(),
spuriousDragon(),
byzantium(),
constantinople(),
petersburg(),
istanbul(),
berlin(),
london(),
paris(),
shanghai(),
cancun(),
prague(),
};
}
static std::optional<EVMVersion> fromString(std::string const& _version)
{
for (auto const& v: allVersions())
if (_version == v.name())
return v;
return std::nullopt;
}
bool isExperimental() const {
return m_version == Version::Prague;
}
bool operator==(EVMVersion const& _other) const { return m_version == _other.m_version; }
bool operator<(EVMVersion const& _other) const { return m_version < _other.m_version; }
std::string name() const
{
switch (m_version)
{
case Version::Homestead: return "homestead";
case Version::TangerineWhistle: return "tangerineWhistle";
case Version::SpuriousDragon: return "spuriousDragon";
case Version::Byzantium: return "byzantium";
case Version::Constantinople: return "constantinople";
case Version::Petersburg: return "petersburg";
case Version::Istanbul: return "istanbul";
case Version::Berlin: return "berlin";
case Version::London: return "london";
case Version::Paris: return "paris";
case Version::Shanghai: return "shanghai";
case Version::Cancun: return "cancun";
case Version::Prague: return "prague";
}
util::unreachable();
}
/// Has the RETURNDATACOPY and RETURNDATASIZE opcodes.
bool supportsReturndata() const { return *this >= byzantium(); }
bool hasStaticCall() const { return *this >= byzantium(); }
bool hasBitwiseShifting() const { return *this >= constantinople(); }
bool hasCreate2() const { return *this >= constantinople(); }
bool hasExtCodeHash() const { return *this >= constantinople(); }
bool hasChainID() const { return *this >= istanbul(); }
bool hasSelfBalance() const { return *this >= istanbul(); }
bool hasBaseFee() const { return *this >= london(); }
bool hasBlobBaseFee() const { return *this >= cancun(); }
bool hasPrevRandao() const { return *this >= paris(); }
bool hasPush0() const { return *this >= shanghai(); }
bool hasBlobHash() const { return *this >= cancun(); }
bool hasMcopy() const { return *this >= cancun(); }
bool supportsTransientStorage() const { return *this >= cancun(); }
bool hasOpcode(evmasm::Instruction _opcode, std::optional<uint8_t> _eofVersion) const;
/// Whether we have to retain the costs for the call opcode itself (false),
/// or whether we can just forward easily all remaining gas (true).
bool canOverchargeGasForCall() const { return *this >= tangerineWhistle(); }
private:
enum class Version {
Homestead,
TangerineWhistle,
SpuriousDragon,
Byzantium,
Constantinople,
Petersburg,
Istanbul,
Berlin,
London,
Paris,
Shanghai,
Cancun,
Prague
};
EVMVersion(Version _version): m_version(_version) {}
Version m_version = Version::Cancun;
};
}
size: 5,122 | language: C++ | extension: .h | total_lines: 142 | avg_line_length: 33.612676 | max_line_length: 95 | alphanum_fraction: 0.736279
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
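Usage note: a minimal sketch of how the EVMVersion API above is typically driven, parsing a user-supplied version name, falling back to the compiler default, and querying feature flags. The requested version string is a hypothetical input.

```cpp
#include <liblangutil/EVMVersion.h>

#include <iostream>
#include <string>

using solidity::langutil::EVMVersion;

int main()
{
	std::string requested = "shanghai"; // hypothetical user input, e.g. from --evm-version
	// fromString() returns std::nullopt for unknown names; a default-constructed
	// EVMVersion is the compiler's current default target.
	EVMVersion version = EVMVersion::fromString(requested).value_or(EVMVersion{});
	std::cout << "targeting: " << version.name() << "\n";
	std::cout << std::boolalpha
	          << "PUSH0 available: " << version.hasPush0() << "\n"
	          << "transient storage: " << version.supportsTransientStorage() << "\n";
}
```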
id: 3,653 | file_name: DebugData.h | file_path: ethereum_solidity/liblangutil/DebugData.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <liblangutil/SourceLocation.h>
#include <optional>
#include <memory>
namespace solidity::langutil
{
struct DebugData
{
typedef typename std::shared_ptr<DebugData const> ConstPtr;
explicit DebugData(
langutil::SourceLocation _nativeLocation = {},
langutil::SourceLocation _originLocation = {},
std::optional<int64_t> _astID = {}
):
nativeLocation(std::move(_nativeLocation)),
originLocation(std::move(_originLocation)),
astID(_astID)
{}
static DebugData::ConstPtr create(
langutil::SourceLocation _nativeLocation,
langutil::SourceLocation _originLocation = {},
std::optional<int64_t> _astID = {}
)
{
return std::make_shared<DebugData>(
std::move(_nativeLocation),
std::move(_originLocation),
_astID
);
}
static DebugData::ConstPtr create()
{
static DebugData::ConstPtr emptyDebugData = create({});
return emptyDebugData;
}
/// Location in the Yul code.
langutil::SourceLocation nativeLocation;
/// Location in the original source that the Yul code was produced from.
/// Optional. Only present if the Yul source contains location annotations.
langutil::SourceLocation originLocation;
/// ID in the (Solidity) source AST.
std::optional<int64_t> astID;
};
} // namespace solidity::langutil
size: 1,956 | language: C++ | extension: .h | total_lines: 58 | avg_line_length: 31.413793 | max_line_length: 76 | alphanum_fraction: 0.767232
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
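Usage note: a brief sketch of constructing DebugData through the factories above; the source location and AST id are placeholder values.

```cpp
#include <liblangutil/DebugData.h>

using namespace solidity::langutil;

int main()
{
	// The zero-argument create() caches and returns one shared "empty" instance.
	DebugData::ConstPtr empty = DebugData::create();

	// Explicit native location plus an AST id (placeholder values).
	SourceLocation native;
	native.start = 0;
	native.end = 42;
	DebugData::ConstPtr data = DebugData::create(native, {}, 7);

	return (data->astID && *data->astID == 7 && !empty->astID) ? 0 : 1;
}
```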
id: 3,655 | file_name: SourceLocation.h | file_path: ethereum_solidity/liblangutil/SourceLocation.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* @author Lefteris Karapetsas <lefteris@ethdev.com>
* @date 2015
* Represents a location in a source file
*/
#pragma once
#include <iosfwd>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace solidity::langutil
{
/**
* Representation of an interval of source positions.
* The interval includes start and excludes end.
*/
struct SourceLocation
{
bool operator==(SourceLocation const& _other) const
{
return start == _other.start && end == _other.end && equalSources(_other);
}
bool operator!=(SourceLocation const& _other) const { return !operator==(_other); }
bool operator<(SourceLocation const& _other) const
{
if (!sourceName || !_other.sourceName)
return std::make_tuple(int(!!sourceName), start, end) < std::make_tuple(int(!!_other.sourceName), _other.start, _other.end);
else
return std::make_tuple(*sourceName, start, end) < std::make_tuple(*_other.sourceName, _other.start, _other.end);
}
bool contains(SourceLocation const& _other) const
{
if (!hasText() || !_other.hasText() || !equalSources(_other))
return false;
return start <= _other.start && _other.end <= end;
}
bool containsOffset(int _pos) const
{
if (!hasText() || _pos < 0)
return false;
return start <= _pos && _pos < end;
}
bool intersects(SourceLocation const& _other) const
{
if (!hasText() || !_other.hasText() || !equalSources(_other))
return false;
return _other.start < end && start < _other.end;
}
bool equalSources(SourceLocation const& _other) const
{
if (!!sourceName != !!_other.sourceName)
return false;
if (sourceName && *sourceName != *_other.sourceName)
return false;
return true;
}
bool isValid() const { return sourceName || start != -1 || end != -1; }
bool hasText() const { return sourceName && 0 <= start && start <= end; }
/// @returns the smallest SourceLocation that contains both @param _a and @param _b.
/// Assumes that @param _a and @param _b refer to the same source (exception: if the source of either one
/// is unset, the source of the other will be used for the result, even if that is unset as well).
/// Invalid start and end positions (with value of -1) are ignored (if start or end are -1 for both @param _a and
/// @param _b, then start resp. end of the result will be -1 as well).
static SourceLocation smallestCovering(SourceLocation _a, SourceLocation const& _b)
{
if (!_a.sourceName)
_a.sourceName = _b.sourceName;
if (_a.start < 0)
_a.start = _b.start;
else if (_b.start >= 0 && _b.start < _a.start)
_a.start = _b.start;
if (_b.end > _a.end)
_a.end = _b.end;
return _a;
}
int start = -1;
int end = -1;
std::shared_ptr<std::string const> sourceName;
};
SourceLocation parseSourceLocation(
std::string const& _input,
std::vector<std::shared_ptr<std::string const>> const& _sourceNames
);
/// Stream output for Location (used e.g. in boost exceptions).
std::ostream& operator<<(std::ostream& _out, SourceLocation const& _location);
/**
* Alternative, line-column-based representation for source locations.
* Both line and column are zero-based.
* If used as a range, the second location is considered exclusive.
* Negative values are invalid.
*/
struct LineColumn
{
/// Line value, can be between zero and number of `\n` characters in the source file.
int line = -1;
/// Column value, can be between zero and number of characters in the line (inclusive).
int column = -1;
LineColumn() = default;
explicit LineColumn(int _line, int _column): line(_line), column(_column) {}
};
}
size: 4,248 | language: C++ | extension: .h | total_lines: 116 | avg_line_length: 34.284483 | max_line_length: 127 | alphanum_fraction: 0.712929
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
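Usage note: a small sketch exercising the half-open interval semantics documented above (start inclusive, end exclusive, -1 meaning unset); the source name is a placeholder.

```cpp
#include <liblangutil/SourceLocation.h>

#include <cassert>
#include <memory>
#include <string>

using namespace solidity::langutil;

int main()
{
	auto name = std::make_shared<std::string const>("contract.sol"); // placeholder source name
	SourceLocation a{10, 20, name};
	SourceLocation b{15, 30, name};

	assert(a.containsOffset(10));   // start is inclusive
	assert(!a.containsOffset(20));  // end is exclusive
	assert(a.intersects(b));
	assert(!a.contains(b));

	// Smallest location covering both: [10, 30) in the same source.
	SourceLocation merged = SourceLocation::smallestCovering(a, b);
	assert(merged.start == 10 && merged.end == 30);
}
```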
id: 3,656 | file_name: Scanner.h | file_path: ethereum_solidity/liblangutil/Scanner.h | content:
/*
* This file is part of solidity.
*
* solidity is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* solidity is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with solidity. If not, see <http://www.gnu.org/licenses/>.
*
* This file is derived from the file "scanner.h", which was part of the
* V8 project. The original copyright header follows:
*
* Copyright 2006-2012, the V8 project authors. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @author Christian <c@ethdev.com>
* @date 2014
* Solidity scanner.
*/
#pragma once
#include <liblangutil/Token.h>
#include <liblangutil/CharStream.h>
#include <liblangutil/SourceLocation.h>
#include <optional>
#include <iosfwd>
namespace solidity::langutil
{
enum class ScannerKind
{
Solidity,
Yul,
ExperimentalSolidity,
SpecialComment
};
enum class ScannerError
{
NoError,
IllegalToken,
IllegalHexString,
IllegalHexDigit,
IllegalCommentTerminator,
IllegalEscapeSequence,
UnicodeCharacterInNonUnicodeString,
IllegalCharacterInString,
IllegalStringEndQuote,
IllegalNumberSeparator,
IllegalExponent,
IllegalNumberEnd,
DirectionalOverrideUnderflow,
DirectionalOverrideMismatch,
OctalNotAllowed,
};
std::string to_string(ScannerError _errorCode);
std::ostream& operator<<(std::ostream& os, ScannerError _errorCode);
class Scanner
{
friend class LiteralScope;
public:
explicit Scanner(CharStream& _source, ScannerKind _kind = ScannerKind::Solidity):
m_source(_source),
m_sourceName{std::make_shared<std::string>(_source.name())}
{
reset();
if (_kind != ScannerKind::Solidity)
setScannerMode(_kind);
}
/// Resets scanner to the start of input.
void reset();
/// Changes the scanner mode.
void setScannerMode(ScannerKind _kind)
{
m_kind = _kind;
// Invalidate lookahead buffer.
rescan();
}
CharStream const& charStream() const noexcept { return m_source; }
/// @returns the next token and advances input
Token next();
/// Set scanner to a specific offset. This is used in error recovery.
void setPosition(size_t _offset);
///@{
///@name Information about the current token
/// @returns the current token
Token currentToken() const
{
return m_tokens[Current].token;
}
ElementaryTypeNameToken currentElementaryTypeNameToken() const
{
unsigned firstSize;
unsigned secondSize;
std::tie(firstSize, secondSize) = m_tokens[Current].extendedTokenInfo;
return ElementaryTypeNameToken(m_tokens[Current].token, firstSize, secondSize);
}
SourceLocation currentLocation() const { return m_tokens[Current].location; }
std::string const& currentLiteral() const { return m_tokens[Current].literal; }
std::tuple<unsigned, unsigned> const& currentTokenInfo() const { return m_tokens[Current].extendedTokenInfo; }
/// Retrieves the last error that occurred during lexical analysis.
/// @note If no error occurred, the value is undefined.
ScannerError currentError() const noexcept { return m_tokens[Current].error; }
///@}
///@{
///@name Information about the current comment token
SourceLocation currentCommentLocation() const { return m_skippedComments[Current].location; }
std::string const& currentCommentLiteral() const { return m_skippedComments[Current].literal; }
/// Called by the parser during FunctionDefinition parsing to clear the current comment
void clearCurrentCommentLiteral() { m_skippedComments[Current].literal.clear(); }
ScannerKind scannerKind() const { return m_kind; }
///@}
///@{
///@name Information about the next token
/// @returns the next token without advancing input.
Token peekNextToken() const { return m_tokens[Next].token; }
SourceLocation peekLocation() const { return m_tokens[Next].location; }
std::string const& peekLiteral() const { return m_tokens[Next].literal; }
Token peekNextNextToken() const { return m_tokens[NextNext].token; }
///@}
private:
inline Token setError(ScannerError _error) noexcept
{
m_tokens[NextNext].error = _error;
return Token::Illegal;
}
/// Used for the current and look-ahead token and comments
struct TokenDesc
{
Token token;
SourceLocation location;
std::string literal;
ScannerError error = ScannerError::NoError;
std::tuple<unsigned, unsigned> extendedTokenInfo;
};
///@{
///@name Literal buffer support
inline void addLiteralChar(char c) { m_tokens[NextNext].literal.push_back(c); }
inline void addCommentLiteralChar(char c) { m_skippedComments[NextNext].literal.push_back(c); }
inline void addLiteralCharAndAdvance() { addLiteralChar(m_char); advance(); }
void addUnicodeAsUTF8(unsigned codepoint);
///@}
bool advance() { m_char = m_source.advanceAndGet(); return !m_source.isPastEndOfInput(); }
void rollback(size_t _amount) { m_char = m_source.rollback(_amount); }
/// Rolls back to the start of the current token and re-runs the scanner.
void rescan();
inline Token selectErrorToken(ScannerError _err) { advance(); return setError(_err); }
inline Token selectToken(Token _tok) { advance(); return _tok; }
/// If the next character is _next, advance and return _then, otherwise return _else.
inline Token selectToken(char _next, Token _then, Token _else);
bool scanHexByte(char& o_scannedByte);
std::optional<unsigned> scanUnicode();
/// Scans a single Solidity token.
void scanToken();
/// Skips all whitespace and @returns true if something was skipped.
bool skipWhitespace();
/// Skips all whitespace that are neither '\r' nor '\n'.
bool skipWhitespaceExceptUnicodeLinebreak();
Token skipSingleLineComment();
Token skipMultiLineComment();
/// Tests if current source position is CR, LF or CRLF.
bool atEndOfLine() const;
/// Tries to consume CR, LF or CRLF line terminators and returns success or failure.
bool tryScanEndOfLine();
void scanDecimalDigits();
Token scanNumber(char _charSeen = 0);
std::tuple<Token, unsigned, unsigned> scanIdentifierOrKeyword();
Token scanString(bool const _isUnicode);
Token scanHexString();
/// Scans a single line comment and returns its corrected end position.
size_t scanSingleLineDocComment();
Token scanMultiLineDocComment();
/// Scans a slash '/' and depending on the characters returns the appropriate token
Token scanSlash();
/// Scans an escape-sequence which is part of a string and adds the
/// decoded character to the current literal. Returns true if a pattern
/// is scanned.
bool scanEscape();
/// @returns true iff we are currently positioned at a unicode line break.
bool isUnicodeLinebreak();
/// Return the current source position.
size_t sourcePos() const { return m_source.position(); }
bool isSourcePastEndOfInput() const { return m_source.isPastEndOfInput(); }
enum TokenIndex { Current, Next, NextNext };
TokenDesc m_skippedComments[3] = {}; // desc for the current, next and nextnext skipped comment
TokenDesc m_tokens[3] = {}; // desc for the current, next and nextnext token
CharStream& m_source;
std::shared_ptr<std::string const> m_sourceName;
ScannerKind m_kind = ScannerKind::Solidity;
/// one character look-ahead, equals 0 at end of input
char m_char;
};
}
size: 8,983 | language: C++ | extension: .h | total_lines: 222 | avg_line_length: 38.256757 | max_line_length: 111 | alphanum_fraction: 0.76765
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
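Usage note: a minimal sketch of driving the Scanner over an in-memory source. It assumes CharStream (declared in liblangutil/CharStream.h, not part of this excerpt) is constructible from source text plus a source name, and that Token::EOS marks the end of input.

```cpp
#include <liblangutil/Scanner.h>
#include <liblangutil/CharStream.h>

#include <cstddef>
#include <iostream>

using namespace solidity::langutil;

int main()
{
	// Assumption: CharStream(source, name); the Scanner constructor above only
	// needs _source.name() and character access.
	CharStream stream("contract C { uint x; }", "example.sol");
	Scanner scanner(stream);

	std::size_t count = 0;
	while (scanner.currentToken() != Token::EOS) // assumption: EOS terminates the token stream
	{
		++count;
		scanner.next();
	}
	std::cout << "scanned " << count << " tokens\n";
}
```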
id: 3,660 | file_name: SMTLib2Parser.h | file_path: ethereum_solidity/libsmtutil/SMTLib2Parser.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libsmtutil/Exceptions.h>
#include <iostream>
#include <string>
#include <variant>
#include <vector>
/**
* The following is a parser for SMT-LIB2 expressions developed originally by @chriseth as part of solsmt.
*/
namespace solidity::smtutil
{
struct SMTLib2Expression {
using args_t = std::vector<SMTLib2Expression>;
std::variant<std::string, args_t> data;
[[nodiscard]] std::string toString() const;
};
inline bool isAtom(SMTLib2Expression const& expr)
{
return std::holds_alternative<std::string>(expr.data);
}
inline std::string const& asAtom(SMTLib2Expression const& expr)
{
smtAssert(isAtom(expr));
return std::get<std::string>(expr.data);
}
inline auto const& asSubExpressions(SMTLib2Expression const& expr)
{
smtAssert(!isAtom(expr));
return std::get<SMTLib2Expression::args_t>(expr.data);
}
inline auto& asSubExpressions(SMTLib2Expression& expr)
{
smtAssert(!isAtom(expr));
return std::get<SMTLib2Expression::args_t>(expr.data);
}
class SMTLib2Parser {
public:
class ParsingException {};
explicit SMTLib2Parser(std::istream& _input) :
m_input(_input),
m_token(static_cast<char>(m_input.get())) {}
SMTLib2Expression parseExpression();
bool isEOF()
{
skipWhitespace();
return m_input.eof();
}
private:
std::string parseToken();
void skipWhitespace();
[[nodiscard]] char token() const
{
return m_token;
}
void advance();
std::istream& m_input;
char m_token = 0;
};
}
size: 2,138 | language: C++ | extension: .h | total_lines: 73 | avg_line_length: 27.30137 | max_line_length: 106 | alphanum_fraction: 0.766405
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
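Usage note: a self-contained sketch of parsing S-expressions with the parser above from an in-memory stream; malformed input surfaces as ParsingException.

```cpp
#include <libsmtutil/SMTLib2Parser.h>

#include <iostream>
#include <sstream>

using namespace solidity::smtutil;

int main()
{
	std::istringstream input("(assert (= x 1)) (check-sat)");
	SMTLib2Parser parser(input);
	try
	{
		// parseExpression() yields one SMTLib2Expression per top-level S-expression.
		while (!parser.isEOF())
			std::cout << parser.parseExpression().toString() << "\n";
	}
	catch (SMTLib2Parser::ParsingException const&)
	{
		std::cerr << "malformed SMT-LIB2 input\n";
		return 1;
	}
}
```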
id: 3,662 | file_name: Exceptions.h | file_path: ethereum_solidity/libsmtutil/Exceptions.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libsolutil/Assertions.h>
#include <libsolutil/Exceptions.h>
#include <boost/preprocessor/cat.hpp>
#include <boost/preprocessor/facilities/empty.hpp>
#include <boost/preprocessor/facilities/overload.hpp>
namespace solidity::smtutil
{
struct SMTLogicError: virtual util::Exception {};
/// Assertion that throws an SMTLogicError containing the given description if it is not met.
#if !BOOST_PP_VARIADICS_MSVC
#define smtAssert(...) BOOST_PP_OVERLOAD(smtAssert_,__VA_ARGS__)(__VA_ARGS__)
#else
#define smtAssert(...) BOOST_PP_CAT(BOOST_PP_OVERLOAD(smtAssert_,__VA_ARGS__)(__VA_ARGS__),BOOST_PP_EMPTY())
#endif
#define smtAssert_1(CONDITION) \
smtAssert_2((CONDITION), "")
#define smtAssert_2(CONDITION, DESCRIPTION) \
assertThrowWithDefaultDescription( \
(CONDITION), \
::solidity::smtutil::SMTLogicError, \
(DESCRIPTION), \
"SMT assertion failed" \
)
// Error to indicate that some problem occurred during an interaction with external solver.
// This could be a problem with calling the solver or unexpected situation during the processing of solver's response.
struct SMTSolverInteractionError: virtual util::Exception {};
#define smtSolverInteractionRequire(CONDITION, DESCRIPTION) \
assertThrowWithDefaultDescription( \
(CONDITION), \
::solidity::smtutil::SMTSolverInteractionError, \
(DESCRIPTION), \
"Encountered problem during interaction with a solver" \
)
}
size: 2,103 | language: C++ | extension: .h | total_lines: 49 | avg_line_length: 40.979592 | max_line_length: 118 | alphanum_fraction: 0.782353
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
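Usage note: both macro arities expand to assertThrowWithDefaultDescription and throw SMTLogicError on a failed condition; a short sketch with a placeholder invariant follows.

```cpp
#include <libsmtutil/Exceptions.h>

#include <iostream>

using namespace solidity::smtutil;

int main()
{
	try
	{
		int bitWidth = 512; // placeholder value that violates the invariant below
		smtAssert(bitWidth > 0);                                       // one-argument form, default message
		smtAssert(bitWidth <= 256, "bit-vectors wider than 256 bits"); // two-argument form, custom description
	}
	catch (SMTLogicError const& _error)
	{
		std::cerr << "SMT assertion failed: " << _error.what() << "\n";
		return 1;
	}
}
```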
id: 3,663 | file_name: Helpers.h | file_path: ethereum_solidity/libsmtutil/Helpers.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libsmtutil/SolverInterface.h>
namespace solidity::smtutil
{
/// Signed division in SMTLIB2 rounds differently than EVM.
/// This does not check for division by zero!
inline Expression signedDivisionEVM(Expression _left, Expression _right)
{
return Expression::ite(
_left >= 0,
Expression::ite(_right >= 0, _left / _right, 0 - (_left / (0 - _right))),
Expression::ite(_right >= 0, 0 - ((0 - _left) / _right), (0 - _left) / (0 - _right))
);
}
inline Expression abs(Expression _value)
{
return Expression::ite(_value >= 0, _value, 0 - _value);
}
/// Signed modulo in SMTLIB2 behaves differently with regards
/// to the sign than EVM.
/// This does not check for modulo by zero!
inline Expression signedModuloEVM(Expression _left, Expression _right)
{
return Expression::ite(
_left >= 0,
_left % _right,
Expression::ite(
(_left % _right) == 0,
0,
(_left % _right) - abs(_right)
)
);
}
}
size: 1,626 | language: C++ | extension: .h | total_lines: 48 | avg_line_length: 31.854167 | max_line_length: 86 | alphanum_fraction: 0.727679
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
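Usage note: a sketch of building EVM-style signed division and modulo terms with the helpers above. It assumes SortProvider::sintSort from libsmtutil/Sorts.h (referenced by SolverInterface.h) is available; the variable names are placeholders.

```cpp
#include <libsmtutil/Helpers.h>

using namespace solidity::smtutil;

int main()
{
	// Symbolic signed integers (assumption: SortProvider::sintSort exists in Sorts.h).
	Expression a("a", {}, SortProvider::sintSort);
	Expression b("b", {}, SortProvider::sintSort);

	// EVM-rounding signed division and modulo, encoded as ite-terms over SMT integers.
	Expression quotient = signedDivisionEVM(a, b);
	Expression remainder = signedModuloEVM(a, b);

	// The resulting terms can then be asserted through any SolverInterface implementation.
	return (quotient.name == "ite" && remainder.name == "ite") ? 0 : 1;
}
```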
id: 3,664 | file_name: SMTLib2Context.h | file_path: ethereum_solidity/libsmtutil/SMTLib2Context.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libsmtutil/SolverInterface.h>
#include <libsmtutil/Sorts.h>
#include <string>
#include <unordered_map>
#include <unordered_set>
namespace solidity::smtutil
{
using SortId = uint32_t;
struct SortPairHash
{
std::size_t operator()(std::pair<SortId, SortId> const& _pair) const;
};
struct SMTLibType {
Kind const kind;
SortId const id;
SMTLibType(Kind _kind, SortId _id): kind(_kind), id(_id) {}
virtual ~SMTLibType() = default;
};
struct SMTLibSort : public SMTLibType
{
std::string const name;
std::vector<SortId> const args;
SMTLibSort(
Kind _kind,
std::string_view _name,
std::vector<SortId> _args,
SortId _id
): SMTLibType(_kind, _id), name(_name), args(std::move(_args)) {}
};
struct TupleType : public SMTLibType
{
std::string const name;
std::vector<std::pair<std::string,SortId>> accessors;
TupleType(std::string_view _name, std::vector<std::pair<std::string,SortId>> _accessors, SortId _id)
: SMTLibType(Kind::Tuple, _id), name(_name), accessors(std::move(_accessors)) {}
};
class SMTLib2Context
{
public:
using TupleDeclarationCallback = std::function<void(TupleSort const&)>;
SMTLib2Context();
void clear();
bool isDeclared(std::string const& _name) const;
void declare(std::string const& _name, SortPointer const& _sort);
SortPointer getDeclaredSort(std::string const& _name) const;
SortId resolve(SortPointer const& _sort);
SortPointer unresolve(SortId _sortId) const;
std::string toString(SortId _id);
std::string toSExpr(Expression const& _expr);
std::string toSmtLibSort(SortPointer const& _sort);
std::optional<SortPointer> getTupleType(std::string const& _name) const;
std::optional<std::pair<std::string, SortPointer>> getTupleAccessor(std::string const& _name) const;
void setTupleDeclarationCallback(TupleDeclarationCallback _callback);
private:
SortId resolveBitVectorSort(BitVectorSort const& _sort);
SortId resolveArraySort(ArraySort const& _sort);
SortId resolveTupleSort(TupleSort const& _sort);
using functions_t = std::map<std::string, SortPointer>;
functions_t m_functions; // Variables are uninterpreted constants = nullary functions
SortId const m_boolSort{0u};
SortId const m_intSort{1u};
std::vector<std::unique_ptr<SMTLibType>> m_knownTypes;
std::unordered_map<std::pair<SortId, SortId>, SortId, SortPairHash> m_arraySorts;
std::unordered_map<std::size_t, SortId> m_bitVectorSorts;
std::unordered_map<std::string, SortId> m_tupleSorts;
TupleDeclarationCallback m_callback;
};
}
size: 3,187 | language: C++ | extension: .h | total_lines: 83 | avg_line_length: 36.385542 | max_line_length: 101 | alphanum_fraction: 0.769981
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
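Usage note: a small sketch of declaring an uninterpreted constant in the context and rendering a term as SMT-LIB2 text. SortProvider::uintSort from libsmtutil/Sorts.h is assumed available, and the exact S-expression output is not asserted.

```cpp
#include <libsmtutil/SMTLib2Context.h>

#include <cstddef>
#include <iostream>

using namespace solidity::smtutil;

int main()
{
	SMTLib2Context context;

	// Declare an uninterpreted constant (a nullary function) of integer sort.
	context.declare("x", SortProvider::uintSort);
	if (context.isDeclared("x"))
		std::cout << "sort of x: " << context.toSmtLibSort(context.getDeclaredSort("x")) << "\n";

	// Render a term over the declared constant as an S-expression.
	Expression x("x", {}, SortProvider::uintSort);
	std::cout << context.toSExpr(x + Expression(std::size_t(1))) << "\n";
}
```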
id: 3,665 | file_name: SMTLib2Interface.h | file_path: ethereum_solidity/libsmtutil/SMTLib2Interface.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libsmtutil/BMCSolverInterface.h>
#include <libsmtutil/SMTLib2Context.h>
#include <libsolidity/interface/ReadFile.h>
#include <libsolutil/Common.h>
#include <libsolutil/FixedHash.h>
#include <cstdio>
#include <map>
#include <set>
#include <string>
#include <vector>
namespace solidity::smtutil
{
class SMTLib2Commands
{
public:
void push();
void pop();
void clear();
void assertion(std::string _expr);
void setOption(std::string _name, std::string _value);
void setLogic(std::string _logic);
void declareVariable(std::string _name, std::string _sort);
void declareFunction(std::string const& _name, std::vector<std::string> const& _domain, std::string const& _codomain);
void declareTuple(
std::string const& _name,
std::vector<std::string> const& _memberNames,
std::vector<std::string> const& _memberSorts
);
[[nodiscard]] std::string toString() const;
private:
std::vector<std::string> m_commands;
std::vector<std::size_t> m_frameLimits;
};
class SMTLib2Interface: public BMCSolverInterface
{
public:
/// Noncopyable.
SMTLib2Interface(SMTLib2Interface const&) = delete;
SMTLib2Interface& operator=(SMTLib2Interface const&) = delete;
explicit SMTLib2Interface(
std::map<util::h256, std::string> _queryResponses = {},
frontend::ReadCallback::Callback _smtCallback = {},
std::optional<unsigned> _queryTimeout = {}
);
void reset() override;
void push() override;
void pop() override;
void declareVariable(std::string const&, SortPointer const&) override;
void addAssertion(Expression const& _expr) override;
std::pair<CheckResult, std::vector<std::string>> check(std::vector<Expression> const& _expressionsToEvaluate) override;
std::vector<std::string> unhandledQueries() override { return m_unhandledQueries; }
// Used by CHCSmtLib2Interface
std::string toSExpr(Expression const& _expr);
std::string toSmtLibSort(SortPointer _sort);
std::vector<std::string> toSmtLibSort(std::vector<SortPointer> const& _sort);
std::string dumpQuery(std::vector<Expression> const& _expressionsToEvaluate);
protected:
virtual void setupSmtCallback() {}
void declareFunction(std::string const& _name, SortPointer const& _sort);
std::string checkSatAndGetValuesCommand(std::vector<Expression> const& _expressionsToEvaluate);
/// Communicates with the solver via the callback. Throws SMTSolverError on error.
virtual std::string querySolver(std::string const& _input);
SMTLib2Commands m_commands;
SMTLib2Context m_context;
std::map<util::h256, std::string> m_queryResponses;
std::vector<std::string> m_unhandledQueries;
frontend::ReadCallback::Callback m_smtCallback;
};
}
size: 3,342 | language: C++ | extension: .h | total_lines: 84 | avg_line_length: 37.619048 | max_line_length: 120 | alphanum_fraction: 0.776606
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
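Usage note: a sketch of building a query with the interface above. Without precomputed query responses or a read callback no external solver can actually be invoked, so the sketch only declares, asserts, and dumps the generated SMT-LIB2 text; SortProvider::uintSort is assumed available from Sorts.h.

```cpp
#include <libsmtutil/SMTLib2Interface.h>

#include <cstddef>
#include <iostream>

using namespace solidity::smtutil;

int main()
{
	// No query responses and no callback: usable here only for generating SMT-LIB2 text.
	SMTLib2Interface solver;

	solver.declareVariable("x", SortProvider::uintSort);
	Expression x("x", {}, SortProvider::uintSort);
	solver.addAssertion(x > Expression(std::size_t(10)));

	// Full query as plain text (can be handed to an external solver manually).
	std::cout << solver.dumpQuery({}) << "\n";
}
```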
id: 3,666 | file_name: CHCSmtLib2Interface.h | file_path: ethereum_solidity/libsmtutil/CHCSmtLib2Interface.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* Interface for solving Horn systems via smtlib2.
*/
#pragma once
#include <libsmtutil/CHCSolverInterface.h>
#include <libsmtutil/SMTLib2Interface.h>
#include <libsmtutil/SMTLib2Parser.h>
namespace solidity::smtutil
{
class CHCSmtLib2Interface: public CHCSolverInterface
{
public:
explicit CHCSmtLib2Interface(
std::map<util::h256, std::string> _queryResponses = {},
frontend::ReadCallback::Callback _smtCallback = {},
std::optional<unsigned> _queryTimeout = {}
);
void reset();
void registerRelation(Expression const& _expr) override;
void addRule(Expression const& _expr, std::string const& _name) override;
/// Takes a function application _expr and checks for reachability.
/// @returns solving result, an invariant, and counterexample graph, if possible.
QueryResult query(Expression const& _expr) override;
void declareVariable(std::string const& _name, SortPointer const& _sort) override;
std::string dumpQuery(Expression const& _expr);
std::vector<std::string> unhandledQueries() const { return m_unhandledQueries; }
protected:
class ScopedParser
{
public:
ScopedParser(SMTLib2Context const& _context): m_context(_context) {}
smtutil::Expression toSMTUtilExpression(SMTLib2Expression const& _expr);
SortPointer toSort(SMTLib2Expression const& _expr);
void addVariableDeclaration(std::string _name, SortPointer _sort);
private:
std::optional<SortPointer> lookupKnownTupleSort(std::string const& _name) const;
smtutil::Expression parseQuantifier(
std::string const& _quantifierName,
std::vector<SMTLib2Expression> const& _varList,
SMTLib2Expression const& _coreExpression
);
SMTLib2Context const& m_context;
std::unordered_map<std::string, SortPointer> m_localVariables;
};
/* Modifies the passed expression by inlining all let subexpressions */
static void inlineLetExpressions(SMTLib2Expression& _expr);
std::string toSmtLibSort(SortPointer const& _sort);
std::vector<std::string> toSmtLibSort(std::vector<SortPointer> const& _sort);
std::string forall(Expression const& _expr);
static std::string createQueryAssertion(std::string _name);
void createHeader();
/// Communicates with the solver via the callback. Throws SMTSolverError on error.
virtual std::string querySolver(std::string const& _input);
/// Translates CHC solver response with a model to our representation of invariants. Returns None on error.
std::optional<smtutil::Expression> invariantsFromSolverResponse(std::string const& _response) const;
std::set<std::string> collectVariableNames(Expression const& _expr) const;
SMTLib2Commands m_commands;
SMTLib2Context m_context;
std::map<util::h256, std::string> m_queryResponses;
std::vector<std::string> m_unhandledQueries;
frontend::ReadCallback::Callback m_smtCallback;
};
}
size: 3,488 | language: C++ | extension: .h | total_lines: 77 | avg_line_length: 42.818182 | max_line_length: 108 | alphanum_fraction: 0.786793
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
id: 3,667 | file_name: SolverInterface.h | file_path: ethereum_solidity/libsmtutil/SolverInterface.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libsmtutil/Exceptions.h>
#include <libsmtutil/Sorts.h>
#include <libsolutil/Common.h>
#include <libsolutil/Numeric.h>
#include <libsolutil/CommonData.h>
#include <range/v3/algorithm/all_of.hpp>
#include <range/v3/view.hpp>
#include <cstdio>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <vector>
namespace solidity::smtutil
{
struct SMTSolverChoice
{
bool cvc5 = false;
bool eld = false;
bool smtlib2 = false;
bool z3 = false;
static constexpr SMTSolverChoice All() noexcept { return {true, true, true, true}; }
static constexpr SMTSolverChoice CVC5() noexcept { return {true, false, false, false}; }
static constexpr SMTSolverChoice ELD() noexcept { return {false, true, false, false}; }
static constexpr SMTSolverChoice SMTLIB2() noexcept { return {false, false, true, false}; }
static constexpr SMTSolverChoice Z3() noexcept { return {false, false, false, true}; }
static constexpr SMTSolverChoice None() noexcept { return {false, false, false, false}; }
static std::optional<SMTSolverChoice> fromString(std::string const& _solvers)
{
SMTSolverChoice solvers;
for (auto&& s: _solvers | ranges::views::split(',') | ranges::to<std::vector<std::string>>())
if (!solvers.setSolver(s))
return {};
return solvers;
}
SMTSolverChoice& operator&=(SMTSolverChoice const& _other)
{
cvc5 &= _other.cvc5;
eld &= _other.eld;
smtlib2 &= _other.smtlib2;
z3 &= _other.z3;
return *this;
}
SMTSolverChoice operator&(SMTSolverChoice _other) const noexcept
{
_other &= *this;
return _other;
}
bool operator!=(SMTSolverChoice const& _other) const noexcept { return !(*this == _other); }
bool operator==(SMTSolverChoice const& _other) const noexcept
{
return cvc5 == _other.cvc5 &&
eld == _other.eld &&
smtlib2 == _other.smtlib2 &&
z3 == _other.z3;
}
bool setSolver(std::string const& _solver)
{
static std::set<std::string> const solvers{"cvc5", "eld", "smtlib2", "z3"};
if (!solvers.count(_solver))
return false;
if (_solver == "cvc5")
cvc5 = true;
if (_solver == "eld")
eld = true;
else if (_solver == "smtlib2")
smtlib2 = true;
else if (_solver == "z3")
z3 = true;
return true;
}
bool none() const noexcept { return !some(); }
bool some() const noexcept { return cvc5 || eld || smtlib2 || z3; }
bool all() const noexcept { return cvc5 && eld && smtlib2 && z3; }
};
enum class CheckResult
{
SATISFIABLE, UNSATISFIABLE, UNKNOWN, CONFLICTING, ERROR
};
/// C++ representation of an SMTLIB2 expression.
class Expression
{
friend class SolverInterface;
public:
explicit Expression(bool _v): Expression(_v ? "true" : "false", Kind::Bool) {}
explicit Expression(std::shared_ptr<SortSort> _sort, std::string _name = ""): Expression(std::move(_name), {}, _sort) {}
explicit Expression(std::string _name, std::vector<Expression> _arguments, SortPointer _sort):
name(std::move(_name)), arguments(std::move(_arguments)), sort(std::move(_sort)) {}
Expression(size_t _number): Expression(std::to_string(_number), {}, SortProvider::uintSort) {}
Expression(u256 const& _number): Expression(_number.str(), {}, SortProvider::uintSort) {}
Expression(s256 const& _number): Expression(
_number >= 0 ? _number.str() : "-",
_number >= 0 ?
std::vector<Expression>{} :
std::vector<Expression>{Expression(size_t(0)), bigint(-_number)},
SortProvider::sintSort
) {}
Expression(bigint const& _number): Expression(
_number >= 0 ? _number.str() : "-",
_number >= 0 ?
std::vector<Expression>{} :
std::vector<Expression>{Expression(size_t(0)), bigint(-_number)},
SortProvider::sintSort
) {}
Expression(Expression const&) = default;
Expression(Expression&&) = default;
Expression& operator=(Expression const&) = default;
Expression& operator=(Expression&&) = default;
bool hasCorrectArity() const
{
if (name == "tuple_constructor")
{
auto tupleSort = std::dynamic_pointer_cast<TupleSort>(sort);
smtAssert(tupleSort, "");
return arguments.size() == tupleSort->components.size();
}
static std::map<std::string, unsigned> const operatorsArity{
{"ite", 3},
{"not", 1},
{"and", 2},
{"or", 2},
{"=>", 2},
{"=", 2},
{"<", 2},
{"<=", 2},
{">", 2},
{">=", 2},
{"+", 2},
{"-", 2},
{"*", 2},
{"div", 2},
{"mod", 2},
{"bvnot", 1},
{"bvand", 2},
{"bvor", 2},
{"bvxor", 2},
{"bvshl", 2},
{"bvlshr", 2},
{"bvashr", 2},
{"int2bv", 2},
{"bv2int", 1},
{"select", 2},
{"store", 3},
{"const_array", 2},
{"tuple_get", 2}
};
return operatorsArity.count(name) && operatorsArity.at(name) == arguments.size();
}
static Expression ite(Expression _condition, Expression _trueValue, Expression _falseValue)
{
smtAssert(areCompatible(*_trueValue.sort, *_falseValue.sort));
SortPointer sort = _trueValue.sort;
return Expression("ite", std::vector<Expression>{
std::move(_condition), std::move(_trueValue), std::move(_falseValue)
}, std::move(sort));
}
static Expression implies(Expression _a, Expression _b)
{
return Expression(
"=>",
std::move(_a),
std::move(_b),
Kind::Bool
);
}
/// select is the SMT representation of an array index access.
static Expression select(Expression _array, Expression _index)
{
smtAssert(_array.sort->kind == Kind::Array, "");
std::shared_ptr<ArraySort> arraySort = std::dynamic_pointer_cast<ArraySort>(_array.sort);
smtAssert(arraySort, "");
smtAssert(_index.sort, "");
smtAssert(areCompatible(*arraySort->domain, *_index.sort));
return Expression(
"select",
std::vector<Expression>{std::move(_array), std::move(_index)},
arraySort->range
);
}
/// store is the SMT representation of an assignment to array index.
/// The function is pure and returns the modified array.
static Expression store(Expression _array, Expression _index, Expression _element)
{
auto arraySort = std::dynamic_pointer_cast<ArraySort>(_array.sort);
smtAssert(arraySort, "");
smtAssert(_index.sort, "");
smtAssert(_element.sort, "");
smtAssert(areCompatible(*arraySort->domain, *_index.sort));
smtAssert(areCompatible(*arraySort->range, *_element.sort));
return Expression(
"store",
std::vector<Expression>{std::move(_array), std::move(_index), std::move(_element)},
arraySort
);
}
static Expression const_array(Expression _sort, Expression _value)
{
smtAssert(_sort.sort->kind == Kind::Sort, "");
auto sortSort = std::dynamic_pointer_cast<SortSort>(_sort.sort);
auto arraySort = std::dynamic_pointer_cast<ArraySort>(sortSort->inner);
smtAssert(sortSort && arraySort, "");
smtAssert(_value.sort, "");
smtAssert(areCompatible(*arraySort->range, *_value.sort));
return Expression(
"const_array",
std::vector<Expression>{std::move(_sort), std::move(_value)},
arraySort
);
}
static Expression tuple_get(Expression _tuple, size_t _index)
{
smtAssert(_tuple.sort->kind == Kind::Tuple, "");
std::shared_ptr<TupleSort> tupleSort = std::dynamic_pointer_cast<TupleSort>(_tuple.sort);
smtAssert(tupleSort, "");
smtAssert(_index < tupleSort->components.size(), "");
return Expression(
"tuple_get",
std::vector<Expression>{std::move(_tuple), Expression(_index)},
tupleSort->components.at(_index)
);
}
static Expression tuple_constructor(Expression _tuple, std::vector<Expression> _arguments)
{
smtAssert(_tuple.sort->kind == Kind::Sort, "");
auto sortSort = std::dynamic_pointer_cast<SortSort>(_tuple.sort);
auto tupleSort = std::dynamic_pointer_cast<TupleSort>(sortSort->inner);
smtAssert(tupleSort, "");
smtAssert(_arguments.size() == tupleSort->components.size(), "");
return Expression(
"tuple_constructor",
std::move(_arguments),
tupleSort
);
}
static Expression int2bv(Expression _n, size_t _size)
{
smtAssert(_n.sort->kind == Kind::Int, "");
std::shared_ptr<IntSort> intSort = std::dynamic_pointer_cast<IntSort>(_n.sort);
smtAssert(intSort, "");
smtAssert(_size <= 256, "");
return Expression(
"int2bv",
std::vector<Expression>{std::move(_n), Expression(_size)},
std::make_shared<BitVectorSort>(_size)
);
}
static Expression bv2int(Expression _bv, bool _signed = false)
{
smtAssert(_bv.sort->kind == Kind::BitVector, "");
std::shared_ptr<BitVectorSort> bvSort = std::dynamic_pointer_cast<BitVectorSort>(_bv.sort);
smtAssert(bvSort, "");
smtAssert(bvSort->size <= 256, "");
return Expression(
"bv2int",
std::vector<Expression>{std::move(_bv)},
SortProvider::intSort(_signed)
);
}
static bool sameSort(std::vector<Expression> const& _args)
{
if (_args.empty())
return true;
auto sort = _args.front().sort;
return ranges::all_of(
_args,
[&](auto const& _expr){ return _expr.sort->kind == sort->kind; }
);
}
static Expression mkAnd(std::vector<Expression> _args)
{
smtAssert(!_args.empty(), "");
smtAssert(sameSort(_args), "");
auto sort = _args.front().sort;
if (sort->kind == Kind::BitVector)
return Expression("bvand", std::move(_args), sort);
smtAssert(sort->kind == Kind::Bool, "");
return Expression("and", std::move(_args), Kind::Bool);
}
static Expression mkOr(std::vector<Expression> _args)
{
smtAssert(!_args.empty(), "");
smtAssert(sameSort(_args), "");
auto sort = _args.front().sort;
if (sort->kind == Kind::BitVector)
return Expression("bvor", std::move(_args), sort);
smtAssert(sort->kind == Kind::Bool, "");
return Expression("or", std::move(_args), Kind::Bool);
}
static Expression mkPlus(std::vector<Expression> _args)
{
smtAssert(!_args.empty(), "");
smtAssert(sameSort(_args), "");
auto sort = _args.front().sort;
smtAssert(sort->kind == Kind::BitVector || sort->kind == Kind::Int, "");
return Expression("+", std::move(_args), sort);
}
static Expression mkMul(std::vector<Expression> _args)
{
smtAssert(!_args.empty(), "");
smtAssert(sameSort(_args), "");
auto sort = _args.front().sort;
smtAssert(sort->kind == Kind::BitVector || sort->kind == Kind::Int, "");
return Expression("*", std::move(_args), sort);
}
friend Expression operator!(Expression _a)
{
if (_a.sort->kind == Kind::BitVector)
return ~_a;
return Expression("not", std::move(_a), Kind::Bool);
}
friend Expression operator&&(Expression _a, Expression _b)
{
if (_a.sort->kind == Kind::BitVector)
{
smtAssert(_b.sort->kind == Kind::BitVector, "");
return _a & _b;
}
return Expression("and", std::move(_a), std::move(_b), Kind::Bool);
}
friend Expression operator||(Expression _a, Expression _b)
{
if (_a.sort->kind == Kind::BitVector)
{
smtAssert(_b.sort->kind == Kind::BitVector, "");
return _a | _b;
}
return Expression("or", std::move(_a), std::move(_b), Kind::Bool);
}
friend Expression operator==(Expression _a, Expression _b)
{
smtAssert(_a.sort->kind == _b.sort->kind, "Trying to create an 'equal' expression with different sorts");
return Expression("=", std::move(_a), std::move(_b), Kind::Bool);
}
friend Expression operator!=(Expression _a, Expression _b)
{
return !(std::move(_a) == std::move(_b));
}
friend Expression operator<(Expression _a, Expression _b)
{
return Expression("<", std::move(_a), std::move(_b), Kind::Bool);
}
friend Expression operator<=(Expression _a, Expression _b)
{
return Expression("<=", std::move(_a), std::move(_b), Kind::Bool);
}
friend Expression operator>(Expression _a, Expression _b)
{
return Expression(">", std::move(_a), std::move(_b), Kind::Bool);
}
friend Expression operator>=(Expression _a, Expression _b)
{
return Expression(">=", std::move(_a), std::move(_b), Kind::Bool);
}
friend Expression operator+(Expression _a, Expression _b)
{
auto intSort = _a.sort;
return Expression("+", {std::move(_a), std::move(_b)}, intSort);
}
friend Expression operator-(Expression _a, Expression _b)
{
auto intSort = _a.sort;
return Expression("-", {std::move(_a), std::move(_b)}, intSort);
}
friend Expression operator*(Expression _a, Expression _b)
{
auto intSort = _a.sort;
return Expression("*", {std::move(_a), std::move(_b)}, intSort);
}
friend Expression operator/(Expression _a, Expression _b)
{
auto intSort = _a.sort;
return Expression("div", {std::move(_a), std::move(_b)}, intSort);
}
friend Expression operator%(Expression _a, Expression _b)
{
auto intSort = _a.sort;
return Expression("mod", {std::move(_a), std::move(_b)}, intSort);
}
friend Expression operator~(Expression _a)
{
auto bvSort = _a.sort;
return Expression("bvnot", {std::move(_a)}, bvSort);
}
friend Expression operator&(Expression _a, Expression _b)
{
auto bvSort = _a.sort;
return Expression("bvand", {std::move(_a), std::move(_b)}, bvSort);
}
friend Expression operator|(Expression _a, Expression _b)
{
auto bvSort = _a.sort;
return Expression("bvor", {std::move(_a), std::move(_b)}, bvSort);
}
friend Expression operator^(Expression _a, Expression _b)
{
auto bvSort = _a.sort;
return Expression("bvxor", {std::move(_a), std::move(_b)}, bvSort);
}
friend Expression operator<<(Expression _a, Expression _b)
{
auto bvSort = _a.sort;
return Expression("bvshl", {std::move(_a), std::move(_b)}, bvSort);
}
friend Expression operator>>(Expression _a, Expression _b)
{
auto bvSort = _a.sort;
return Expression("bvlshr", {std::move(_a), std::move(_b)}, bvSort);
}
static Expression ashr(Expression _a, Expression _b)
{
auto bvSort = _a.sort;
return Expression("bvashr", {std::move(_a), std::move(_b)}, bvSort);
}
Expression operator()(std::vector<Expression> _arguments) const
{
smtAssert(
sort->kind == Kind::Function,
"Attempted function application to non-function."
);
auto fSort = dynamic_cast<FunctionSort const*>(sort.get());
smtAssert(fSort, "");
return Expression(name, std::move(_arguments), fSort->codomain);
}
std::string name;
std::vector<Expression> arguments;
SortPointer sort;
private:
/// Helper method for checking sort compatibility when creating expressions
/// Signed and unsigned Int sorts are compatible even though they are not same
static bool areCompatible(Sort const& s1, Sort const& s2)
{
return s1.kind == Kind::Int ? s1.kind == s2.kind : s1 == s2;
}
/// Manual constructors, should only be used by SolverInterface and this class itself.
Expression(std::string _name, std::vector<Expression> _arguments, Kind _kind):
Expression(std::move(_name), std::move(_arguments), std::make_shared<Sort>(_kind)) {}
explicit Expression(std::string _name, Kind _kind):
Expression(std::move(_name), std::vector<Expression>{}, _kind) {}
Expression(std::string _name, Expression _arg, Kind _kind):
Expression(std::move(_name), std::vector<Expression>{std::move(_arg)}, _kind) {}
Expression(std::string _name, Expression _arg1, Expression _arg2, Kind _kind):
Expression(std::move(_name), std::vector<Expression>{std::move(_arg1), std::move(_arg2)}, _kind) {}
};
DEV_SIMPLE_EXCEPTION(SolverError);
class SolverInterface
{
public:
SolverInterface() = default;
virtual ~SolverInterface() = default;
virtual void declareVariable(std::string const& _name, SortPointer const& _sort) = 0;
Expression newVariable(std::string _name, SortPointer const& _sort)
{
// Subclasses should do something here
smtAssert(_sort, "");
declareVariable(_name, _sort);
return Expression(std::move(_name), {}, _sort);
}
/// @returns how many SMT solvers this interface has.
virtual size_t solvers() { return 1; }
};
}
size: 16,185 | language: C++ | extension: .h | total_lines: 477 | avg_line_length: 31.197065 | max_line_length: 121 | alphanum_fraction: 0.683447
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
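Usage note: a sketch of composing Expression terms with the operators and factories defined above. SortProvider::uintSort and SortProvider::boolSort are assumed to be provided by libsmtutil/Sorts.h; the variable names are placeholders.

```cpp
#include <libsmtutil/SolverInterface.h>

#include <cstddef>
#include <iostream>

using namespace solidity::smtutil;

int main()
{
	// Symbolic unsigned integers and a symbolic boolean condition.
	Expression x("x", {}, SortProvider::uintSort);
	Expression y("y", {}, SortProvider::uintSort);
	Expression c("c", {}, SortProvider::boolSort); // assumption: boolSort exists in SortProvider

	// Operators build new terms; ite() checks that both branches have compatible sorts.
	Expression sum = x + y;
	Expression bounded = Expression::implies(c, sum <= Expression(std::size_t(100)));
	Expression pick = Expression::ite(c, x, y);

	std::cout << std::boolalpha
	          << "arity ok: " << bounded.hasCorrectArity() << "\n"
	          << "ite keeps the branch sort: " << (pick.sort->kind == x.sort->kind) << "\n";
}
```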
id: 3,668 | file_name: CHCSolverInterface.h | file_path: ethereum_solidity/libsmtutil/CHCSolverInterface.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* Interface for constrained Horn solvers.
*/
#pragma once
#include <libsmtutil/SolverInterface.h>
#include <map>
#include <vector>
namespace solidity::smtutil
{
class CHCSolverInterface : public SolverInterface
{
public:
CHCSolverInterface(std::optional<unsigned> _queryTimeout = {}): m_queryTimeout(_queryTimeout) {}
/// Takes a function declaration as a relation.
virtual void registerRelation(Expression const& _expr) = 0;
/// Takes an implication and adds as rule.
/// Needs to bound all vars as universally quantified.
virtual void addRule(Expression const& _expr, std::string const& _name) = 0;
using CexNode = Expression;
struct CexGraph
{
std::map<unsigned, CexNode> nodes;
std::map<unsigned, std::vector<unsigned>> edges;
};
struct QueryResult
{
CheckResult answer;
Expression invariant;
CexGraph cex;
};
/// Takes a function application _expr and checks for reachability.
/// @returns solving result, an invariant, and counterexample graph, if possible.
virtual QueryResult query(Expression const& _expr) = 0;
protected:
std::optional<unsigned> m_queryTimeout;
};
}
size: 1,806 | language: C++ | extension: .h | total_lines: 51 | avg_line_length: 33.333333 | max_line_length: 97 | alphanum_fraction: 0.775287
repo_name: ethereum/solidity | repo_stars: 23,062 | repo_forks: 5,715 | repo_open_issues: 501 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near for RedPajama, GitHub Code, Stack v1, Stack v2): all false
id: 3,669 | file_name: SMTPortfolio.h | file_path: ethereum_solidity/libsmtutil/SMTPortfolio.h | content:
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libsmtutil/BMCSolverInterface.h>
#include <libsolidity/interface/ReadFile.h>
#include <libsolutil/FixedHash.h>
#include <map>
#include <vector>
namespace solidity::smtutil
{
/**
* The SMTPortfolio wraps all available solvers within a single interface,
* propagating the functionalities to all solvers.
* It also checks whether different solvers give conflicting answers
* to SMT queries.
*/
class SMTPortfolio: public BMCSolverInterface
{
public:
/// Noncopyable.
SMTPortfolio(SMTPortfolio const&) = delete;
SMTPortfolio& operator=(SMTPortfolio const&) = delete;
SMTPortfolio(std::vector<std::unique_ptr<BMCSolverInterface>> solvers, std::optional<unsigned> _queryTimeout);
void reset() override;
void push() override;
void pop() override;
void declareVariable(std::string const&, SortPointer const&) override;
void addAssertion(Expression const& _expr) override;
std::pair<CheckResult, std::vector<std::string>> check(std::vector<Expression> const& _expressionsToEvaluate) override;
std::vector<std::string> unhandledQueries() override;
size_t solvers() override { return m_solvers.size(); }
std::string dumpQuery(std::vector<Expression> const& _expressionsToEvaluate);
private:
static bool solverAnswered(CheckResult result);
std::vector<std::unique_ptr<BMCSolverInterface>> m_solvers;
std::vector<Expression> m_assertions;
};
}
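// Usage sketch (not part of the header above): wraps pre-constructed BMC backends in a
// portfolio, asserts one expression and checks satisfiability, letting the portfolio
// cross-check the answers of all wrapped solvers. The 500 passed as timeout is an
// assumed, arbitrary value.
#include <libsmtutil/SMTPortfolio.h>

#include <memory>
#include <optional>
#include <utility>
#include <vector>

solidity::smtutil::CheckResult checkWithAllSolvers(
	std::vector<std::unique_ptr<solidity::smtutil::BMCSolverInterface>> _backends,
	solidity::smtutil::Expression const& _assertion
)
{
	using namespace solidity::smtutil;
	SMTPortfolio portfolio(std::move(_backends), std::optional<unsigned>{500});
	portfolio.push();
	portfolio.addAssertion(_assertion);
	auto [answer, values] = portfolio.check({}); // no expressions to evaluate, just satisfiability
	(void)values;
	portfolio.pop();
	return answer;
}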
| 2,078 | C++ | .h | 50 | 39.5 | 120 | 0.791729 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,670 | BMCSolverInterface.h | ethereum_solidity/libsmtutil/BMCSolverInterface.h |
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libsmtutil/SolverInterface.h>
namespace solidity::smtutil
{
class BMCSolverInterface : public SolverInterface
{
public:
explicit BMCSolverInterface(std::optional<unsigned> _queryTimeout = {}): m_queryTimeout(_queryTimeout) {}
~BMCSolverInterface() override = default;
virtual void reset() = 0;
virtual void push() = 0;
virtual void pop() = 0;
virtual void addAssertion(Expression const& _expr) = 0;
/// Checks for satisfiability, evaluates the expressions if a model
/// is available. Throws SMTSolverError on error.
virtual std::pair<CheckResult, std::vector<std::string>>
check(std::vector<Expression> const& _expressionsToEvaluate) = 0;
/// @returns a list of queries that the system was not able to respond to.
virtual std::vector<std::string> unhandledQueries() { return {}; }
protected:
std::optional<unsigned> m_queryTimeout;
};
}
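// Usage sketch (not part of the header above): the incremental push/assert/check/pop
// pattern a bounded model checker follows for a single verification target; the
// expressions are assumed to be built by the caller and the CheckResult enumerators
// are the usual ones from SolverInterface.h.
#include <libsmtutil/BMCSolverInterface.h>

bool violationPossible(
	solidity::smtutil::BMCSolverInterface& _solver,
	solidity::smtutil::Expression const& _pathCondition,
	solidity::smtutil::Expression const& _negatedProperty
)
{
	using namespace solidity::smtutil;
	_solver.push();                           // local scope for this target
	_solver.addAssertion(_pathCondition);
	_solver.addAssertion(_negatedProperty);
	auto [result, model] = _solver.check({}); // satisfiability only, nothing to evaluate
	(void)model;
	_solver.pop();                            // discard the target-specific assertions
	return result == CheckResult::SATISFIABLE;
}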
| 1,570 | C++ | .h | 37 | 40.405405 | 106 | 0.776169 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,673 | Assembly.h | ethereum_solidity/libevmasm/Assembly.h |
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libevmasm/Instruction.h>
#include <liblangutil/SourceLocation.h>
#include <libevmasm/AssemblyItem.h>
#include <libevmasm/LinkerObject.h>
#include <libevmasm/Exceptions.h>
#include <liblangutil/DebugInfoSelection.h>
#include <liblangutil/EVMVersion.h>
#include <libsolutil/Common.h>
#include <libsolutil/Assertions.h>
#include <libsolutil/Keccak256.h>
#include <libsolutil/JSON.h>
#include <libsolidity/interface/OptimiserSettings.h>
#include <iostream>
#include <sstream>
#include <memory>
#include <map>
#include <utility>
namespace solidity::evmasm
{
using AssemblyPointer = std::shared_ptr<Assembly>;
class Assembly
{
using TagRefs = std::map<size_t, std::pair<size_t, size_t>>;
using DataRefs = std::multimap<util::h256, unsigned>;
using SubAssemblyRefs = std::multimap<size_t, size_t>;
using ProgramSizeRefs = std::vector<unsigned>;
using LinkRef = std::pair<size_t, std::string>;
public:
Assembly(langutil::EVMVersion _evmVersion, bool _creation, std::optional<uint8_t> _eofVersion, std::string _name):
m_evmVersion(_evmVersion),
m_creation(_creation),
m_eofVersion(_eofVersion),
m_name(std::move(_name))
{
// Code section number 0 has to be non-returning.
m_codeSections.emplace_back(CodeSection{0, 0x80, {}});
}
std::optional<uint8_t> eofVersion() const { return m_eofVersion; }
AssemblyItem newTag() { assertThrow(m_usedTags < 0xffffffff, AssemblyException, ""); return AssemblyItem(Tag, m_usedTags++); }
AssemblyItem newPushTag() { assertThrow(m_usedTags < 0xffffffff, AssemblyException, ""); return AssemblyItem(PushTag, m_usedTags++); }
/// Returns a tag identified by the given name. Creates it if it does not yet exist.
AssemblyItem namedTag(std::string const& _name, size_t _params, size_t _returns, std::optional<uint64_t> _sourceID);
AssemblyItem newData(bytes const& _data) { util::h256 h(util::keccak256(util::asString(_data))); m_data[h] = _data; return AssemblyItem(PushData, h); }
bytes const& data(util::h256 const& _i) const { return m_data.at(_i); }
AssemblyItem newSub(AssemblyPointer const& _sub) { m_subs.push_back(_sub); return AssemblyItem(PushSub, m_subs.size() - 1); }
Assembly const& sub(size_t _sub) const { return *m_subs.at(_sub); }
Assembly& sub(size_t _sub) { return *m_subs.at(_sub); }
size_t numSubs() const { return m_subs.size(); }
AssemblyItem newPushSubSize(u256 const& _subId) { return AssemblyItem(PushSubSize, _subId); }
AssemblyItem newPushLibraryAddress(std::string const& _identifier);
AssemblyItem newPushImmutable(std::string const& _identifier);
AssemblyItem newImmutableAssignment(std::string const& _identifier);
AssemblyItem newAuxDataLoadN(size_t offset);
AssemblyItem const& append(AssemblyItem _i);
AssemblyItem const& append(bytes const& _data) { return append(newData(_data)); }
template <class T> Assembly& operator<<(T const& _d) { append(_d); return *this; }
/// Pushes the final size of the current assembly itself. Use this when the code is modified
/// after compilation and CODESIZE is not an option.
void appendProgramSize() { append(AssemblyItem(PushProgramSize)); }
void appendLibraryAddress(std::string const& _identifier) { append(newPushLibraryAddress(_identifier)); }
void appendImmutable(std::string const& _identifier) { append(newPushImmutable(_identifier)); }
void appendImmutableAssignment(std::string const& _identifier) { append(newImmutableAssignment(_identifier)); }
void appendAuxDataLoadN(uint16_t _offset) { append(newAuxDataLoadN(_offset));}
void appendVerbatim(bytes _data, size_t _arguments, size_t _returnVariables)
{
append(AssemblyItem(std::move(_data), _arguments, _returnVariables));
}
AssemblyItem appendEOFCreate(ContainerID _containerId)
{
solAssert(_containerId < m_subs.size(), "EOF Create of undefined container.");
return append(AssemblyItem::eofCreate(_containerId));
}
AssemblyItem appendReturnContract(ContainerID _containerId)
{
solAssert(_containerId < m_subs.size(), "Return undefined container ID.");
return append(AssemblyItem::returnContract(_containerId));
}
AssemblyItem appendJump() { auto ret = append(newPushTag()); append(Instruction::JUMP); return ret; }
AssemblyItem appendJumpI() { auto ret = append(newPushTag()); append(Instruction::JUMPI); return ret; }
AssemblyItem appendJump(AssemblyItem const& _tag) { auto ret = append(_tag.pushTag()); append(Instruction::JUMP); return ret; }
AssemblyItem appendJumpI(AssemblyItem const& _tag) { auto ret = append(_tag.pushTag()); append(Instruction::JUMPI); return ret; }
/// Adds a subroutine to the code (in the data section) and pushes its size (via a tag)
/// on the stack. @returns the pushsub assembly item.
AssemblyItem appendSubroutine(AssemblyPointer const& _assembly) { auto sub = newSub(_assembly); append(newPushSubSize(size_t(sub.data()))); return sub; }
void pushSubroutineSize(size_t _subRoutine) { append(newPushSubSize(_subRoutine)); }
/// Pushes the offset of the subroutine.
void pushSubroutineOffset(size_t _subRoutine) { append(AssemblyItem(PushSub, _subRoutine)); }
/// Appends @a _data literally to the very end of the bytecode.
void appendToAuxiliaryData(bytes const& _data) { m_auxiliaryData += _data; }
int deposit() const { return m_deposit; }
void adjustDeposit(int _adjustment) { m_deposit += _adjustment; assertThrow(m_deposit >= 0, InvalidDeposit, ""); }
void setDeposit(int _deposit) { m_deposit = _deposit; assertThrow(m_deposit >= 0, InvalidDeposit, ""); }
std::string const& name() const { return m_name; }
/// Changes the source location used for each appended item.
void setSourceLocation(langutil::SourceLocation const& _location) { m_currentSourceLocation = _location; }
langutil::SourceLocation const& currentSourceLocation() const { return m_currentSourceLocation; }
langutil::EVMVersion const& evmVersion() const { return m_evmVersion; }
/// Assembles the assembly into bytecode. The assembly should not be modified after this call, since the assembled version is cached.
LinkerObject const& assemble() const;
struct OptimiserSettings
{
bool runInliner = false;
bool runJumpdestRemover = false;
bool runPeephole = false;
bool runDeduplicate = false;
bool runCSE = false;
bool runConstantOptimiser = false;
langutil::EVMVersion evmVersion;
/// This specifies an estimate on how often each opcode in this assembly will be executed,
/// i.e. use a small value to optimise for size and a large value to optimise for runtime gas usage.
size_t expectedExecutionsPerDeployment = frontend::OptimiserSettings{}.expectedExecutionsPerDeployment;
static OptimiserSettings translateSettings(frontend::OptimiserSettings const& _settings, langutil::EVMVersion const& _evmVersion);
};
/// Modify and return the current assembly such that creation and execution gas usage
/// is optimised according to the settings in @a _settings.
Assembly& optimise(OptimiserSettings const& _settings);
/// Create a text representation of the assembly.
std::string assemblyString(
langutil::DebugInfoSelection const& _debugInfoSelection = langutil::DebugInfoSelection::Default(),
StringMap const& _sourceCodes = StringMap()
) const;
void assemblyStream(
std::ostream& _out,
langutil::DebugInfoSelection const& _debugInfoSelection = langutil::DebugInfoSelection::Default(),
std::string const& _prefix = "",
StringMap const& _sourceCodes = StringMap()
) const;
/// Create a JSON representation of the assembly.
Json assemblyJSON(std::map<std::string, unsigned> const& _sourceIndices, bool _includeSourceList = true) const;
/// Constructs an @a Assembly from the serialized JSON representation.
/// @param _json JSON object containing assembly in the format produced by assemblyJSON().
/// @param _sourceList List of source files the assembly was built from. When the JSON represents
/// the root assembly, the function will read it from the 'sourceList' field and the parameter
/// must be empty. It is only used to pass the list down to recursive calls.
/// @param _level Nesting level of the current assembly in the assembly tree. The root is
/// at level 0 and the value increases down the tree. Necessary to distinguish between creation
/// and deployed objects.
/// @returns Created @a Assembly and the source list read from the 'sourceList' field of the root
/// assembly or an empty list (in recursive calls).
static std::pair<std::shared_ptr<Assembly>, std::vector<std::string>> fromJSON(
Json const& _json,
std::vector<std::string> const& _sourceList = {},
size_t _level = 0,
std::optional<uint8_t> _eofVersion = std::nullopt
);
/// Mark this assembly as invalid. Calling ``assemble`` on it will throw.
void markAsInvalid() { m_invalid = true; }
std::vector<size_t> decodeSubPath(size_t _subObjectId) const;
size_t encodeSubPath(std::vector<size_t> const& _subPath);
bool isCreation() const { return m_creation; }
struct CodeSection
{
uint8_t inputs = 0;
uint8_t outputs = 0;
AssemblyItems items{};
};
std::vector<CodeSection>& codeSections()
{
return m_codeSections;
}
std::vector<CodeSection> const& codeSections() const
{
return m_codeSections;
}
protected:
/// Does the same operations as @a optimise, but should only be applied to a sub and
/// returns the replaced tags. Also takes an argument containing the tags of this assembly
/// that are referenced in a super-assembly.
std::map<u256, u256> const& optimiseInternal(OptimiserSettings const& _settings, std::set<size_t> _tagsReferencedFromOutside);
/// For both EOF and legacy, calculates the approximate size of the "pure" code without data.
unsigned codeSize(unsigned subTagSize) const;
/// Add all assembly items from given JSON array. This function imports the items by iterating through
/// the code array. This method only works on clean Assembly objects that don't have any items defined yet.
/// @param _json JSON array that contains assembly items (e.g. json['.code'])
/// @param _sourceList List of source names.
void importAssemblyItemsFromJSON(Json const& _code, std::vector<std::string> const& _sourceList);
/// Creates an AssemblyItem from a given JSON representation.
/// @param _json JSON object that consists of a single assembly item
/// @param _sourceList List of source names.
/// @returns AssemblyItem of _json argument.
AssemblyItem createAssemblyItemFromJSON(Json const& _json, std::vector<std::string> const& _sourceList);
private:
bool m_invalid = false;
Assembly const* subAssemblyById(size_t _subId) const;
void encodeAllPossibleSubPathsInAssemblyTree(std::vector<size_t> _pathFromRoot = {}, std::vector<Assembly*> _assembliesOnPath = {});
std::shared_ptr<std::string const> sharedSourceName(std::string const& _name) const;
/// Returns EOF header bytecode | code section sizes offsets | data section size offset
std::tuple<bytes, std::vector<size_t>, size_t> createEOFHeader(std::set<uint16_t> const& _referencedSubIds) const;
LinkerObject const& assembleLegacy() const;
LinkerObject const& assembleEOF() const;
/// Returns map from m_subs to an index of subcontainer in the final EOF bytecode
std::map<uint16_t, uint16_t> findReferencedContainers() const;
/// Returns max AuxDataLoadN offset for the assembly.
std::optional<uint16_t> findMaxAuxDataLoadNOffset() const;
/// Assemble bytecode for AssemblyItem type.
[[nodiscard]] bytes assembleOperation(AssemblyItem const& _item) const;
[[nodiscard]] bytes assemblePush(AssemblyItem const& _item) const;
[[nodiscard]] std::pair<bytes, Assembly::LinkRef> assemblePushLibraryAddress(AssemblyItem const& _item, size_t _pos) const;
[[nodiscard]] bytes assembleVerbatimBytecode(AssemblyItem const& item) const;
[[nodiscard]] bytes assemblePushDeployTimeAddress() const;
[[nodiscard]] bytes assembleTag(AssemblyItem const& _item, size_t _pos, bool _addJumpDest) const;
protected:
/// 0 is reserved for exception
unsigned m_usedTags = 1;
struct NamedTagInfo
{
size_t id;
std::optional<size_t> sourceID;
size_t params;
size_t returns;
};
std::map<std::string, NamedTagInfo> m_namedTags;
std::map<util::h256, bytes> m_data;
/// Data that is appended to the very end of the contract.
bytes m_auxiliaryData;
std::vector<std::shared_ptr<Assembly>> m_subs;
std::vector<CodeSection> m_codeSections;
uint16_t m_currentCodeSection = 0;
std::map<util::h256, std::string> m_strings;
std::map<util::h256, std::string> m_libraries; ///< Identifiers of libraries to be linked.
std::map<util::h256, std::string> m_immutables; ///< Identifiers of immutables.
/// Map from a vector representing a path to a particular sub assembly to sub assembly id.
/// This map is used only for sub-assemblies which are not direct sub-assemblies (i.e. where the path has more than one element).
std::map<std::vector<size_t>, size_t> m_subPaths;
/// Contains the tag replacements relevant for super-assemblies.
/// If set, it means the optimizer has run and we will not run it again.
std::optional<std::map<u256, u256>> m_tagReplacements;
mutable LinkerObject m_assembledObject;
mutable std::vector<size_t> m_tagPositionsInBytecode;
langutil::EVMVersion m_evmVersion;
int m_deposit = 0;
/// True, if the assembly contains contract creation code.
bool const m_creation = false;
std::optional<uint8_t> m_eofVersion;
/// Internal name of the assembly object, only used with the Yul backend
/// currently
std::string m_name;
langutil::SourceLocation m_currentSourceLocation;
// FIXME: This being static means that the strings won't be freed when they're no longer needed
static std::map<std::string, std::shared_ptr<std::string const>> s_sharedSourceNames;
public:
size_t m_currentModifierDepth = 0;
};
inline std::ostream& operator<<(std::ostream& _out, Assembly const& _a)
{
_a.assemblyStream(_out);
return _out;
}
}
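// Usage sketch (not part of the header above): builds a tiny legacy (non-EOF) runtime
// assembly computing 1 + 2, assembles it and returns the bytecode. The `bytecode`
// member of LinkerObject is assumed from LinkerObject.h.
#include <libevmasm/Assembly.h>

solidity::bytes addTwoConstants()
{
	using namespace solidity;
	using namespace solidity::evmasm;
	Assembly assembly(langutil::EVMVersion{}, /* _creation */ false, /* _eofVersion */ std::nullopt, "demo");
	assembly << u256(1) << u256(2) << Instruction::ADD << Instruction::STOP;
	LinkerObject const& object = assembly.assemble(); // result is cached; do not modify the assembly afterwards
	return object.bytecode;
}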
| 14,519 | C++ | .h | 264 | 52.746212 | 154 | 0.760814 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,674 | EVMAssemblyStack.h | ethereum_solidity/libevmasm/EVMAssemblyStack.h |
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libevmasm/AbstractAssemblyStack.h>
#include <libevmasm/Assembly.h>
#include <libevmasm/LinkerObject.h>
#include <libsolutil/JSON.h>
#include <map>
#include <string>
namespace solidity::evmasm
{
class EVMAssemblyStack: public AbstractAssemblyStack
{
public:
explicit EVMAssemblyStack(langutil::EVMVersion _evmVersion, std::optional<uint8_t> _eofVersion):
m_evmVersion(_evmVersion), m_eofVersion(_eofVersion) {}
/// Runs parsing and analysis steps.
/// Multiple calls overwrite the previous state.
/// @throws AssemblyImportException, if JSON could not be validated.
void parseAndAnalyze(std::string const& _sourceName, std::string const& _source);
/// Runs analysis steps.
/// Multiple calls overwrite the previous state.
/// @throws AssemblyImportException, if JSON could not be validated.
void analyze(std::string const& _sourceName, Json const& _assemblyJson);
void assemble();
std::string const& name() const { return m_name; }
virtual LinkerObject const& object(std::string const& _contractName) const override;
virtual LinkerObject const& runtimeObject(std::string const& _contractName) const override;
std::shared_ptr<evmasm::Assembly> const& evmAssembly() const { return m_evmAssembly; }
std::shared_ptr<evmasm::Assembly> const& evmRuntimeAssembly() const { return m_evmRuntimeAssembly; }
virtual std::string const* sourceMapping(std::string const& _contractName) const override;
virtual std::string const* runtimeSourceMapping(std::string const& _contractName) const override;
virtual Json assemblyJSON(std::string const& _contractName) const override;
virtual std::string assemblyString(std::string const& _contractName, StringMap const& _sourceCodes) const override;
virtual std::string const filesystemFriendlyName(std::string const& _contractName) const override;
virtual std::vector<std::string> contractNames() const override { return {m_name}; }
virtual std::vector<std::string> sourceNames() const override;
std::map<std::string, unsigned> sourceIndices() const;
virtual bool compilationSuccessful() const override { return m_evmAssembly != nullptr; }
void selectDebugInfo(langutil::DebugInfoSelection _debugInfoSelection)
{
m_debugInfoSelection = _debugInfoSelection;
}
private:
langutil::EVMVersion m_evmVersion;
std::optional<uint8_t> m_eofVersion;
std::string m_name;
std::shared_ptr<evmasm::Assembly> m_evmAssembly;
std::shared_ptr<evmasm::Assembly> m_evmRuntimeAssembly;
evmasm::LinkerObject m_object; ///< Deployment object (includes the runtime sub-object).
evmasm::LinkerObject m_runtimeObject; ///< Runtime object.
std::vector<std::string> m_sourceList;
langutil::DebugInfoSelection m_debugInfoSelection = langutil::DebugInfoSelection::Default();
std::string m_sourceMapping;
std::string m_runtimeSourceMapping;
};
} // namespace solidity::evmasm
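// Usage sketch (not part of the header above): re-imports an assembly JSON export
// (as produced by Assembly::assemblyJSON), assembles it and returns the deployment
// object of the single contract held by the stack.
#include <libevmasm/EVMAssemblyStack.h>

solidity::evmasm::LinkerObject importAndAssemble(std::string const& _sourceName, std::string const& _assemblyJsonText)
{
	using namespace solidity::evmasm;
	EVMAssemblyStack stack(langutil::EVMVersion{}, /* _eofVersion */ std::nullopt);
	stack.parseAndAnalyze(_sourceName, _assemblyJsonText); // throws AssemblyImportException on invalid input
	stack.assemble();
	std::string contractName = stack.contractNames().front(); // exactly one contract per stack
	return stack.object(contractName);
}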
| 3,543 | C++ | .h | 69 | 49.246377 | 116 | 0.789336 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,675 | Instruction.h | ethereum_solidity/libevmasm/Instruction.h |
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/** @file Instruction.h
* @author Gav Wood <i@gavwood.com>
* @date 2014
*/
#pragma once
#include <libevmasm/Exceptions.h>
#include <libsolutil/Common.h>
#include <libsolutil/Assertions.h>
#include <liblangutil/EVMVersion.h>
namespace solidity::evmasm
{
/// Virtual machine bytecode instruction.
enum class Instruction: uint8_t
{
STOP = 0x00, ///< halts execution
ADD, ///< addition operation
MUL, ///< multiplication operation
SUB, ///< subtraction operation
DIV, ///< integer division operation
SDIV, ///< signed integer division operation
MOD, ///< modulo remainder operation
SMOD, ///< signed modulo remainder operation
ADDMOD, ///< unsigned modular addition
MULMOD, ///< unsigned modular multiplication
EXP, ///< exponential operation
SIGNEXTEND, ///< extend length of signed integer
LT = 0x10, ///< less-than comparison
GT, ///< greater-than comparison
SLT, ///< signed less-than comparison
SGT, ///< signed greater-than comparison
EQ, ///< equality comparison
ISZERO, ///< simple not operator
AND, ///< bitwise AND operation
OR, ///< bitwise OR operation
XOR, ///< bitwise XOR operation
NOT, ///< bitwise NOT operation
BYTE, ///< retrieve single byte from word
SHL, ///< bitwise SHL operation
SHR, ///< bitwise SHR operation
SAR, ///< bitwise SAR operation
KECCAK256 = 0x20, ///< compute KECCAK-256 hash
ADDRESS = 0x30, ///< get address of currently executing account
BALANCE, ///< get balance of the given account
ORIGIN, ///< get execution origination address
CALLER, ///< get caller address
CALLVALUE, ///< get deposited value by the instruction/transaction responsible for this execution
CALLDATALOAD, ///< get input data of current environment
CALLDATASIZE, ///< get size of input data in current environment
CALLDATACOPY, ///< copy input data in current environment to memory
CODESIZE, ///< get size of code running in current environment
CODECOPY, ///< copy code running in current environment to memory
GASPRICE, ///< get price of gas in current environment
EXTCODESIZE, ///< get external code size (from another contract)
EXTCODECOPY, ///< copy external code (from another contract)
RETURNDATASIZE = 0x3d, ///< get size of return data buffer
RETURNDATACOPY = 0x3e, ///< copy return data in current environment to memory
EXTCODEHASH = 0x3f, ///< get external code hash (from another contract)
BLOCKHASH = 0x40, ///< get hash of most recent complete block
COINBASE, ///< get the block's coinbase address
TIMESTAMP, ///< get the block's timestamp
NUMBER, ///< get the block's number
PREVRANDAO, ///< get randomness provided by the beacon chain
GASLIMIT, ///< get the block's gas limit
CHAINID, ///< get the config's chainid param
SELFBALANCE, ///< get balance of the current account
BASEFEE, ///< get the block's basefee
BLOBHASH = 0x49, ///< get a versioned hash of one of the blobs associated with the transaction
BLOBBASEFEE = 0x4a, ///< get the block's blob basefee
POP = 0x50, ///< remove item from stack
MLOAD, ///< load word from memory
MSTORE, ///< save word to memory
MSTORE8, ///< save byte to memory
SLOAD, ///< load word from storage
SSTORE, ///< save word to storage
JUMP, ///< alter the program counter
JUMPI, ///< conditionally alter the program counter
PC, ///< get the program counter
MSIZE, ///< get the size of active memory
GAS, ///< get the amount of available gas
JUMPDEST, ///< set a potential jump destination
TLOAD = 0x5c, ///< load word from transient storage
TSTORE = 0x5d, ///< save word to transient storage
MCOPY = 0x5e, ///< copy between memory areas
PUSH0 = 0x5f, ///< place the value 0 on stack
PUSH1 = 0x60, ///< place 1 byte item on stack
PUSH2, ///< place 2 byte item on stack
PUSH3, ///< place 3 byte item on stack
PUSH4, ///< place 4 byte item on stack
PUSH5, ///< place 5 byte item on stack
PUSH6, ///< place 6 byte item on stack
PUSH7, ///< place 7 byte item on stack
PUSH8, ///< place 8 byte item on stack
PUSH9, ///< place 9 byte item on stack
PUSH10, ///< place 10 byte item on stack
PUSH11, ///< place 11 byte item on stack
PUSH12, ///< place 12 byte item on stack
PUSH13, ///< place 13 byte item on stack
PUSH14, ///< place 14 byte item on stack
PUSH15, ///< place 15 byte item on stack
PUSH16, ///< place 16 byte item on stack
PUSH17, ///< place 17 byte item on stack
PUSH18, ///< place 18 byte item on stack
PUSH19, ///< place 19 byte item on stack
PUSH20, ///< place 20 byte item on stack
PUSH21, ///< place 21 byte item on stack
PUSH22, ///< place 22 byte item on stack
PUSH23, ///< place 23 byte item on stack
PUSH24, ///< place 24 byte item on stack
PUSH25, ///< place 25 byte item on stack
PUSH26, ///< place 26 byte item on stack
PUSH27, ///< place 27 byte item on stack
PUSH28, ///< place 28 byte item on stack
PUSH29, ///< place 29 byte item on stack
PUSH30, ///< place 30 byte item on stack
PUSH31, ///< place 31 byte item on stack
PUSH32, ///< place 32 byte item on stack
DUP1 = 0x80, ///< copies the highest item in the stack to the top of the stack
DUP2, ///< copies the second highest item in the stack to the top of the stack
DUP3, ///< copies the third highest item in the stack to the top of the stack
DUP4, ///< copies the 4th highest item in the stack to the top of the stack
DUP5, ///< copies the 5th highest item in the stack to the top of the stack
DUP6, ///< copies the 6th highest item in the stack to the top of the stack
DUP7, ///< copies the 7th highest item in the stack to the top of the stack
DUP8, ///< copies the 8th highest item in the stack to the top of the stack
DUP9, ///< copies the 9th highest item in the stack to the top of the stack
DUP10, ///< copies the 10th highest item in the stack to the top of the stack
DUP11, ///< copies the 11th highest item in the stack to the top of the stack
DUP12, ///< copies the 12th highest item in the stack to the top of the stack
DUP13, ///< copies the 13th highest item in the stack to the top of the stack
DUP14, ///< copies the 14th highest item in the stack to the top of the stack
DUP15, ///< copies the 15th highest item in the stack to the top of the stack
DUP16, ///< copies the 16th highest item in the stack to the top of the stack
SWAP1 = 0x90, ///< swaps the highest and second highest value on the stack
SWAP2, ///< swaps the highest and third highest value on the stack
SWAP3, ///< swaps the highest and 4th highest value on the stack
SWAP4, ///< swaps the highest and 5th highest value on the stack
SWAP5, ///< swaps the highest and 6th highest value on the stack
SWAP6, ///< swaps the highest and 7th highest value on the stack
SWAP7, ///< swaps the highest and 8th highest value on the stack
SWAP8, ///< swaps the highest and 9th highest value on the stack
SWAP9, ///< swaps the highest and 10th highest value on the stack
SWAP10, ///< swaps the highest and 11th highest value on the stack
SWAP11, ///< swaps the highest and 12th highest value on the stack
SWAP12, ///< swaps the highest and 13th highest value on the stack
SWAP13, ///< swaps the highest and 14th highest value on the stack
SWAP14, ///< swaps the highest and 15th highest value on the stack
SWAP15, ///< swaps the highest and 16th highest value on the stack
SWAP16, ///< swaps the highest and 17th highest value on the stack
LOG0 = 0xa0, ///< Makes a log entry; no topics.
LOG1, ///< Makes a log entry; 1 topic.
LOG2, ///< Makes a log entry; 2 topics.
LOG3, ///< Makes a log entry; 3 topics.
LOG4, ///< Makes a log entry; 4 topics.
DATALOADN = 0xd1, ///< load data from EOF data section
EOFCREATE = 0xec, ///< create a new account with associated container code.
RETURNCONTRACT = 0xee, ///< return container to be deployed with auxiliary data filled in.
CREATE = 0xf0, ///< create a new account with associated code
CALL, ///< message-call into an account
CALLCODE, ///< message-call with another account's code only
RETURN, ///< halt execution returning output data
DELEGATECALL, ///< like CALLCODE but keeps caller's value and sender
CREATE2 = 0xf5, ///< create new account with associated code at address `sha3(0xff + sender + salt + init code) % 2**160`
STATICCALL = 0xfa, ///< like CALL but disallow state modifications
REVERT = 0xfd, ///< halt execution, revert state and return output data
INVALID = 0xfe, ///< invalid instruction for expressing runtime errors (e.g., division-by-zero)
SELFDESTRUCT = 0xff ///< halt execution and register account for later deletion
};
/// @returns true if the instruction is of the CALL opcode family
constexpr bool isCallInstruction(Instruction _inst) noexcept
{
switch (_inst)
{
case Instruction::CALL:
case Instruction::CALLCODE:
case Instruction::DELEGATECALL:
case Instruction::STATICCALL:
return true;
default:
return false;
}
}
/// @returns true if the instruction is a PUSH
inline bool isPushInstruction(Instruction _inst)
{
return Instruction::PUSH0 <= _inst && _inst <= Instruction::PUSH32;
}
/// @returns true if the instruction is a DUP
inline bool isDupInstruction(Instruction _inst)
{
return Instruction::DUP1 <= _inst && _inst <= Instruction::DUP16;
}
/// @returns true if the instruction is a SWAP
inline bool isSwapInstruction(Instruction _inst)
{
return Instruction::SWAP1 <= _inst && _inst <= Instruction::SWAP16;
}
/// @returns true if the instruction is a LOG
inline bool isLogInstruction(Instruction _inst)
{
return Instruction::LOG0 <= _inst && _inst <= Instruction::LOG4;
}
/// @returns the number of PUSH Instruction _inst
inline unsigned getPushNumber(Instruction _inst)
{
return static_cast<uint8_t>(_inst) - unsigned(Instruction::PUSH0);
}
/// @returns the number of DUP Instruction _inst
inline unsigned getDupNumber(Instruction _inst)
{
return static_cast<uint8_t>(_inst) - unsigned(Instruction::DUP1) + 1;
}
/// @returns the number of SWAP Instruction _inst
inline unsigned getSwapNumber(Instruction _inst)
{
return static_cast<uint8_t>(_inst) - unsigned(Instruction::SWAP1) + 1;
}
/// @returns the number of LOG Instruction _inst
inline unsigned getLogNumber(Instruction _inst)
{
return static_cast<uint8_t>(_inst) - unsigned(Instruction::LOG0);
}
/// @returns the PUSH<_number> instruction
inline Instruction pushInstruction(unsigned _number)
{
assertThrow(_number <= 32, InvalidOpcode, std::string("Invalid PUSH instruction requested (") + std::to_string(_number) + ").");
return Instruction(unsigned(Instruction::PUSH0) + _number);
}
/// @returns the DUP<_number> instruction
inline Instruction dupInstruction(unsigned _number)
{
assertThrow(1 <= _number && _number <= 16, InvalidOpcode, std::string("Invalid DUP instruction requested (") + std::to_string(_number) + ").");
return Instruction(unsigned(Instruction::DUP1) + _number - 1);
}
/// @returns the SWAP<_number> instruction
inline Instruction swapInstruction(unsigned _number)
{
assertThrow(1 <= _number && _number <= 16, InvalidOpcode, std::string("Invalid SWAP instruction requested (") + std::to_string(_number) + ").");
return Instruction(unsigned(Instruction::SWAP1) + _number - 1);
}
/// @returns the LOG<_number> instruction
inline Instruction logInstruction(unsigned _number)
{
assertThrow(_number <= 4, InvalidOpcode, std::string("Invalid LOG instruction requested (") + std::to_string(_number) + ").");
return Instruction(unsigned(Instruction::LOG0) + _number);
}
/// Gas price tiers representing static cost of an instruction.
/// Opcodes whose cost is dynamic or depends on EVM version should use the `Special` tier and need
/// dedicated logic in GasMeter (especially in estimateMax()).
/// The tiers loosely follow opcode groups originally defined in the Yellow Paper.
enum class Tier
{
// NOTE: Tiers should be ordered by cost, since we sometimes perform comparisons between them.
Zero = 0, // 0, Zero
Base, // 2, Quick
VeryLow, // 3, Fastest
Low, // 5, Fast
Mid, // 8, Mid
High, // 10, Slow
BlockHash, // 20
WarmAccess, // 100, Warm Access
Special, // multiparam or otherwise special
Invalid, // Invalid.
};
/// Information structure for a particular instruction.
struct InstructionInfo
{
std::string name; ///< The name of the instruction.
int additional; ///< Additional items required in memory for this instruction (only for PUSH).
int args; ///< Number of items required on the stack for this instruction (and, for the purposes of ret, the number taken from the stack).
int ret; ///< Number of items placed (back) on the stack by this instruction, assuming args items were removed.
bool sideEffects; ///< false if the only effect on the execution environment (apart from gas usage) is a change to a topmost segment of the stack
Tier gasPriceTier; ///< Tier for gas pricing.
};
/// Information on all the instructions.
InstructionInfo instructionInfo(Instruction _inst, langutil::EVMVersion _evmVersion);
/// Checks whether the instruction exists.
bool isValidInstruction(Instruction _inst);
/// Convert from string mnemonic to Instruction type.
extern const std::map<std::string, Instruction> c_instructions;
}
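// Usage sketch (not part of the header above): exercises the inline helpers for
// constructing and classifying opcodes and reads the static metadata for ADD on the
// default EVM version.
#include <libevmasm/Instruction.h>

void inspectOpcodes()
{
	using namespace solidity::evmasm;
	Instruction push20 = pushInstruction(20);           // Instruction::PUSH20
	unsigned width = getPushNumber(push20);             // 20 bytes of immediate data
	bool swaps = isSwapInstruction(Instruction::SWAP5); // true
	InstructionInfo info = instructionInfo(Instruction::ADD, solidity::langutil::EVMVersion{});
	// For ADD: info.args == 2, info.ret == 1, info.gasPriceTier == Tier::VeryLow.
	(void)width; (void)swaps; (void)info;
}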
| 16,326 | C++ | .h | 294 | 53.663265 | 149 | 0.615067 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,677 | AbstractAssemblyStack.h | ethereum_solidity/libevmasm/AbstractAssemblyStack.h |
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libevmasm/LinkerObject.h>
#include <libsolutil/Common.h>
#include <libsolutil/JSON.h>
#include <string>
#include <vector>
namespace solidity::evmasm
{
class AbstractAssemblyStack
{
public:
virtual ~AbstractAssemblyStack() {}
virtual LinkerObject const& object(std::string const& _contractName) const = 0;
virtual LinkerObject const& runtimeObject(std::string const& _contractName) const = 0;
virtual std::string const* sourceMapping(std::string const& _contractName) const = 0;
virtual std::string const* runtimeSourceMapping(std::string const& _contractName) const = 0;
virtual Json assemblyJSON(std::string const& _contractName) const = 0;
virtual std::string assemblyString(std::string const& _contractName, StringMap const& _sourceCodes) const = 0;
virtual std::string const filesystemFriendlyName(std::string const& _contractName) const = 0;
virtual std::vector<std::string> contractNames() const = 0;
virtual std::vector<std::string> sourceNames() const = 0;
virtual bool compilationSuccessful() const = 0;
};
} // namespace solidity::evmasm
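// Usage sketch (not part of the header above): client code working purely against the
// abstract interface, so it accepts any stack implementation (e.g. EVMAssemblyStack).
// The `bytecode` member of LinkerObject is assumed from LinkerObject.h.
#include <libevmasm/AbstractAssemblyStack.h>

#include <map>

std::map<std::string, solidity::bytes> collectDeploymentBytecode(solidity::evmasm::AbstractAssemblyStack const& _stack)
{
	std::map<std::string, solidity::bytes> result;
	if (_stack.compilationSuccessful())
		for (std::string const& contract: _stack.contractNames())
			result[contract] = _stack.object(contract).bytecode;
	return result;
}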
| 1,780 | C++ | .h | 38 | 44.842105 | 111 | 0.785052 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,682 | Disassemble.h | ethereum_solidity/libevmasm/Disassemble.h |
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libsolutil/Common.h>
#include <libsolutil/Numeric.h>
#include <libevmasm/Instruction.h>
#include <functional>
#include <string>
namespace solidity::evmasm
{
/// Iterate through EVM code and call a function on each instruction.
void eachInstruction(bytes const& _mem, langutil::EVMVersion _evmVersion, std::function<void(Instruction, u256 const&)> const& _onInstruction);
/// Convert from EVM code to simple EVM assembly language.
std::string disassemble(bytes const& _mem, langutil::EVMVersion _evmVersion, std::string const& _delimiter = " ");
}
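// Usage sketch (not part of the header above): disassembles the byte sequence
// PUSH1 0x2a POP (opcode values per Instruction.h) and also walks it with the
// callback variant.
#include <libevmasm/Disassemble.h>

#include <iostream>

void printDisassembly()
{
	using namespace solidity;
	using namespace solidity::evmasm;
	bytes code{0x60, 0x2a, 0x50}; // PUSH1 0x2a, POP
	std::cout << disassemble(code, langutil::EVMVersion{}) << std::endl;
	eachInstruction(code, langutil::EVMVersion{}, [](Instruction _instr, u256 const& _argument) {
		std::cout << static_cast<unsigned>(_instr) << " " << _argument << std::endl;
	});
}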
| 1,263 | C++ | .h | 27 | 44.962963 | 143 | 0.786122 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,687 | Inliner.h | ethereum_solidity/libevmasm/Inliner.h |
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* @file Inliner.h
* Inlines small code snippets by replacing JUMP with a copy of the code jumped to.
*/
#pragma once
#include <libsolutil/Common.h>
#include <libevmasm/Assembly.h>
#include <libevmasm/AssemblyItem.h>
#include <liblangutil/EVMVersion.h>
#include <range/v3/view/span.hpp>
#include <map>
#include <set>
#include <vector>
namespace solidity::evmasm
{
class Inliner
{
public:
explicit Inliner(
AssemblyItems& _items,
std::set<size_t> const& _tagsReferencedFromOutside,
size_t _runs,
bool _isCreation,
langutil::EVMVersion _evmVersion
):
m_items(_items),
m_tagsReferencedFromOutside(_tagsReferencedFromOutside),
m_runs(_runs),
m_isCreation(_isCreation),
m_evmVersion(_evmVersion)
{
}
virtual ~Inliner() = default;
void optimise();
private:
struct InlinableBlock
{
ranges::span<AssemblyItem const> items;
uint64_t pushTagCount = 0;
};
/// @returns the exit item for the block to be inlined, if a particular jump to it should be inlined, otherwise nullopt.
std::optional<AssemblyItem> shouldInline(size_t _tag, AssemblyItem const& _jump, InlinableBlock const& _block) const;
/// @returns true, if the full function at tag @a _tag with body @a _block that is referenced @a _pushTagCount times
/// should be inlined, false otherwise. @a _block should start at the first instruction after the function entry tag
/// up to and including the return jump.
bool shouldInlineFullFunctionBody(size_t _tag, ranges::span<AssemblyItem const> _block, uint64_t _pushTagCount) const;
/// @returns true, if the @a _items at @a _tag are a potential candidate for inlining.
bool isInlineCandidate(size_t _tag, ranges::span<AssemblyItem const> _items) const;
/// @returns a map from tags that can potentially be inlined to the inlinable item range behind that tag and the
/// number of times the tag in question was referenced.
std::map<size_t, InlinableBlock> determineInlinableBlocks(AssemblyItems const& _items) const;
AssemblyItems& m_items;
std::set<size_t> const& m_tagsReferencedFromOutside;
size_t const m_runs = Assembly::OptimiserSettings{}.expectedExecutionsPerDeployment;
bool const m_isCreation = false;
langutil::EVMVersion const m_evmVersion;
};
}
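// Usage sketch (not part of the header above): runs the inliner in place over a
// runtime item sequence; the items and the externally referenced tags are assumed to
// come from the surrounding Assembly, and 200 is an assumed value for the expected
// executions per deployment.
#include <libevmasm/Inliner.h>

void inlineSmallBlocks(solidity::evmasm::AssemblyItems& _items, std::set<size_t> const& _externallyReferencedTags)
{
	using namespace solidity::evmasm;
	Inliner inliner(
		_items,
		_externallyReferencedTags,
		/* _runs */ 200,
		/* _isCreation */ false,   // runtime code: optimise for execution gas
		solidity::langutil::EVMVersion{}
	);
	inliner.optimise(); // mutates _items
}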
| 2,897 | C++ | .h | 72 | 38.263889 | 121 | 0.775329 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,691 | AssemblyItem.h | ethereum_solidity/libevmasm/AssemblyItem.h |
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/** @file AssemblyItem.h
* @author Gav Wood <i@gavwood.com>
* @date 2014
*/
#pragma once
#include <libevmasm/Instruction.h>
#include <libevmasm/Exceptions.h>
#include <liblangutil/DebugData.h>
#include <liblangutil/Exceptions.h>
#include <libsolutil/Common.h>
#include <libsolutil/Numeric.h>
#include <libsolutil/Assertions.h>
#include <optional>
#include <iostream>
#include <sstream>
namespace solidity::evmasm
{
enum AssemblyItemType
{
UndefinedItem,
Operation,
Push,
PushTag,
PushSub,
PushSubSize,
PushProgramSize,
Tag,
PushData,
PushLibraryAddress, ///< Push a currently unknown address of another (library) contract.
PushDeployTimeAddress, ///< Push an address to be filled at deploy time. Should not be touched by the optimizer.
PushImmutable, ///< Push the currently unknown value of an immutable variable. The actual value will be filled in by the constructor.
AssignImmutable, ///< Assigns the current value on the stack to an immutable variable. Only valid during creation code.
/// Loads 32 bytes from the static auxiliary data of the EOF data section. The offset does *not* always have to be from the beginning
/// of the data EOF section. More details here: https://github.com/ipsilon/eof/blob/main/spec/eof.md#data-section-lifecycle
AuxDataLoadN,
EOFCreate, ///< Creates new contract using subcontainer as initcode
ReturnContract, ///< Returns new container (with auxiliary data filled in) to be deployed
VerbatimBytecode ///< Contains data that is inserted into the bytecode code section without modification.
};
enum class Precision { Precise , Approximate };
class Assembly;
class AssemblyItem;
using AssemblyItems = std::vector<AssemblyItem>;
using ContainerID = uint8_t;
class AssemblyItem
{
public:
enum class JumpType { Ordinary, IntoFunction, OutOfFunction };
AssemblyItem(u256 _push, langutil::DebugData::ConstPtr _debugData = langutil::DebugData::create()):
AssemblyItem(Push, std::move(_push), std::move(_debugData)) { }
AssemblyItem(Instruction _i, langutil::DebugData::ConstPtr _debugData = langutil::DebugData::create()):
m_type(Operation),
m_instruction(_i),
m_debugData(std::move(_debugData))
{}
AssemblyItem(AssemblyItemType _type, u256 _data = 0, langutil::DebugData::ConstPtr _debugData = langutil::DebugData::create()):
m_type(_type),
m_debugData(std::move(_debugData))
{
if (m_type == Operation)
m_instruction = Instruction(uint8_t(_data));
else
m_data = std::make_shared<u256>(std::move(_data));
}
explicit AssemblyItem(AssemblyItemType _type, Instruction _instruction, u256 _data = 0, langutil::DebugData::ConstPtr _debugData = langutil::DebugData::create()):
m_type(_type),
m_instruction(_instruction),
m_data(std::make_shared<u256>(std::move(_data))),
m_debugData(std::move(_debugData))
{}
explicit AssemblyItem(bytes _verbatimData, size_t _arguments, size_t _returnVariables):
m_type(VerbatimBytecode),
m_instruction{},
m_verbatimBytecode{{_arguments, _returnVariables, std::move(_verbatimData)}},
m_debugData{langutil::DebugData::create()}
{}
static AssemblyItem eofCreate(ContainerID _containerID, langutil::DebugData::ConstPtr _debugData = langutil::DebugData::create())
{
return AssemblyItem(EOFCreate, Instruction::EOFCREATE, _containerID, std::move(_debugData));
}
static AssemblyItem returnContract(ContainerID _containerID, langutil::DebugData::ConstPtr _debugData = langutil::DebugData::create())
{
return AssemblyItem(ReturnContract, Instruction::RETURNCONTRACT, _containerID, std::move(_debugData));
}
AssemblyItem(AssemblyItem const&) = default;
AssemblyItem(AssemblyItem&&) = default;
AssemblyItem& operator=(AssemblyItem const&) = default;
AssemblyItem& operator=(AssemblyItem&&) = default;
AssemblyItem tag() const { assertThrow(m_type == PushTag || m_type == Tag, util::Exception, ""); return AssemblyItem(Tag, data()); }
AssemblyItem pushTag() const { assertThrow(m_type == PushTag || m_type == Tag, util::Exception, ""); return AssemblyItem(PushTag, data()); }
/// Converts the tag to a subassembly tag. This has to be called in order to move a tag across assemblies.
/// @param _subId the identifier of the subassembly the tag is taken from.
AssemblyItem toSubAssemblyTag(size_t _subId) const;
/// @returns the data of the push tag split into sub assembly id and actual tag id.
/// The sub assembly id of non-foreign push tags is -1.
std::pair<size_t, size_t> splitForeignPushTag() const;
/// Sets sub-assembly part and tag for a push tag.
void setPushTagSubIdAndTag(size_t _subId, size_t _tag);
AssemblyItemType type() const { return m_type; }
u256 const& data() const { assertThrow(m_type != Operation, util::Exception, ""); return *m_data; }
void setData(u256 const& _data) { assertThrow(m_type != Operation, util::Exception, ""); m_data = std::make_shared<u256>(_data); }
/// This function is used in `Assembly::assemblyJSON`.
/// It returns the name & data of the current assembly item.
/// @param _evmVersion the EVM version.
/// @returns a pair, where the first element is the json-assembly
/// item name, and the second element is the string representation
/// of its data.
std::pair<std::string, std::string> nameAndData(langutil::EVMVersion _evmVersion) const;
bytes const& verbatimData() const { assertThrow(m_type == VerbatimBytecode, util::Exception, ""); return std::get<2>(*m_verbatimBytecode); }
/// @returns true if the item has m_instruction properly set.
bool hasInstruction() const
{
return m_type == Operation || m_type == EOFCreate || m_type == ReturnContract;
}
/// @returns the instruction of this item (only valid if type() == Operation || EOFCreate || ReturnContract)
Instruction instruction() const
{
solAssert(hasInstruction());
return m_instruction;
}
/// @returns true if the type and data of the items are equal.
bool operator==(AssemblyItem const& _other) const
{
if (type() != _other.type())
return false;
if (type() == Operation)
return instruction() == _other.instruction();
else if (type() == VerbatimBytecode)
return *m_verbatimBytecode == *_other.m_verbatimBytecode;
else
return data() == _other.data();
}
bool operator!=(AssemblyItem const& _other) const { return !operator==(_other); }
/// Less-than operator compatible with operator==.
bool operator<(AssemblyItem const& _other) const
{
if (type() != _other.type())
return type() < _other.type();
else if (type() == Operation)
return instruction() < _other.instruction();
else if (type() == VerbatimBytecode)
return *m_verbatimBytecode < *_other.m_verbatimBytecode;
else
return data() < _other.data();
}
/// Shortcut that avoids constructing an AssemblyItem just to perform the comparison.
bool operator==(Instruction _instr) const
{
return type() == Operation && instruction() == _instr;
}
bool operator!=(Instruction _instr) const { return !operator==(_instr); }
static std::string computeSourceMapping(
AssemblyItems const& _items,
std::map<std::string, unsigned> const& _sourceIndicesMap
);
/// @returns an upper bound for the number of bytes required by this item, assuming that
/// the value of a jump tag takes @a _addressLength bytes.
/// @param _evmVersion the EVM version
/// @param _precision Whether to return a precise count (which involves
/// counting immutable references which are only set after
/// a call to `assemble()`) or an approx. count.
size_t bytesRequired(size_t _addressLength, langutil::EVMVersion _evmVersion, Precision _precision = Precision::Precise) const;
size_t arguments() const;
size_t returnValues() const;
size_t deposit() const { return returnValues() - arguments(); }
/// @returns true if the assembly item can be used in a functional context.
bool canBeFunctional() const;
void setLocation(langutil::SourceLocation const& _location)
{
solAssert(m_debugData);
m_debugData = langutil::DebugData::create(
_location,
m_debugData->originLocation,
m_debugData->astID
);
}
langutil::SourceLocation const& location() const
{
solAssert(m_debugData);
return m_debugData->nativeLocation;
}
void setDebugData(langutil::DebugData::ConstPtr _debugData)
{
solAssert(_debugData);
m_debugData = std::move(_debugData);
}
langutil::DebugData::ConstPtr debugData() const { return m_debugData; }
void setJumpType(JumpType _jumpType) { m_jumpType = _jumpType; }
static std::optional<JumpType> parseJumpType(std::string const& _jumpType);
JumpType getJumpType() const { return m_jumpType; }
std::string getJumpTypeAsString() const;
void setPushedValue(u256 const& _value) const { m_pushedValue = std::make_shared<u256>(_value); }
u256 const* pushedValue() const { return m_pushedValue.get(); }
std::string toAssemblyText(Assembly const& _assembly) const;
size_t m_modifierDepth = 0;
void setImmutableOccurrences(size_t _n) const { m_immutableOccurrences = _n; }
private:
size_t opcodeCount() const noexcept;
AssemblyItemType m_type;
Instruction m_instruction; ///< Only valid if m_type == Operation
std::shared_ptr<u256> m_data; ///< Only valid if m_type != Operation
/// If m_type == VerbatimBytecode, this holds number of arguments, number of
/// return variables and verbatim bytecode.
std::optional<std::tuple<size_t, size_t, bytes>> m_verbatimBytecode;
langutil::DebugData::ConstPtr m_debugData;
JumpType m_jumpType = JumpType::Ordinary;
/// Pushed value for operations with data to be determined during assembly stage,
/// e.g. PushSubSize, PushTag, PushSub, etc.
mutable std::shared_ptr<u256> m_pushedValue;
/// Number of PushImmutable's with the same hash. Only used for AssignImmutable.
mutable std::optional<size_t> m_immutableOccurrences;
};
inline size_t bytesRequired(AssemblyItems const& _items, size_t _addressLength, langutil::EVMVersion _evmVersion, Precision _precision = Precision::Precise)
{
size_t size = 0;
for (AssemblyItem const& item: _items)
size += item.bytesRequired(_addressLength, _evmVersion, _precision);
return size;
}
std::ostream& operator<<(std::ostream& _out, AssemblyItem const& _item);
inline std::ostream& operator<<(std::ostream& _out, AssemblyItems const& _items)
{
for (AssemblyItem const& item: _items)
_out << item;
return _out;
}
}
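// Usage sketch (not part of the header above): builds a two-item sequence, uses the
// Instruction shortcut comparison and estimates the encoded size via the free
// bytesRequired() helper in approximate mode (precise counting of immutable
// references needs a prior assemble()).
#include <libevmasm/AssemblyItem.h>

size_t sizeOfTinySequence()
{
	using namespace solidity;
	using namespace solidity::evmasm;
	AssemblyItems items{
		AssemblyItem(u256(42)),        // PUSH of the constant 42
		AssemblyItem(Instruction::POP)
	};
	bool lastIsPop = (items.back() == Instruction::POP); // true, via the shortcut comparison
	(void)lastIsPop;
	return bytesRequired(items, /* _addressLength */ 2, langutil::EVMVersion{}, Precision::Approximate);
}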
| 10,957 | C++ | .h | 241 | 43.178423 | 163 | 0.745691 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,693 | SemanticInformation.h | ethereum_solidity/libevmasm/SemanticInformation.h |
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* @file SemanticInformation.h
* @author Christian <c@ethdev.com>
* @date 2015
* Helper to provide semantic information about assembly items.
*/
#pragma once
#include <libevmasm/Instruction.h>
#include <optional>
#include <vector>
namespace solidity::evmasm
{
class AssemblyItem;
/**
* Helper functions to provide context-independent information about assembly items.
*/
struct SemanticInformation
{
/// Corresponds to the effect that a YUL-builtin has on a generic data location (storage, memory,
/// transient storage and other blockchain state).
enum Effect
{
None,
Read,
Write
};
enum class Location { Storage, Memory, TransientStorage };
/**
* Represents a read or write operation from or to one of the data locations.
*/
struct Operation
{
Location location;
Effect effect;
/// Start of affected area as an index into the parameters.
/// Unknown if not provided.
std::optional<size_t> startParameter;
/// Length of the affected area as an index into the parameters (if this is an opcode).
/// Unknown if neither this nor lengthConstant is provided.
std::optional<size_t> lengthParameter;
/// Length as a constant.
/// Unknown if neither this nor lengthParameter is provided.
std::optional<size_t> lengthConstant;
};
/// @returns the sequence of read write operations performed by the instruction.
/// Order matters.
/// For external calls, there is just one unknown read and one unknown write operation,
/// even though there might be multiple.
static std::vector<Operation> readWriteOperations(Instruction _instruction);
/// @returns true if the given items starts a new block for common subexpression analysis.
/// @param _msizeImportant if false, consider an operation non-breaking if its only side-effect is that it modifies msize.
static bool breaksCSEAnalysisBlock(AssemblyItem const& _item, bool _msizeImportant);
/// @returns true if the item is a two-argument operation whose value does not depend on the
/// order of its arguments.
static bool isCommutativeOperation(AssemblyItem const& _item);
static bool isDupInstruction(AssemblyItem const& _item);
static bool isSwapInstruction(AssemblyItem const& _item);
static bool isJumpInstruction(AssemblyItem const& _item);
static bool altersControlFlow(AssemblyItem const& _item);
static bool terminatesControlFlow(AssemblyItem const& _item);
static bool terminatesControlFlow(Instruction _instruction);
static bool reverts(Instruction _instruction);
/// @returns false if the value put on the stack by _item depends on anything else than
/// the information in the current block header, memory, storage, transient storage or stack.
static bool isDeterministic(AssemblyItem const& _item);
/// @returns true if the instruction can be moved or copied (together with its arguments)
/// without altering the semantics. This means it cannot depend on storage, transient storage or memory,
/// cannot have any side-effects, but it can depend on a call-constant state of the blockchain.
static bool movable(Instruction _instruction);
/// If true, the expressions in this code can be moved or copied (together with their arguments)
/// across control flow branches and instructions as long as these instructions' 'effects' do
/// not influence the 'effects' of the aforementioned expressions.
static bool movableApartFromEffects(Instruction _instruction);
/// @returns true if the instruction can be removed without changing the semantics.
/// This does not mean that it has to be deterministic or retrieve information from
/// somewhere else than purely the values of its arguments.
static bool canBeRemoved(Instruction _instruction);
/// @returns true if the instruction can be removed without changing the semantics.
/// This does not mean that it has to be deterministic or retrieve information from
/// somewhere else than purely the values of its arguments.
/// If true, the instruction is still allowed to influence the value returned by the
/// msize instruction.
static bool canBeRemovedIfNoMSize(Instruction _instruction);
static Effect memory(Instruction _instruction);
static Effect storage(Instruction _instruction);
static Effect transientStorage(Instruction _instruction);
static Effect otherState(Instruction _instruction);
static bool invalidInPureFunctions(Instruction _instruction);
static bool invalidInViewFunctions(Instruction _instruction);
};
}
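// --- Illustrative usage sketch (not part of the header above) ---
// A minimal, hypothetical example of how SemanticInformation::readWriteOperations()
// could be consumed, e.g. to check whether an instruction writes to storage.
// The choice of SSTORE and the printed message are assumptions for illustration only.
#include <libevmasm/Instruction.h>
#include <libevmasm/SemanticInformation.h>
#include <iostream>

int main()
{
	using namespace solidity::evmasm;
	// Walk the ordered list of read/write operations reported for SSTORE.
	for (SemanticInformation::Operation const& op:
		SemanticInformation::readWriteOperations(Instruction::SSTORE))
		if (op.location == SemanticInformation::Location::Storage &&
			op.effect == SemanticInformation::Write)
			std::cout << "SSTORE writes to storage\n";
}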
| 5,115 | C++ | .h | 105 | 46.580952 | 123 | 0.78743 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,694 | libsolc.h | ethereum_solidity/libsolc/libsolc.h |
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* @author Christian <c@ethdev.com>
* @date 2014
* Public compiler API.
*/
#pragma once
#include <stdbool.h>
#include <stddef.h>
#ifdef __cplusplus
#define SOLC_NOEXCEPT noexcept
#else
#define SOLC_NOEXCEPT
#endif
#ifdef __cplusplus
extern "C" {
#endif
/// Callback used to retrieve additional source files or data.
///
/// @param _context The readContext passed to solidity_compile. Can be NULL.
/// @param _kind The kind of callback (a string).
/// @param _data The data for the callback (a string).
/// @param o_contents A pointer to the contents of the file, if found. Allocated via solidity_alloc().
/// @param o_error A pointer to an error message, if there is one.
///
/// The file (as well as error) contents that are to be allocated by the callback
/// implementer must use the solidity_alloc() API to allocate its underlying
/// storage. Ownership is then transferred to the compiler which will take care
/// of the deallocation.
///
/// If the callback is not supported, *o_contents and *o_error must be set to NULL.
typedef void (*CStyleReadFileCallback)(void* _context, char const* _kind, char const* _data, char** o_contents, char** o_error);
/// Returns the complete license document.
///
/// The pointer returned must NOT be freed by the caller.
char const* solidity_license() SOLC_NOEXCEPT;
/// Returns the compiler version.
///
/// The pointer returned must NOT be freed by the caller.
char const* solidity_version() SOLC_NOEXCEPT;
/// Allocates a chunk of memory of @p _size bytes.
///
/// Use this function inside callbacks to allocate data that is to be passed to
/// the compiler. You may use solidity_free() or solidity_reset() to free this
/// memory again but it is not required as the compiler takes ownership of any
/// data passed to it via callbacks.
///
/// This function will return NULL if the requested memory region could not be allocated.
char* solidity_alloc(size_t _size) SOLC_NOEXCEPT;
/// Explicitly frees the memory (@p _data) that was being allocated with solidity_alloc()
/// or returned by a call to solidity_compile().
///
/// Important: this call will abort() if an invalid argument is passed to it.
void solidity_free(char* _data) SOLC_NOEXCEPT;
/// Takes a "Standard Input JSON" and an optional callback (can be set to null). Returns
/// a "Standard Output JSON". Both are to be UTF-8 encoded.
///
/// @param _input The input JSON to process.
/// @param _readCallback The optional callback pointer. Can be NULL, but if not NULL,
/// it can be called by the compiler to request additional input.
/// Please see the documentation of the type for details.
/// @param _readContext An optional context pointer passed to _readCallback. Can be NULL.
///
/// @returns A pointer to the result. The pointer returned must be freed by the caller using solidity_free() or solidity_reset().
char* solidity_compile(char const* _input, CStyleReadFileCallback _readCallback, void* _readContext) SOLC_NOEXCEPT;
/// Frees up any allocated memory.
///
/// NOTE: the pointer returned by solidity_compile as well as any other pointer retrieved via solidity_alloc()
/// is invalid after calling this!
void solidity_reset() SOLC_NOEXCEPT;
#ifdef __cplusplus
}
#endif
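// --- Illustrative usage sketch (not part of libsolc.h) ---
// A minimal, hypothetical example of calling the API above without an import
// callback (both the callback and its context set to nullptr). The include path
// and the Standard JSON input string are assumptions for illustration only.
#include <libsolc/libsolc.h>
#include <iostream>

int main()
{
	char const* input =
		"{\"language\":\"Solidity\","
		"\"sources\":{\"a.sol\":{\"content\":\"contract C {}\"}},"
		"\"settings\":{\"outputSelection\":{\"*\":{\"*\":[\"abi\"]}}}}";
	// No import callback is needed for a single in-memory source.
	char* output = solidity_compile(input, nullptr, nullptr);
	std::cout << output << "\n";
	// The result must be released via solidity_free() or solidity_reset().
	solidity_free(output);
	solidity_reset();
}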
| 3,955 | C++ | .h | 86 | 44.639535 | 129 | 0.747276 | ethereum/solidity | 23,062 | 5,715 | 501 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,695 | adi_tree_test.cpp | typesense_typesense/test/adi_tree_test.cpp |
#include "adi_tree.h"
#include <gtest/gtest.h>
#include "logger.h"
#include <fstream>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
class ADITreeTest : public ::testing::Test {
protected:
virtual void SetUp() {
}
virtual void TearDown() {
}
};
TEST_F(ADITreeTest, BasicOps) {
adi_tree_t tree;
// operations on fresh tree
ASSERT_EQ(INT64_MAX, tree.rank(100));
tree.remove(100);
tree.index(100, "f");
ASSERT_EQ(1, tree.rank(100));
tree.index(101, "e");
ASSERT_EQ(2, tree.rank(100));
ASSERT_EQ(1, tree.rank(101));
tree.remove(101);
ASSERT_EQ(1, tree.rank(100));
tree.remove(100);
ASSERT_EQ(INT64_MAX, tree.rank(100));
ASSERT_EQ(INT64_MAX, tree.rank(101));
}
TEST_F(ADITreeTest, OverlappedString) {
adi_tree_t tree;
tree.index(1, "t");
tree.index(2, "to");
ASSERT_EQ(2, tree.rank(2));
ASSERT_EQ(1, tree.rank(1));
tree.remove(1);
tree.remove(2);
ASSERT_EQ(INT64_MAX, tree.rank(2));
ASSERT_EQ(INT64_MAX, tree.rank(1));
}
TEST_F(ADITreeTest, OrderInsertedStrings) {
std::vector<std::pair<uint32_t, std::string>> records = {
{1, "alpha"}, {2, "beta"},
{3, "foo"}, {4, "ant"}, {5, "foobar"},
{6, "buzz"}
};
adi_tree_t tree;
for(auto& record: records) {
tree.index(record.first, record.second);
}
std::sort(records.begin(), records.end(),
[](const std::pair<uint32_t, std::string>& a, const std::pair<uint32_t, std::string>& b) -> bool {
return a.second < b.second;
});
// alpha, ant, beta, buzz, foo, foobar
ASSERT_EQ(1, tree.rank(1));
ASSERT_EQ(3, tree.rank(2));
ASSERT_EQ(5, tree.rank(3));
ASSERT_EQ(2, tree.rank(4));
ASSERT_EQ(6, tree.rank(5));
ASSERT_EQ(4, tree.rank(6));
// remove "foo"
tree.remove(3);
ASSERT_EQ(5, tree.rank(5));
// remove "foobar"
tree.remove(5);
ASSERT_EQ(4, tree.rank(6));
// remove "alpha"
tree.remove(1);
ASSERT_EQ(1, tree.rank(4));
ASSERT_EQ(2, tree.rank(2));
ASSERT_EQ(3, tree.rank(6));
}
TEST_F(ADITreeTest, InsertDuplicateAndDelete) {
adi_tree_t tree;
tree.index(100, "map");
tree.index(101, "map");
tree.remove(100);
tree.remove(101);
ASSERT_EQ(INT64_MAX, tree.rank(100));
ASSERT_EQ(INT64_MAX, tree.rank(101));
ASSERT_EQ(nullptr, tree.get_root());
}
TEST_F(ADITreeTest, InsertDeleteManyElements) {
adi_tree_t tree;
size_t num_elements = UINT16_MAX + 100;
for(size_t i = 0; i < num_elements; i++) {
tree.index(i, "key");
}
for(size_t i = 0; i < num_elements; i++) {
tree.remove(i);
}
}
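// --- Illustrative usage sketch (not part of the test above) ---
// The tests exercise adi_tree_t as a rank index: index() associates an id with a
// string key, and rank() reports the id's 1-based position in lexicographic key
// order (INT64_MAX when the id is unknown). The helper below is a hypothetical
// sketch only; it uses rank() as a sort key, so unknown ids naturally sort last.
#include "adi_tree.h"
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<uint32_t> order_by_key(adi_tree_t& tree, std::vector<uint32_t> ids) {
    std::sort(ids.begin(), ids.end(), [&](uint32_t a, uint32_t b) {
        // Compare documents by the rank of their indexed keys.
        return tree.rank(a) < tree.rank(b);
    });
    return ids;
}
// e.g. after tree.index(1, "beta"); tree.index(2, "alpha");
// order_by_key(tree, {1, 2}) would return {2, 1}.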
| 2,646 | C++ | .cpp | 91 | 23.868132 | 112 | 0.595012 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,696 | collection_optimized_faceting_test.cpp | typesense_typesense/test/collection_optimized_faceting_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionOptimizedFacetingTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_optimized_faceting";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionOptimizedFacetingTest, FacetCounts) {
Collection *coll_array_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::vector<field> fields = {field("name", field_types::STRING, false),
field("name_facet", field_types::STRING, true),
field("age", field_types::INT32, true),
field("years", field_types::INT32_ARRAY, true),
field("rating", field_types::FLOAT, true),
field("timestamps", field_types::INT64_ARRAY, true),
field("tags", field_types::STRING_ARRAY, true),
field("optional_facet", field_types::INT64_ARRAY, true, true),};
std::vector<sort_by> sort_fields = { sort_by("age", "DESC") };
coll_array_fields = collectionManager.get_collection("coll_array_fields").get();
if(coll_array_fields == nullptr) {
coll_array_fields = collectionManager.create_collection("coll_array_fields", 4, fields, "age").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
nlohmann::json document = nlohmann::json::parse(json_line);
document["name_facet"] = document["name"];
const std::string & patched_json_line = document.dump();
coll_array_fields->add(patched_json_line);
}
infile.close();
query_fields = {"name"};
std::vector<std::string> facets = {"tags"};
// single facet with no filters
nlohmann::json results = coll_array_fields->search("Jeremy", query_fields, "", facets, sort_fields,
{0}, 10, 1, FREQUENCY, {false}, 1UL,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),
10UL, "", 30UL, 4UL, "", 1UL, "", "", {},
3UL, "<mark>", "</mark>", {}, 4294967295UL, true,
false, true, "", false, 6000000UL, 4UL, 7UL, fallback,
4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false, "", true,
0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(4, results["facet_counts"][0].size());
ASSERT_EQ("tags", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(false, results["facet_counts"][0]["sampled"].get<bool>());
ASSERT_EQ(4, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["stats"].size());
ASSERT_EQ(4, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
ASSERT_STREQ("gold", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("silver", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("bronze", results["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][2]["count"]);
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][3]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][3]["count"]);
// facet with facet count limit
results = coll_array_fields->search("Jeremy", query_fields, "", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 2, "", 30UL, 4UL, "", 1UL,
"", "", {}, 3UL, "<mark>", "</mark>", {}, 4294967295UL, true,
false, true, "", false, 6000000UL, 4UL, 7UL, fallback, 4UL, {off},
32767UL, 32767UL, 2UL, 2UL, false, "", true, 0UL, max_score, 100UL,
0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("gold", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("silver", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][1]["count"]);
// 2 facets, 1 text query with no filters
facets.clear();
facets.push_back("tags");
facets.push_back("name_facet");
results = coll_array_fields->search("Jeremy", query_fields, "", facets, sort_fields,
{0}, 10, 1, FREQUENCY, {false}, 1UL,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),
10UL, "", 30UL, 4UL, "", 1UL, "", "", {},
3UL, "<mark>", "</mark>", {}, 4294967295UL, true,
false, true, "", false, 6000000UL, 4UL, 7UL, fallback,
4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false, "", true,
0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(2, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_STREQ("name_facet", results["facet_counts"][1]["field_name"].get<std::string>().c_str());
// facet value must be one that's stored, not indexed (i.e. no tokenization/standardization)
ASSERT_STREQ("Jeremy Howard", results["facet_counts"][1]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(5, (int) results["facet_counts"][1]["counts"][0]["count"]);
// facet with wildcard
results = coll_array_fields->search("Jeremy", query_fields, "", {"ag*"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, 1UL, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),
10UL, "", 30UL, 4UL, "", 1UL, "", "", {},
3UL, "<mark>", "</mark>", {}, 4294967295UL, true,
false, true, "", false, 6000000UL, 4UL, 7UL, fallback,
4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false, "", true,
0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("age", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
// facet on a float field without query to check on stats
results = coll_array_fields->search("*", query_fields, "", {"rating"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(4.880199885368347, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(0.0, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(9.99899959564209, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(24.400999426841736, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(5, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
// check for "0" case
ASSERT_STREQ("0", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
facets.clear();
facets.push_back("tags");
results = coll_array_fields->search("*", query_fields, "age: >24", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][2]["count"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][3]["count"]);
ASSERT_STREQ("silver", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("gold", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_STREQ("bronze", results["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][3]["value"].get<std::string>().c_str());
// facet with facet filter query (allows typo correction!)
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, " tags : sliver", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("silver", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
// facet with facet filter query matching 2 tokens
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: fxne platim", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>FINE</mark> <mark>PLATIN</mark>UM", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
// facet with facet filter query matching first token of an array
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: fine", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
// facet with facet filter query matching second token of an array
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: pltinum", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
// facet query on an integer field
results = coll_array_fields->search("*", query_fields, "", {"age"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "age: 2",30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("age", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("24", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>2</mark>4", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("21", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>2</mark>1", results["facet_counts"][0]["counts"][1]["highlighted"].get<std::string>().c_str());
// facet query on a float field
results = coll_array_fields->search("*", query_fields, "", {"rating"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "rating: 7",30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("rating", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("7.812", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>7</mark>.812", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(7.812, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(0, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(9.9989996, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(7.812, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(1, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
// facet with wildcard
results = coll_array_fields->search("Jeremy", query_fields, "", {"ag*"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("age", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
// empty facet query value should return all facets without any filtering of facets
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: ", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags:", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
// Wildcard facet_by can have partial matches
results = coll_array_fields->search("*", query_fields, "", {"nam*"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("name_facet", results["facet_counts"][0]["field_name"].get<std::string>());
// Wildcard facet_by having no counts should not be returned
results = coll_array_fields->search("*", query_fields, "", {"optio*"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(0, results["facet_counts"].size());
results = coll_array_fields->search("*", query_fields, "", {"optional_facet"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("optional_facet", results["facet_counts"][0]["field_name"].get<std::string>());
// bad facet query syntax
auto res_op = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "foobar", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Facet query must be in the `facet_field: value` format.", res_op.error().c_str());
// unknown facet field
res_op = coll_array_fields->search("*", query_fields, "", {"foobar"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "foobar: baz", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Could not find a facet field named `foobar` in the schema.", res_op.error().c_str());
// only prefix matching is valid
res_op = coll_array_fields->search("*", query_fields, "", {"*_facet"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Only prefix matching with a wildcard is allowed.", res_op.error().c_str());
// unknown wildcard facet field
res_op = coll_array_fields->search("*", query_fields, "", {"foo*"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Could not find a facet field for `foo*` in the schema.", res_op.error().c_str());
// when facet query is given but no facet fields are specified, must return an error message
res_op = coll_array_fields->search("*", query_fields, "", {}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: foo", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("The `facet_query` parameter is supplied without a `facet_by` parameter.", res_op.error().c_str());
res_op = coll_array_fields->search("*", query_fields, "", {""}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: foo", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Could not find a facet field named `` in the schema.", res_op.error().c_str());
// given facet query field must be part of facet fields requested
res_op = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "name_facet: jeremy", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Facet query refers to a facet field `name_facet` that is not part of `facet_by` parameter.", res_op.error().c_str());
//facet query on int64 field with stats
results = coll_array_fields->search("*", query_fields, "", {"timestamps"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "timestamps: 142189002").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("timestamps", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("1421890022", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>142189002</mark>2", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(1106321222, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(348974822, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(1453426022, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(13275854664, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(1, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
collectionManager.drop_collection("coll_array_fields");
}
TEST_F(CollectionOptimizedFacetingTest, FacetCountsStringArraySimple) {
Collection *coll1;
std::vector<field> fields = {field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false),
field("in_stock", field_types::BOOL, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["tags"] = {"gold", "silver"};
doc["points"] = 25;
doc["in_stock"] = true;
coll1->add(doc.dump());
std::vector<std::string> facets = {"tags"};
nlohmann::json results = coll1->search("*", {"tags"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,"", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("gold", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("silver", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOptimizedFacetingTest, FacetCountsBool) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),
field("in_stock", field_types::BOOL, true)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "Ford Mustang";
doc["points"] = 25;
doc["in_stock"] = true;
coll1->add(doc.dump());
doc["id"] = "101";
doc["title"] = "Tesla Model S";
doc["points"] = 40;
doc["in_stock"] = false;
coll1->add(doc.dump());
doc["id"] = "102";
doc["title"] = "Ford Mustang GT";
doc["points"] = 10;
doc["in_stock"] = true;
coll1->add(doc.dump());
std::vector<std::string> facets = {"in_stock"};
nlohmann::json results = coll1->search("Ford", {"title"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,"", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(1, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
ASSERT_STREQ("in_stock", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("true", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOptimizedFacetingTest, FacetCountsFloatPrecision) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::FLOAT, true)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "Ford Mustang";
doc["points"] = 113.4;
coll1->add(doc.dump());
std::vector<std::string> facets = {"points"};
nlohmann::json results = coll1->search("*", {"title"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,"", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("points", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("113.4", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("113.4",results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOptimizedFacetingTest, FacetFloatStats) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::FLOAT, true)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "Ford Mustang";
doc["points"] = 50.4;
coll1->add(doc.dump());
doc["id"] = "200";
doc["title"] = "Ford Mustang";
doc["points"] = 50.4;
coll1->add(doc.dump());
std::vector<std::string> facets = {"points"};
nlohmann::json results = coll1->search("*", {"title"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,"", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("points", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(50.40, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(50.40, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(100.80, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(50.40, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(1, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOptimizedFacetingTest, FacetDeleteRepeatingValuesInArray) {
Collection *coll1;
std::vector<field> fields = {field("tags", field_types::STRING_ARRAY, true)};
std::vector<sort_by> sort_fields = {};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields).get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["tags"] = {"alpha", "beta", "alpha"};
coll1->add(doc.dump());
auto findex = coll1->_get_index()->_get_facet_index();
ASSERT_EQ(1, findex->facet_val_num_ids("tags", "alpha"));
ASSERT_EQ(1, findex->facet_node_count("tags", "alpha"));
doc["id"] = "1";
doc["tags"] = {"alpha"};
coll1->add(doc.dump());
coll1->remove("0");
ASSERT_EQ(1, findex->facet_val_num_ids("tags", "alpha"));
ASSERT_EQ(1, findex->facet_node_count("tags", "alpha"));
ASSERT_EQ(0, findex->facet_val_num_ids("tags", "beta"));
ASSERT_EQ(0, findex->facet_node_count("tags", "beta"));
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOptimizedFacetingTest, FacetStatOnFloatFields) {
Collection *coll_float_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/float_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("score", field_types::FLOAT, false),
field("average", field_types::FLOAT, true)
};
std::vector<sort_by> sort_fields_desc = { sort_by("average", "DESC") };
coll_float_fields = collectionManager.get_collection("coll_float_fields").get();
if(coll_float_fields == nullptr) {
coll_float_fields = collectionManager.create_collection("coll_float_fields", 4, fields, "average").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_float_fields->add(json_line);
}
infile.close();
query_fields = {"title"};
auto res_op = coll_float_fields->search("Jeremy", query_fields, "", {"average"}, sort_fields_desc, {0}, 10,
1, FREQUENCY, {false}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
auto results = res_op.get();
ASSERT_EQ(7, results["hits"].size());
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(-21.3799991607666, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(300, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(277.8160007725237, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(39.68800011036053, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(7, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
}
TEST_F(CollectionOptimizedFacetingTest, FacetCountOnSimilarStrings) {
Collection *coll1;
std::vector<field> fields = {field("categories", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, true)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["categories"] = {"England in India"};
doc["points"] = 25;
coll1->add(doc.dump());
doc["id"] = "101";
doc["categories"] = {"India in England"};
doc["points"] = 50;
coll1->add(doc.dump());
std::vector<std::string> facets = {"categories"};
nlohmann::json results = coll1->search("*", {"categories"}, "points:[25, 50]", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("India in England", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("England in India", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
// facet query
results = coll1->search("*", {"categories"}, "points:[25, 50]", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "categories:india eng", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("India in England", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>India</mark> in <mark>Eng</mark>land", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
ASSERT_STREQ("England in India", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Eng</mark>land in <mark>India</mark>", results["facet_counts"][0]["counts"][1]["highlighted"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOptimizedFacetingTest, ConcurrentValueFacetingOnMulFields) {
Collection *coll1;
std::vector<field> fields = {field("c1", field_types::STRING, true),
field("c2", field_types::STRING, true),
field("c3", field_types::STRING, true),
field("c4", field_types::STRING, true),
field("points", field_types::INT32, true)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
for(size_t i = 0; i < 1000; i++) {
nlohmann::json doc;
doc["c1"] = "c1_" + std::to_string(i % 40);
doc["c2"] = "c2_" + std::to_string(i % 40);
doc["c3"] = "c3_" + std::to_string(i % 40);
doc["c4"] = "c4_" + std::to_string(i % 40);
doc["points"] = 25;
coll1->add(doc.dump());
}
std::vector<std::string> facets = {"c1", "c2", "c3", "c4"};
nlohmann::json results = coll1->search("*", {}, "points:[25, 50]", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(4, results["facet_counts"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOptimizedFacetingTest, FacetByNestedIntField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "details", "type": "object", "optional": false },
{"name": "company.num_employees", "type": "int32", "optional": false, "facet": true },
{"name": "companyRank", "type": "int32", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"details": {"count": 1000},
"company": {"num_employees": 2000},
"companyRank": 100
})"_json;
auto doc2 = R"({
"details": {"count": 2000},
"company": {"num_employees": 2000},
"companyRank": 101
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
ASSERT_TRUE(coll1->add(doc2.dump(), CREATE).ok());
std::vector<sort_by> sort_fields = { sort_by("details.count", "ASC") };
auto results = coll1->search("*", {}, "", {"company.num_employees"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1UL, "", "", {}, 3UL,
"<mark>", "</mark>", {}, 4294967295UL, true, false, true, "", false, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("company.num_employees", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_EQ("2000", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
// Nested wildcard faceting
std::vector<facet> wildcard_facets;
coll1->parse_facet("company.*", wildcard_facets);
ASSERT_EQ(1, wildcard_facets.size());
ASSERT_EQ("company.num_employees", wildcard_facets[0].field_name);
wildcard_facets.clear();
coll1->parse_facet("company*", wildcard_facets);
ASSERT_EQ(2, wildcard_facets.size());
ASSERT_EQ("company.num_employees", wildcard_facets[0].field_name);
ASSERT_EQ("companyRank", wildcard_facets[1].field_name);
}
TEST_F(CollectionOptimizedFacetingTest, FacetParseTest){
std::vector<field> fields = {
field("score", field_types::INT32, true),
field("grade", field_types::INT32, true),
field("rank", field_types::INT32, true),
field("range", field_types::INT32, true),
field("sortindex", field_types::INT32, true),
field("scale", field_types::INT32, false),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::string> range_facet_fields {
"score(fail:[0, 40], pass:[40, 100])",
"grade(A:[80, 100], B:[60, 80], C:[40, 60])"
};
std::vector<facet> range_facets;
for(const std::string & facet_field: range_facet_fields) {
coll1->parse_facet(facet_field, range_facets);
}
ASSERT_EQ(2, range_facets.size());
ASSERT_STREQ("score", range_facets[0].field_name.c_str());
ASSERT_TRUE(range_facets[0].is_range_query);
ASSERT_GT(range_facets[0].facet_range_map.size(), 0);
ASSERT_STREQ("grade", range_facets[1].field_name.c_str());
ASSERT_TRUE(range_facets[1].is_range_query);
ASSERT_GT(range_facets[1].facet_range_map.size(), 0);
std::vector<std::string> normal_facet_fields {
"score",
"grade"
};
std::vector<facet> normal_facets;
for(const std::string & facet_field: normal_facet_fields) {
coll1->parse_facet(facet_field, normal_facets);
}
ASSERT_EQ(2, normal_facets.size());
ASSERT_STREQ("score", normal_facets[0].field_name.c_str());
ASSERT_STREQ("grade", normal_facets[1].field_name.c_str());
std::vector<std::string> wildcard_facet_fields {
"ran*",
"sc*",
};
std::vector<facet> wildcard_facets;
for(const std::string & facet_field: wildcard_facet_fields) {
coll1->parse_facet(facet_field, wildcard_facets);
}
ASSERT_EQ(3, wildcard_facets.size());
std::set<std::string> expected{"range", "rank", "score"};
for (size_t i = 0; i < wildcard_facets.size(); i++) {
ASSERT_TRUE(expected.count(wildcard_facets[i].field_name) == 1);
}
wildcard_facets.clear();
coll1->parse_facet("*", wildcard_facets);
// Last field is not a facet.
ASSERT_EQ(fields.size() - 1, wildcard_facets.size());
expected.clear();
for (size_t i = 0; i < fields.size() - 1; i++) {
expected.insert(fields[i].name);
}
for (size_t i = 0; i < wildcard_facets.size(); i++) {
ASSERT_TRUE(expected.count(wildcard_facets[i].field_name) == 1);
}
std::vector<std::string> mixed_facet_fields {
"score",
"grade(A:[80, 100], B:[60, 80], C:[40, 60])",
"ra*",
};
std::vector<facet> mixed_facets;
for(const std::string & facet_field: mixed_facet_fields) {
coll1->parse_facet(facet_field, mixed_facets);
}
ASSERT_EQ(4, mixed_facets.size());
std::vector<facet*> mixed_facets_ptr;
for(auto& f: mixed_facets) {
mixed_facets_ptr.push_back(&f);
}
std::sort(mixed_facets_ptr.begin(), mixed_facets_ptr.end(), [](const facet* f1, const facet* f2) {
return f1->field_name < f2->field_name;
});
ASSERT_EQ("score", mixed_facets_ptr[3]->field_name);
ASSERT_EQ("grade", mixed_facets_ptr[0]->field_name);
ASSERT_TRUE(mixed_facets_ptr[0]->is_range_query);
ASSERT_GT(mixed_facets_ptr[0]->facet_range_map.size(), 0);
ASSERT_EQ("rank", mixed_facets_ptr[2]->field_name);
ASSERT_EQ("range", mixed_facets_ptr[1]->field_name);
// facet field containing the sort keyword should parse successfully
std::vector<facet> range_facets_with_sort_as_field;
auto facet_range = "sortindex(Top:[85, 100], Average:[60, 85])";
coll1->parse_facet(facet_range, range_facets_with_sort_as_field);
ASSERT_EQ(1, range_facets_with_sort_as_field.size());
// range facet labels with special chars
std::vector<std::string> range_facet_special_chars{
"score(%0 - %19:[0, 20], %20 - %59:[20, 60], %60+:[60, ])",
"range($$$:[0, 20])"
};
std::vector<facet> facet_special_chars;
for(const std::string& facet_field: range_facet_special_chars) {
auto res = coll1->parse_facet(facet_field, facet_special_chars);
if(!res.error().empty()) {
LOG(ERROR) << res.error();
FAIL();
}
}
// should not allow passing only space chars
facet_special_chars.clear();
auto only_space_char("range( :[0, 20])");
auto res = coll1->parse_facet(only_space_char, facet_special_chars);
ASSERT_FALSE(res.error().empty());
ASSERT_EQ(400, res.code());
ASSERT_EQ("Facet range value is not valid.", res.error());
}
TEST_F(CollectionOptimizedFacetingTest, RangeFacetTest) {
std::vector<field> fields = {field("place", field_types::STRING, false),
field("state", field_types::STRING, true),
field("visitors", field_types::INT32, true),
field("trackingFrom", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["place"] = "Mysore Palace";
doc1["state"] = "Karnataka";
doc1["visitors"] = 235486;
doc1["trackingFrom"] = 1900;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["place"] = "Hampi";
doc2["state"] = "Karnataka";
doc2["visitors"] = 187654;
doc2["trackingFrom"] = 1900;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["place"] = "Mahabalipuram";
doc3["state"] = "TamilNadu";
doc3["visitors"] = 174684;
doc3["trackingFrom"] = 1900;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["place"] = "Meenakshi Amman Temple";
doc4["state"] = "TamilNadu";
doc4["visitors"] = 246676;
doc4["trackingFrom"] = 2000;
nlohmann::json doc5;
doc5["id"] = "4";
doc5["place"] = "Staue of Unity";
doc5["state"] = "Gujarat";
doc5["visitors"] = 345878;
doc5["trackingFrom"] = 2000;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
ASSERT_TRUE(coll1->add(doc5.dump()).ok());
auto results = coll1->search("Karnataka", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[200000, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("Busy", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ("VeryBusy", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
auto results2 = coll1->search("Gujarat", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[200000, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results2["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, results2["facet_counts"][0]["counts"][0]["count"].get<std::size_t>());
ASSERT_STREQ("VeryBusy", results2["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_TRUE(results2["facet_counts"][0]["counts"][1]["value"] == nullptr);
// ensure that an unknown facet field is reported as an error
auto results3 = coll1->search("Gujarat", {"state"},
"", {"visitorsz(Busy:[0, 200000], VeryBusy:[200000, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", true, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false, "", true, 0UL,
max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_FALSE(results3.ok());
ASSERT_EQ("Could not find a facet field named `visitorsz` in the schema.", results3.error());
auto results4 = coll1->search("*", {"state"},
"", {"trackingFrom(Old:[0, 1910], New:[1910, 2100])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(2, results4["facet_counts"][0]["counts"].size());
ASSERT_EQ(3, results4["facet_counts"][0]["counts"][0]["count"].get<std::size_t>());
ASSERT_EQ("Old", results4["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(2, results4["facet_counts"][0]["counts"][1]["count"].get<std::size_t>());
ASSERT_EQ("New", results4["facet_counts"][0]["counts"][1]["value"].get<std::string>());
// ensure that range facets are only allowed on numeric (integer/float) fields
auto rop = coll1->search("Karnataka", {"state"},
"", {"state(Busy:[0, 200000], VeryBusy:[200000, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_FALSE(rop.ok());
ASSERT_EQ("Range facet is restricted to only integer and float fields.", rop.error());
// ensure that bad facet range values are handled
rop = coll1->search("Karnataka", {"state"},
"", {"visitors(Busy:[alpha, 200000], VeryBusy:[200000, beta])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_FALSE(rop.ok());
ASSERT_EQ("Facet range value is not valid.", rop.error());
collectionManager.drop_collection("coll1");
}
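// Adjacent range facet buckets must share a boundary; both gaps and overlaps between ranges are rejected.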
TEST_F(CollectionOptimizedFacetingTest, RangeFacetContinuity) {
std::vector<field> fields = {field("place", field_types::STRING, false),
field("state", field_types::STRING, false),
field("visitors", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["place"] = "Mysore Palace";
doc1["state"] = "Karnataka";
doc1["visitors"] = 235486;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["place"] = "Hampi";
doc2["state"] = "Karnataka";
doc2["visitors"] = 187654;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["place"] = "Mahabalipuram";
doc3["state"] = "TamilNadu";
doc3["visitors"] = 174684;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["place"] = "Meenakshi Amman Temple";
doc4["state"] = "TamilNadu";
doc4["visitors"] = 246676;
nlohmann::json doc5;
doc5["id"] = "4";
doc5["place"] = "Staue of Unity";
doc5["state"] = "Gujarat";
doc5["visitors"] = 345878;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
ASSERT_TRUE(coll1->add(doc5.dump()).ok());
auto results = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[200001, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_STREQ("Ranges in range facet syntax should be continous.", results.error().c_str());
auto results2 = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[199999, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_STREQ("Ranges in range facet syntax should be continous.", results2.error().c_str());
collectionManager.drop_collection("coll1");
}
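// Malformed range facet syntax (missing brackets or separators) should surface descriptive parse errors.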
TEST_F(CollectionOptimizedFacetingTest, RangeFacetTypo) {
std::vector<field> fields = {field("place", field_types::STRING, false),
field("state", field_types::STRING, false),
field("visitors", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["place"] = "Mysore Palace";
doc1["state"] = "Karnataka";
doc1["visitors"] = 235486;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["place"] = "Hampi";
doc2["state"] = "Karnataka";
doc2["visitors"] = 187654;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["place"] = "Mahabalipuram";
doc3["state"] = "TamilNadu";
doc3["visitors"] = 174684;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["place"] = "Meenakshi Amman Temple";
doc4["state"] = "TamilNadu";
doc4["visitors"] = 246676;
nlohmann::json doc5;
doc5["id"] = "4";
doc5["place"] = "Staue of Unity";
doc5["state"] = "Gujarat";
doc5["visitors"] = 345878;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
ASSERT_TRUE(coll1->add(doc5.dump()).ok());
auto results = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[200000, 500000)"}, //missing ']' at end
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_STREQ("Error splitting the facet range values.", results.error().c_str());
auto results2 = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:200000, 500000])"}, //missing '[' in second range
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_STREQ("Invalid facet param `VeryBusy`.", results2.error().c_str());
auto results3 = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000] VeryBusy:[200000, 500000])"}, //missing ',' between ranges
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_STREQ("Invalid facet format.", results3.error().c_str());
auto results4 = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0 200000], VeryBusy:[200000, 500000])"}, //missing ',' between first ranges values
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_STREQ("Facet range value is not valid.", results4.error().c_str());
auto results5 = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000 VeryBusy:200000, 500000])"}, //missing '],' and '['
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000000UL,
4UL, 7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values");
ASSERT_STREQ("Error splitting the facet range values.", results5.error().c_str());
collectionManager.drop_collection("coll1");
}
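// Facet counts may be estimated by sampling the matched documents (the `sampled` flag is set in the response);
// exact counts are returned when the number of matches stays below the sample threshold.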
TEST_F(CollectionOptimizedFacetingTest, SampleFacetCounts) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "color", "type": "string", "facet": true}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::mt19937 gen(137723); // use constant seed to make sure that counts don't jump around
std::uniform_int_distribution<> distr(1, 100); // 1 to 100 inclusive
size_t count_blue = 0, count_red = 0;
for(size_t i = 0; i < 1000; i++) {
nlohmann::json doc;
if(distr(gen) % 4 == 0) {
doc["color"] = "blue";
count_blue++;
} else {
doc["color"] = "red";
count_red++;
}
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto res = coll1->search("*", {}, "color:blue || color:red", {"color"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 5, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1000, res["found"].get<size_t>());
ASSERT_EQ(1, res["facet_counts"].size());
ASSERT_EQ(2, res["facet_counts"][0]["counts"].size());
// verify approximate counts
ASSERT_GE(res["facet_counts"][0]["counts"][0]["count"].get<size_t>(), 700);
ASSERT_EQ("red", res["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_GE(res["facet_counts"][0]["counts"][1]["count"].get<size_t>(), 200);
ASSERT_EQ("blue", res["facet_counts"][0]["counts"][1]["value"].get<std::string>());
ASSERT_TRUE(res["facet_counts"][0]["sampled"].get<bool>());
// when sample threshold is high, don't estimate
res = coll1->search("*", {}, "color:blue || color:red", {"color"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 10, 10000, 4294967295UL, "top_values").get();
ASSERT_EQ(1000, res["found"].get<size_t>());
ASSERT_EQ(1, res["facet_counts"].size());
ASSERT_EQ(2, res["facet_counts"][0]["counts"].size());
for(size_t i = 0; i < res["facet_counts"][0]["counts"].size(); i++) {
if(res["facet_counts"][0]["counts"][i]["value"].get<std::string>() == "red") {
ASSERT_EQ(count_red, res["facet_counts"][0]["counts"][i]["count"].get<size_t>());
} else {
ASSERT_EQ(count_blue, res["facet_counts"][0]["counts"][i]["count"].get<size_t>());
}
}
ASSERT_FALSE(res["facet_counts"][0]["sampled"].get<bool>());
// test for sample percent > 100
auto res_op = coll1->search("*", {}, "", {"color"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 200, 0, 4294967295UL, "top_values");
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Value of `facet_sample_percent` must be less than 100.", res_op.error());
}
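// Facet values containing special characters (e.g. "| . |") in array fields must be counted as distinct values.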
TEST_F(CollectionOptimizedFacetingTest, FacetOnArrayFieldWithSpecialChars) {
std::vector<field> fields = {
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, true),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["tags"] = {"gamma"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["tags"] = {"alpha", "| . |", "beta", "gamma"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"tags"}, {}, {2}, 10, 1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(4, results["facet_counts"][0]["counts"].size());
for(size_t i = 0; i < results["facet_counts"][0]["counts"].size(); i++) {
auto fvalue = results["facet_counts"][0]["counts"][i]["value"].get<std::string>();
if(fvalue == "gamma") {
ASSERT_EQ(2, results["facet_counts"][0]["counts"][i]["count"].get<size_t>());
} else {
ASSERT_EQ(1, results["facet_counts"][0]["counts"][i]["count"].get<size_t>());
}
}
}
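// Facet counts must not include values that exist only in deleted documents.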
TEST_F(CollectionOptimizedFacetingTest, FacetTestWithDeletedDoc) {
std::vector<field> fields = {
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, true),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["id"] = "0";
doc["tags"] = {"foobar"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["tags"] = {"gamma"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["tags"] = {"beta"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "3";
doc["tags"] = {"alpha"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
coll1->remove("0");
auto results = coll1->search("*", {},
"", {"tags"}, {}, {2}, 10, 1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
}
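// A facet query (e.g. "color:b") restricts facet values to those matching the query prefix and highlights the matched tokens.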
TEST_F(CollectionOptimizedFacetingTest, FacetQueryTest) {
std::vector<field> fields = {
field("color", field_types::STRING, true),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::string> colors = {"apple red", "azure", "amazon green", "apricot orange",
"blue", "barrel blue", "banana yellow", "ball green", "baikal"};
for(size_t i = 0; i < 100; i++) {
nlohmann::json doc;
doc["color"] = colors[i % colors.size()];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// add colors that DON'T start with "b" to push these up the count list
for(size_t i = 0; i < 4; i++) {
nlohmann::json doc;
doc["color"] = colors[i];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("*", {},
"", {"color"}, {}, {2}, 1, 1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 5, "color:b", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(4, results["facet_counts"][0]["counts"].size()); // 4 is default candidate size
// junk string should produce no facets
results = coll1->search("*", {},
"", {"color"}, {}, {2}, 1, 1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 5, "color:xsda", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(0, results["facet_counts"][0]["counts"].size());
results = coll1->search("*", {},
"", {"color"}, {}, {2}, 1, 1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 5, "color:green a", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("amazon green", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("<mark>a</mark>mazon <mark>green</mark>", results["facet_counts"][0]["counts"][0]["highlighted"]);
}
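// Characters listed in symbols_to_index (here '[' and ']') should be matched and highlighted by facet queries.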
TEST_F(CollectionOptimizedFacetingTest, FacetQueryWithSymbols) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string", "facet": true}
],
"symbols_to_index": ["[", "]"],
"token_separators": ["[", "]"]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::vector<std::string> titles = {"Article 4", "Article 4[7]", "Article 4[11]", "Article 4[22][a]"};
for(size_t i = 0; i < titles.size(); i++) {
nlohmann::json doc;
doc["title"] = titles[i];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("*", {},
"", {"title"}, {}, {2}, 1, 1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 5, "title:article 4[", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("<mark>Article</mark> <mark>4[</mark>7]", results["facet_counts"][0]["counts"][0]["highlighted"]);
ASSERT_EQ("<mark>Article</mark> <mark>4[</mark>11]", results["facet_counts"][0]["counts"][1]["highlighted"]);
ASSERT_EQ("<mark>Article</mark> <mark>4[</mark>22][a]", results["facet_counts"][0]["counts"][2]["highlighted"]);
}
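// String facet values are truncated to 255 characters in the response.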
TEST_F(CollectionOptimizedFacetingTest, StringLengthTest) {
std::vector<field> fields = {
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, true),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["tags"] = {"gamma"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["tags"] = {"beta"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["tags"] = {"alpha"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
std::string longStr = "";
for(auto i = 0; i < 20; ++i) {
longStr+="alphabetagamma";
}
ASSERT_TRUE(280 == longStr.size());
std::vector<std::string> vec;
vec.emplace_back(longStr);
doc["tags"] = vec;
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"tags"}, {}, {2}, 10, 1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(4, results["facet_counts"][0]["counts"].size());
longStr = results["facet_counts"][0]["counts"][3]["value"];
//string facet values are truncated to 255 characters
ASSERT_EQ(255, longStr.size());
}
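// For nested facet fields listed in facet_return_parent, each facet count also includes
// the immediate parent object of the faceted value.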
TEST_F(CollectionOptimizedFacetingTest, FacetingReturnParent) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "value.color", "type": "string", "optional": false, "facet": true },
{"name": "value.r", "type": "int32", "optional": false, "facet": true },
{"name": "value.g", "type": "int32", "optional": false, "facet": true },
{"name": "value.b", "type": "int32", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"value": {
"color": "red",
"r": 255,
"g": 0,
"b": 0
}
})"_json;
nlohmann::json doc2 = R"({
"value": {
"color": "blue",
"r": 0,
"g": 0,
"b": 255
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {},"", {"value.color"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "top_values", 30000,
2, "", {"value.color"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("red", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}", results["facet_counts"][0]["counts"][1]["parent"].dump());
ASSERT_EQ("blue", results["facet_counts"][0]["counts"][1]["value"]);
//facet fields not listed in facet_return_parent return only the facet value, without the immediate parent object
search_op = coll1->search("*", {},"", {"value.color"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "top_values", 30000,
2, "", {});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("red", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("blue", results["facet_counts"][0]["counts"][1]["value"]);
search_op = coll1->search("*", {},"", {"value.color", "value.r"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "top_values", 30000,
2, "", {"value.r"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(2, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("red", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("blue", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ(2, results["facet_counts"][1]["counts"].size());
ASSERT_EQ("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}", results["facet_counts"][1]["counts"][0]["parent"].dump());
ASSERT_EQ("0", results["facet_counts"][1]["counts"][0]["value"]);
ASSERT_EQ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}", results["facet_counts"][1]["counts"][1]["parent"].dump());
ASSERT_EQ("255", results["facet_counts"][1]["counts"][1]["value"]);
//return parent for multiple facet fields
search_op = coll1->search("*", {},"", {"value.color", "value.r", "value.g", "value.b"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "top_values", 30000,
2, "", {"value.r", "value.g", "value.b"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(4, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("red", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("blue", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ(2, results["facet_counts"][1]["counts"].size());
ASSERT_EQ("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}", results["facet_counts"][1]["counts"][0]["parent"].dump());
ASSERT_EQ("0", results["facet_counts"][1]["counts"][0]["value"]);
ASSERT_EQ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}", results["facet_counts"][1]["counts"][1]["parent"].dump());
ASSERT_EQ("255", results["facet_counts"][1]["counts"][1]["value"]);
ASSERT_EQ(1, results["facet_counts"][2]["counts"].size());
ASSERT_EQ("0", results["facet_counts"][2]["counts"][0]["value"]);
//same facet value appearing in multiple records can return any parent
ASSERT_TRUE(("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}" == results["facet_counts"][2]["counts"][0]["parent"].dump())
|| ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}" == results["facet_counts"][2]["counts"][0]["parent"].dump()));
ASSERT_EQ(2, results["facet_counts"][3]["counts"].size());
ASSERT_EQ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}", results["facet_counts"][3]["counts"][0]["parent"].dump());
ASSERT_EQ("0", results["facet_counts"][3]["counts"][0]["value"]);
ASSERT_EQ("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}", results["facet_counts"][3]["counts"][1]["parent"].dump());
ASSERT_EQ("255", results["facet_counts"][3]["counts"][1]["value"]);
}
TEST_F(CollectionOptimizedFacetingTest, FacetingReturnParentDeepNested) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "product.specification.detail.width", "type": "int32", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"product" : {
"specification": {
"detail" : {
"width": 25
}
}
}
})"_json;
nlohmann::json doc2 = R"({
"product" : {
"specification": {
"detail" : {
"width": 30
}
}
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {},"", {"product.specification.detail.width"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "top_values", 30000,
2, "", {"product.specification.detail.width"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("{\"width\":30}", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("30", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("{\"width\":25}", results["facet_counts"][0]["counts"][1]["parent"].dump());
ASSERT_EQ("25", results["facet_counts"][0]["counts"][1]["value"]);
}
TEST_F(CollectionOptimizedFacetingTest, FacetingReturnParentObject) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "value", "type": "object", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"value": {
"color": "red",
"r": 255,
"g": 0,
"b": 0
}
})"_json;
nlohmann::json doc2 = R"({
"value": {
"color": "blue",
"r": 0,
"g": 0,
"b": 255
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {},"", {"value.color"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "top_values", 30000,
2, "", {"value.color"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("red", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}", results["facet_counts"][0]["counts"][1]["parent"].dump());
ASSERT_EQ("blue", results["facet_counts"][0]["counts"][1]["value"]);
}
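// When the faceted field lives inside an object array, the returned parent is the array element containing the value.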
TEST_F(CollectionOptimizedFacetingTest, FacetingReturnParentArrayFields) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "tags.id", "type": "string[]", "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"tags": [
{
"id": "tag-1",
"name": "name for tag-1"
},
{
"id": "tag-2",
"name": "name for tag-2"
}
]
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {}, "", {"tags.id"},
{}, {2}, 10, 1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "", {},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "top_values", 30000,
2, "", {"tags.id"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("{\"id\":\"tag-2\",\"name\":\"name for tag-2\"}", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("tag-2", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("{\"id\":\"tag-1\",\"name\":\"name for tag-1\"}", results["facet_counts"][0]["counts"][1]["parent"].dump());
ASSERT_EQ("tag-1", results["facet_counts"][0]["counts"][1]["value"]);
}
TEST_F(CollectionOptimizedFacetingTest, FacetingReturnParentArrayFields2) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "nestedCategories", "type": "object"},
{"name": "nestedCategories.categories.FullPath", "type": "string[]", "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"nestedCategories": {
"categories": [
{"FullPath": "foobar"}
]
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {}, "", {"nestedCategories.categories.FullPath"},
{}, {2}, 10, 1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "", {},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "top_values", 30000,
2, "", {"nestedCategories.categories.FullPath"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(R"({"FullPath":"foobar"})", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("foobar", results["facet_counts"][0]["counts"][0]["value"]);
}
TEST_F(CollectionOptimizedFacetingTest, FacetingReturnParentArrayFields3) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "nestedCategories", "type": "object"},
{"name": "nestedCategories.categories", "type": "string[]", "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"nestedCategories": {
"categories": [
"hello", "world"
]
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {}, "", {"nestedCategories.categories"},
{}, {2}, 10, 1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "", {},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "top_values", 30000,
2, "", {"nestedCategories.categories"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("{\"categories\":[\"hello\",\"world\"]}", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("world", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("{\"categories\":[\"hello\",\"world\"]}", results["facet_counts"][0]["counts"][1]["parent"].dump());
ASSERT_EQ("hello", results["facet_counts"][0]["counts"][1]["value"]);
}
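// sort_by:_alpha sorts string facet values lexicographically in the requested order.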
TEST_F(CollectionOptimizedFacetingTest, FacetSortByAlpha) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "phone", "type": "string", "optional": false, "facet": true },
{"name": "brand", "type": "string", "optional": false, "facet": true },
{"name": "rating", "type": "float", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
nlohmann::json doc;
doc["phone"] = "Oneplus 11R";
doc["brand"] = "Oneplus";
doc["rating"] = 4.6;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "Fusion Plus";
doc["brand"] = "Moto";
doc["rating"] = 4.2;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "S22 Ultra";
doc["brand"] = "Samsung";
doc["rating"] = 4.1;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "GT Master";
doc["brand"] = "Realme";
doc["rating"] = 4.4;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "T2";
doc["brand"] = "Vivo";
doc["rating"] = 4.0;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "Mi 6";
doc["brand"] = "Xiaomi";
doc["rating"] = 3.9;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "Z6 Lite";
doc["brand"] = "Iqoo";
doc["rating"] = 4.3;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
//sort facets by phone in asc order
auto search_op = coll1->search("*", {}, "", {"phone(sort_by:_alpha:asc)"},
{}, {2});
if (!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(7, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Fusion Plus", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("GT Master", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("Mi 6", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("Oneplus 11R", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("S22 Ultra", results["facet_counts"][0]["counts"][4]["value"]);
ASSERT_EQ("T2", results["facet_counts"][0]["counts"][5]["value"]);
ASSERT_EQ("Z6 Lite", results["facet_counts"][0]["counts"][6]["value"]);
//sort facets by brand in desc order
search_op = coll1->search("*", {}, "", {"brand(sort_by:_alpha:desc)"},
{}, {2});
if (!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(7, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Xiaomi", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("Vivo", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("Samsung", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("Realme", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("Oneplus", results["facet_counts"][0]["counts"][4]["value"]);
ASSERT_EQ("Moto", results["facet_counts"][0]["counts"][5]["value"]);
ASSERT_EQ("Iqoo", results["facet_counts"][0]["counts"][6]["value"]);
//sort facets by brand in desc order and phone by asc order
search_op = coll1->search("*", {}, "", {"brand(sort_by:_alpha:desc)",
"phone(sort_by:_alpha:asc)"},
{}, {2});
if (!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(2, results["facet_counts"].size());
ASSERT_EQ(7, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Xiaomi", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("Vivo", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("Samsung", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("Realme", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("Oneplus", results["facet_counts"][0]["counts"][4]["value"]);
ASSERT_EQ("Moto", results["facet_counts"][0]["counts"][5]["value"]);
ASSERT_EQ("Iqoo", results["facet_counts"][0]["counts"][6]["value"]);
ASSERT_EQ(7, results["facet_counts"][1]["counts"].size());
ASSERT_EQ("Fusion Plus", results["facet_counts"][1]["counts"][0]["value"]);
ASSERT_EQ("GT Master", results["facet_counts"][1]["counts"][1]["value"]);
ASSERT_EQ("Mi 6", results["facet_counts"][1]["counts"][2]["value"]);
ASSERT_EQ("Oneplus 11R", results["facet_counts"][1]["counts"][3]["value"]);
ASSERT_EQ("S22 Ultra", results["facet_counts"][1]["counts"][4]["value"]);
ASSERT_EQ("T2", results["facet_counts"][1]["counts"][5]["value"]);
ASSERT_EQ("Z6 Lite", results["facet_counts"][1]["counts"][6]["value"]);
}
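// Facet values can also be ordered by a sibling numeric field, e.g. receipe.name(sort_by:receipe.calories:asc).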
TEST_F(CollectionOptimizedFacetingTest, FacetSortByOtherField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "receipe", "type": "object", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"receipe": {
"name": "cheese pizza",
"calories": 300,
"origin": "america"
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc2 = R"({
"receipe": {
"name": "noodles",
"calories": 250,
"origin": "china"
}
})"_json;
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc3 = R"({
"receipe": {
"name": "hamburger",
"calories": 350,
"origin": "america"
}
})"_json;
add_op = coll1->add(doc3.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc4 = R"({
"receipe": {
"name": "schezwan rice",
"calories": 150,
"origin": "china"
}
})"_json;
add_op = coll1->add(doc4.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc5 = R"({
"receipe": {
"name": "butter chicken",
"calories": 270,
"origin": "india"
}
})"_json;
add_op = coll1->add(doc5.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
//sort facet values by calories in asc order
auto search_op = coll1->search("*", {},"",
{"receipe.name(sort_by:receipe.calories:asc)"},
{}, {2});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(5, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("schezwan rice", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("noodles", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("butter chicken", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("cheese pizza", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("hamburger", results["facet_counts"][0]["counts"][4]["value"]);
//sort facet values by calories in desc order
search_op = coll1->search("*", {},"",
{"receipe.name(sort_by:receipe.calories:desc)"},
{}, {2});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(5, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("hamburger", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("cheese pizza", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("butter chicken", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("noodles", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("schezwan rice", results["facet_counts"][0]["counts"][4]["value"]);
}
TEST_F(CollectionOptimizedFacetingTest, FacetSortByOtherFloatField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "investment", "type": "object", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"investment": {
"name": "Term Deposits",
"interest_rate": 7.1,
"class": "fixed"
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc2 = R"({
"investment": {
"name": "Gold",
"interest_rate": 5.4,
"class": "fixed"
}
})"_json;
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc3 = R"({
"investment": {
"name": "Mutual Funds",
"interest_rate": 12,
"class": "Equity"
}
})"_json;
add_op = coll1->add(doc3.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc4 = R"({
"investment": {
"name": "Land",
"interest_rate": 9.1,
"class": "real estate"
}
})"_json;
add_op = coll1->add(doc4.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc5 = R"({
"investment": {
"name": "Bonds",
"interest_rate": 7.24,
"class": "g-sec"
}
})"_json;
add_op = coll1->add(doc5.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
//sort facet values by interest_rate in asc order
auto search_op = coll1->search("*", {},"",
{"investment.name(sort_by:investment.interest_rate:asc)"},
{}, {2});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(5, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Gold", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("Term Deposits", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("Bonds", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("Land", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("Mutual Funds", results["facet_counts"][0]["counts"][4]["value"]);
//sort facet values by interest_rate in desc order
search_op = coll1->search("*", {},"",
{"investment.name(sort_by:investment.interest_rate:desc)"},
{}, {2});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(5, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Mutual Funds", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("Land", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("Bonds", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("Term Deposits", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("Gold", results["facet_counts"][0]["counts"][4]["value"]);
}
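// Facet sort parameters are validated: alpha sort requires a string facet field, the sort_by field must be
// numeric, and the sort expression must be well formed (extra whitespace is tolerated).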
TEST_F(CollectionOptimizedFacetingTest, FacetSortValidation) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "phone", "type": "string", "optional": false, "facet": true },
{"name": "brand", "type": "string", "optional": false, "facet": true },
{"name": "rating", "type": "float", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
nlohmann::json doc;
doc["phone"] = "Oneplus 11R";
doc["brand"] = "Oneplus";
doc["rating"] = 4.6;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "Fusion Plus";
doc["brand"] = "Moto";
doc["rating"] = 4.2;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "S22 Ultra";
doc["brand"] = "Samsung";
doc["rating"] = 4.1;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
//try alpha sort on a non-string facet field
auto search_op = coll1->search("*", {}, "", {"rating(sort_by:_alpha:desc)"},
{}, {2});
ASSERT_EQ(400, search_op.code());
ASSERT_EQ("Facet field should be string type to apply alpha sort.", search_op.error());
//try sorting facet values by a string field (the sort field must be numeric)
search_op = coll1->search("*", {}, "", {"phone(sort_by:brand:desc)"},
{}, {2});
ASSERT_EQ(400, search_op.code());
ASSERT_EQ("Sort field should be non string type to apply sort.", search_op.error());
//incorrect syntax
search_op = coll1->search("*", {}, "", {"phone(sort_by:desc)"},
{}, {2});
ASSERT_EQ(400, search_op.code());
ASSERT_EQ("Invalid sort format.", search_op.error());
search_op = coll1->search("*", {}, "", {"phone(sort:_alpha:desc)"},
{}, {2});
ASSERT_EQ(400, search_op.code());
ASSERT_EQ("Invalid facet param `sort`.", search_op.error());
//invalid param
search_op = coll1->search("*", {}, "", {"phone(sort_by:_alpha:foo)"},
{}, {2});
ASSERT_EQ(400, search_op.code());
ASSERT_EQ("Invalid sort param.", search_op.error());
//whitespace within the sort expression is allowed
search_op = coll1->search("*", {}, "", {"phone( sort_by: _alpha : asc)"},
{}, {2});
if (!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Fusion Plus", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("Oneplus 11R", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("S22 Ultra", results["facet_counts"][0]["counts"][2]["value"]);
//facet sort with facet query should work
search_op = coll1->search("*", query_fields, "", {"phone(sort_by:_alpha:desc)"},
sort_fields, {0}, 10, 1, FREQUENCY,{false},
Index::DROP_TOKENS_THRESHOLD,spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "phone: plus",
30UL, 4UL,"", 1UL,
"", "", {}, 3UL, "<mark>",
"</mark>", {},4294967295UL, true,
false, true, "", false, 6000000UL,
4UL,7UL, fallback, 4UL, {off}, 32767UL,
32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL,
0UL, 4294967295UL, "top_values");
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Fusion Plus", results["facet_counts"][0]["counts"][0]["value"]);
}
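// Facet query matching and highlighting should work for accented / non-ASCII characters.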
TEST_F(CollectionOptimizedFacetingTest, FacetQueryWithDifferentLocale) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "phone", "type": "string", "optional": false, "facet": true },
{"name": "brand", "type": "string", "optional": false, "facet": true },
{"name": "rating", "type": "float", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
nlohmann::json doc;
doc["phone"] = "çapeta";
doc["brand"] = "Samsung";
doc["rating"] = 4.1;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "teléfono justo";
doc["brand"] = "Oneplus";
doc["rating"] = 4.6;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", query_fields, "", {"phone(sort_by:_alpha:desc)"},
sort_fields, {0}, 10, 1, FREQUENCY,{false},
Index::DROP_TOKENS_THRESHOLD,spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "phone: ç",
30UL, 4UL,"", 1UL,
"", "", {}, 3UL, "<mark>",
"</mark>", {},4294967295UL, true,
false, true, "", false, 6000000UL,
4UL,7UL, fallback, 4UL, {off}, 32767UL,
32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL,
0UL, 4294967295UL, "top_values");
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("çapeta", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("<mark>ç</mark>apeta", results["facet_counts"][0]["counts"][0]["highlighted"]);
search_op = coll1->search("*", query_fields, "", {"phone(sort_by:_alpha:desc)"},
sort_fields, {0}, 10, 1, FREQUENCY,{false},
Index::DROP_TOKENS_THRESHOLD,spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "phone: telé",
30UL, 4UL,"", 1UL,
"", "", {}, 3UL, "<mark>",
"</mark>", {},4294967295UL, true,
false, true, "", false, 6000000UL,
4UL,7UL, fallback, 4UL, {off}, 32767UL,
32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL,
0UL, 4294967295UL, "top_values");
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("teléfono justo", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("<mark>telé</mark>fono justo", results["facet_counts"][0]["counts"][0]["highlighted"]);
}
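// Numeric facets also return stats (min, max, sum, avg, total_values); min and max should reflect all
// indexed values even when the returned count list is truncated.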
TEST_F(CollectionOptimizedFacetingTest, ValueIndexStatsMinMax) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("rating", field_types::FLOAT, true)};
std::vector<sort_by> sort_fields = {sort_by("rating", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "rating").get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The Shawshank Redemption";
doc["rating"] = 9.3;
coll1->add(doc.dump());
doc["id"] = "1";
doc["title"] = "The Godfather";
doc["rating"] = 9.2;
coll1->add(doc.dump());
doc["id"] = "2";
doc["title"] = "The Dark Knight";
doc["rating"] = 9;
coll1->add(doc.dump());
doc["id"] = "3";
doc["title"] = "Pulp Fiction";
doc["rating"] = 8.9;
coll1->add(doc.dump());
doc["id"] = "4";
doc["title"] = "Fight Club";
doc["rating"] = 8.8;
coll1->add(doc.dump());
std::vector<std::string> facets = {"rating"};
//limit max facets to 2
nlohmann::json results = coll1->search("*", {"title"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 2,"", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("9.3", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ("9.2", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
//verify facet stats returned for the numeric facet
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(9.25, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(8.800000190734863, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(9.300000190734863, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(18.5, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(2, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
}
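// Faceting should also work on result sets produced by phrase queries (quoted search terms).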
TEST_F(CollectionOptimizedFacetingTest, FacetWithPhraseSearch) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("rating", field_types::FLOAT, false)};
std::vector<sort_by> sort_fields = {sort_by("rating", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "rating").get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The Shawshank Redemption";
doc["rating"] = 9.3;
coll1->add(doc.dump());
doc["id"] = "1";
doc["title"] = "The Godfather";
doc["rating"] = 9.2;
coll1->add(doc.dump());
std::vector<std::string> facets = {"title"};
nlohmann::json results = coll1->search(R"("shawshank")", {"title"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 2,"", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("The Shawshank Redemption", results["facet_counts"][0]["counts"][0]["value"]);
}
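// The facet count list should be ordered by descending count, keeping only the top values when limited.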
TEST_F(CollectionOptimizedFacetingTest, StringFacetsCountListOrderTest) {
//check that the count list orders facet values by descending count
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("rating", field_types::FLOAT, false)};
std::vector<sort_by> sort_fields = {sort_by("rating", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "rating").get();
}
std::vector<std::string> titles {"The Shawshank Redemption", "The Godfather", "The Dark Knight"};
nlohmann::json doc;
int i = 0;
for(; i < 6; ++i) {
doc["id"] = std::to_string(i);
doc["title"] = titles[i%3];
doc["rating"] = 8.5;
coll1->add(doc.dump());
}
//add the last title a few more times so it becomes the top facet value
for(; i < 10; ++i) {
doc["id"] = std::to_string(i);
doc["title"] = titles[2];
doc["rating"] = 8.5;
coll1->add(doc.dump());
}
std::vector<std::string> facets = {"title"};
//limit max facets to 2
nlohmann::json results = coll1->search("*", {"title"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 2,"", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("The Dark Knight", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(6, results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("The Shawshank Redemption", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
ASSERT_EQ(2, results["facet_counts"][0]["counts"][1]["count"]);
}
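// Removing documents should decrement the corresponding facet counts.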
TEST_F(CollectionOptimizedFacetingTest, StringFacetsCountListRemoveTest) {
//delete records and check if counts are updated
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("rating", field_types::FLOAT, false)};
std::vector<sort_by> sort_fields = {sort_by("rating", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "rating").get();
}
std::vector<std::string> titles {"The Shawshank Redemption", "The Godfather", "The Dark Knight"};
nlohmann::json doc;
int i = 0;
for(; i < 6; ++i) {
doc["id"] = std::to_string(i);
doc["title"] = titles[i%3];
doc["rating"] = 8.5;
coll1->add(doc.dump());
}
//add the last title a few more times
for(; i < 10; ++i) {
doc["id"] = std::to_string(i);
doc["title"] = titles[2];
doc["rating"] = 8.5;
coll1->add(doc.dump());
}
// remove first doc
coll1->remove("0");
std::vector<std::string> facets = {"title"};
// limit max facet values to 2
nlohmann::json results = coll1->search("*", {"title"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 2,"", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("The Dark Knight", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(6, results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("The Godfather", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ(2, results["facet_counts"][0]["counts"][1]["count"]);
// another collection with a single facet value
auto coll2 = collectionManager.create_collection("coll2", 4, fields, "rating").get();
doc["id"] = "0";
doc["title"] = titles[0];
doc["rating"] = 8.5;
coll2->add(doc.dump());
doc["id"] = "1";
coll2->add(doc.dump());
coll2->remove("0");
results = coll2->search("*", {"title"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 2,"", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("The Shawshank Redemption", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"]);
}
TEST_F(CollectionOptimizedFacetingTest, RangeFacetAlphanumericLabels) {
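// range facet labels can be alphanumeric (e.g. "10thAD") and the last range can be open-ended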
std::vector<field> fields = {field("monuments", field_types::STRING, false),
field("year", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "",
{},{}).get();
nlohmann::json doc;
doc["id"] = "0";
doc["monuments"] = "Statue Of Unity";
doc["year"] = 2018;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["monuments"] = "Taj Mahal";
doc["year"] = 1653;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["monuments"] = "Mysore Palace";
doc["year"] = 1897;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "3";
doc["monuments"] = "Chennakesava Temple";
doc["year"] = 1117;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"year(10thAD:[1000,1500], 15thAD:[1500,2000], 20thAD:[2000, ])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true, 6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("15thAD", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ("20thAD", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"][2]["count"]);
ASSERT_EQ("10thAD", results["facet_counts"][0]["counts"][2]["value"]);
}
TEST_F(CollectionOptimizedFacetingTest, RangeFacetsFloatRange) {
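// range facet bounds can be floating point values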
std::vector<field> fields = {field("name", field_types::STRING, false),
field("inches", field_types::FLOAT, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "TV 1";
doc["inches"] = 32.4;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "TV 2";
doc["inches"] = 55;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["name"] = "TV 3";
doc["inches"] = 55.6;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"inches(small:[0, 55.5])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("small", results["facet_counts"][0]["counts"][0]["value"]);
results = coll1->search("*", {},
"", {"inches(big:[55, 55.6])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("big", results["facet_counts"][0]["counts"][0]["value"]);
}
TEST_F(CollectionOptimizedFacetingTest, RangeFacetsMinMaxRange) {
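// range facets support open lower and upper bounds, e.g. [,55] and [55, ]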
std::vector<field> fields = {field("name", field_types::STRING, false),
field("inches", field_types::FLOAT, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "TV 1";
doc["inches"] = 32.4;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "TV 2";
doc["inches"] = 55;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["name"] = "TV 3";
doc["inches"] = 55.6;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"inches(small:[0, 55], large:[55, ])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("large", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ("small", results["facet_counts"][0]["counts"][1]["value"]);
results = coll1->search("*", {},
"", {"inches(small:[,55])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("small", results["facet_counts"][0]["counts"][0]["value"]);
}
TEST_F(CollectionOptimizedFacetingTest, RangeFacetRangeLabelWithSpace) {
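// range facet labels may contain spaces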
std::vector<field> fields = {field("name", field_types::STRING, false),
field("inches", field_types::FLOAT, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "TV 1";
doc["inches"] = 32.4;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "TV 2";
doc["inches"] = 55;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["name"] = "TV 3";
doc["inches"] = 55.6;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"inches(small tvs with display size:[0,55])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("small tvs with display size", results["facet_counts"][0]["counts"][0]["value"]);
}
TEST_F(CollectionOptimizedFacetingTest, RangeFacetsWithSortDisabled) {
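// range faceting should still work when sorting is disabled on the faceted fields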
std::vector<field> fields = {field("name", field_types::STRING, false, false, true, "", 1),
field("brand", field_types::STRING, true, false, true, "", -1),
field("price", field_types::FLOAT, true, false, true, "", -1)};
Collection* coll2 = collectionManager.create_collection(
"coll2", 1, fields, "", 0, "",
{},{}).get();
nlohmann::json doc;
doc["name"] = "keyboard";
doc["id"] = "pd-1";
doc["brand"] = "Logitech";
doc["price"] = 49.99;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
doc["name"] = "mouse";
doc["id"] = "pd-2";
doc["brand"] = "Logitech";
doc["price"] = 29.99;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
auto results = coll2->search("*", {},
"brand:=Logitech", {"price(Low:[0, 30], Medium:[30, 75], High:[75, ])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
// when the value index is forced, range faceting still works
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("Low", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ("Medium", results["facet_counts"][0]["counts"][1]["value"]);
}
TEST_F(CollectionOptimizedFacetingTest, TopKFaceting) {
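// with top_k:true, facet counts are computed only over the top-ranked hits instead of all matching documents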
std::vector<field> fields = {field("name", field_types::STRING, true, false, true, "", 1),
field("price", field_types::FLOAT, true, false, true, "", 0)};
Collection* coll2 = collectionManager.create_collection(
"coll2", 1, fields, "", 0, "",
{},{}).get();
nlohmann::json doc;
for(int i=0; i < 500; ++i) {
doc["name"] = "jeans";
doc["price"] = 49.99;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
doc["name"] = "narrow jeans";
doc["price"] = 29.99;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
}
// normal facet (without top_k)
auto results = coll2->search("jeans", {"name"},
"", {"name"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("name", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("jeans", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(500, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("narrow jeans", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ(500, (int) results["facet_counts"][0]["counts"][1]["count"]);
//facet with top_k
results = coll2->search("jeans", {"name"},
"", {"name(top_k:true)"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("name", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("jeans", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(250, (int) results["facet_counts"][0]["counts"][0]["count"]);
// only some of the facets use top_k
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:true)", "price"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
ASSERT_EQ(2, results["facet_counts"].size());
ASSERT_EQ("name", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("jeans", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(250, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("price", results["facet_counts"][1]["field_name"]);
ASSERT_EQ(2, results["facet_counts"][1]["counts"].size());
ASSERT_EQ("49.99", results["facet_counts"][1]["counts"][0]["value"]);
ASSERT_EQ(500, (int) results["facet_counts"][1]["counts"][0]["count"]);
ASSERT_EQ("29.99", results["facet_counts"][1]["counts"][1]["value"]);
ASSERT_EQ(500, (int) results["facet_counts"][1]["counts"][1]["count"]);
}
TEST_F(CollectionOptimizedFacetingTest, TopKFacetValidation) {
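// validates parsing of the top_k facet parameter, including its use alongside sort_by and range facet specs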
std::vector<field> fields = {field("name", field_types::STRING, true, false, true, "", 1),
field("price", field_types::FLOAT, true, false, true, "", 1)};
Collection* coll2 = collectionManager.create_collection(
"coll2", 1, fields, "", 0, "",
{},{}).get();
// '=' separator used instead of ':'
auto results = coll2->search("jeans", {"name"}, "",
{"name(top_k=true)"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_FALSE(results.ok());
ASSERT_EQ("Invalid facet format.", results.error());
//typo in top_k
results = coll2->search("jeans", {"name"}, "",
{"name(top-k:true)"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_FALSE(results.ok());
ASSERT_EQ("Invalid facet param `top-k`.", results.error());
results = coll2->search("jeans", {"name"}, "",
{"name(topk:true)"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_FALSE(results.ok());
ASSERT_EQ("Invalid facet param `topk`.", results.error());
// value should be a boolean
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:10)"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_FALSE(results.ok());
ASSERT_EQ("top_k string format is invalid.", results.error());
// correct value
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:false)"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_TRUE(results.ok());
//with sort params
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:false, sort_by:_alpha:desc)"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_TRUE(results.ok());
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:false, sort_by:price:desc)"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_TRUE(results.ok());
//with range facets
results = coll2->search("jeans", {"name"}, "",
{"price(top_k:false, economic:[0, 30], Luxury:[30, 50])"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_TRUE(results.ok());
results = coll2->search("jeans", {"name"}, "",
{"price(economic:[0, 30], top_k:true, Luxury:[30, 50])"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_TRUE(results.ok());
results = coll2->search("jeans", {"name"}, "",
{"price(economic:[0, 30], Luxury:[30, 50], top_k:true)"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_TRUE(results.ok());
// missing ',' separator
results = coll2->search("jeans", {"name"}, "",
{"price(economic:[0, 30], Luxury:[30, 50] top_k:true)"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_FALSE(results.ok());
ASSERT_EQ("Invalid facet format.", results.error());
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:false sort_by:_alpha:desc)"}, {}, {2},
10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values");
ASSERT_FALSE(results.ok());
ASSERT_EQ("top_k string format is invalid.", results.error());
}
| 163,822 | C++ | .cpp | 2,812 | 43.512091 | 146 | 0.490465 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,697 | num_tree_test.cpp | typesense_typesense/test/num_tree_test.cpp |
#include <gtest/gtest.h>
#include <art.h>
#include "num_tree.h"
TEST(NumTreeTest, Searches) {
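// basic EQUALS / GREATER_THAN(_EQUALS) / LESS_THAN(_EQUALS) lookups on num_tree_t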
num_tree_t tree;
tree.insert(-1200, 0);
tree.insert(-1750, 1);
tree.insert(0, 2);
tree.insert(100, 3);
tree.insert(2000, 4);
tree.insert(-1200, 5);
tree.insert(100, 6);
uint32_t* ids = nullptr;
size_t ids_len;
tree.search(NUM_COMPARATOR::EQUALS, -1750, &ids, ids_len);
ASSERT_EQ(1, ids_len);
ASSERT_EQ(1, ids[0]);
delete [] ids;
ids = nullptr;
tree.search(NUM_COMPARATOR::GREATER_THAN_EQUALS, -1200, &ids, ids_len);
ASSERT_EQ(6, ids_len);
delete [] ids;
ids = nullptr;
tree.search(NUM_COMPARATOR::GREATER_THAN, -1200, &ids, ids_len);
ASSERT_EQ(4, ids_len);
delete [] ids;
ids = nullptr;
tree.search(NUM_COMPARATOR::LESS_THAN_EQUALS, 100, &ids, ids_len);
ASSERT_EQ(6, ids_len);
delete [] ids;
ids = nullptr;
tree.search(NUM_COMPARATOR::LESS_THAN, 100, &ids, ids_len);
ASSERT_EQ(4, ids_len);
delete [] ids;
ids = nullptr;
}
TEST(NumTreeTest, EraseFullList) {
num_tree_t tree;
// this stores the IDs as a full list
for(size_t i = 0; i < 200; i++) {
tree.insert(0, i);
}
// we erase all but 1 ID
for(size_t i = 0; i < 199; i++) {
tree.remove(0, i);
}
// now try searching for the value
uint32_t* ids = nullptr;
size_t ids_len;
tree.search(NUM_COMPARATOR::EQUALS, 0, &ids, ids_len);
ASSERT_EQ(1, ids_len);
ASSERT_EQ(199, ids[0]);
delete [] ids;
ids = nullptr;
// deleting the last ID as well
tree.remove(0, 199);
tree.search(NUM_COMPARATOR::EQUALS, 0, &ids, ids_len);
ASSERT_EQ(nullptr, ids);
}
TEST(NumTreeTest, Iterator) {
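// exercises num_tree_t::iterator_t: validity checks, reset(), next() and skip_to()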
num_tree_t compact_tree;
compact_tree.insert(-1200, 0);
compact_tree.insert(-1750, 1);
compact_tree.insert(0, 2);
compact_tree.insert(100, 3);
compact_tree.insert(2000, 4);
compact_tree.insert(-1200, 5);
compact_tree.insert(100, 6);
auto iterator = num_tree_t::iterator_t(&compact_tree, EQUALS, 1);
ASSERT_FALSE(iterator.is_valid);
iterator.reset();
ASSERT_FALSE(iterator.is_valid);
iterator = num_tree_t::iterator_t(&compact_tree, GREATER_THAN_EQUALS, 0);
ASSERT_FALSE(iterator.is_valid);
iterator.reset();
ASSERT_FALSE(iterator.is_valid);
iterator = num_tree_t::iterator_t(&compact_tree, EQUALS, 0);
std::vector<uint32_t> expected_ids = {2};
for (const auto& id: expected_ids) {
ASSERT_TRUE(iterator.is_valid);
ASSERT_EQ(id, iterator.seq_id);
iterator.next();
}
ASSERT_FALSE(iterator.is_valid);
iterator.reset();
for (const auto& id: expected_ids) {
ASSERT_TRUE(iterator.is_valid);
ASSERT_EQ(id, iterator.seq_id);
iterator.next();
}
ASSERT_FALSE(iterator.is_valid);
iterator = num_tree_t::iterator_t(&compact_tree, EQUALS, -1200);
expected_ids = {0, 5};
for (const auto& id: expected_ids) {
ASSERT_TRUE(iterator.is_valid);
ASSERT_EQ(id, iterator.seq_id);
iterator.next();
}
ASSERT_FALSE(iterator.is_valid);
iterator.reset();
for (const auto& id: expected_ids) {
ASSERT_TRUE(iterator.is_valid);
ASSERT_EQ(id, iterator.seq_id);
iterator.next();
}
ASSERT_FALSE(iterator.is_valid);
iterator.reset();
iterator.skip_to(1);
ASSERT_TRUE(iterator.is_valid);
ASSERT_EQ(5, iterator.seq_id);
iterator.skip_to(10);
ASSERT_FALSE(iterator.is_valid);
num_tree_t tree;
for (uint32_t i = 0; i < 100; i++) {
tree.insert(1, i);
}
iterator = num_tree_t::iterator_t(&tree, EQUALS, 1);
expected_ids = {};
for (uint32_t i = 0; i < 100; i++) {
expected_ids.push_back(i);
}
for (const auto& id: expected_ids) {
ASSERT_TRUE(iterator.is_valid);
ASSERT_EQ(id, iterator.seq_id);
iterator.next();
}
ASSERT_FALSE(iterator.is_valid);
iterator.reset();
for (const auto& id: expected_ids) {
ASSERT_TRUE(iterator.is_valid);
ASSERT_EQ(id, iterator.seq_id);
iterator.next();
}
ASSERT_FALSE(iterator.is_valid);
iterator.reset();
iterator.skip_to(50);
ASSERT_TRUE(iterator.is_valid);
ASSERT_EQ(50, iterator.seq_id);
iterator.skip_to(100);
ASSERT_FALSE(iterator.is_valid);
}
| 4,395 | C++ | .cpp | 141 | 25.595745 | 77 | 0.619172 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,698 | collection_specific_test.cpp | typesense_typesense/test/collection_specific_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionSpecificTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_specific";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionSpecificTest, SearchTextWithHyphen) {
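// a fully hyphenated query should match a fully hyphenated title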
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "open-access-may-become-mandatory-for-nih-funded-research";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("open-access-may-become-mandatory-for-nih-funded-research",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, ExplicitHighlightFieldsConfig) {
// Allow highlighting independent of query_by fields
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("author", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The quick brown fox was too fast.";
doc["description"] = "A story about a brown fox who was fast.";
doc["author"] = "David Pernell";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("brown fox pernell", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 10000, true, false, true, "description,author").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(2, results["hits"][0]["highlights"].size());
ASSERT_EQ("description", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("A story about a <mark>brown</mark> <mark>fox</mark> who was fast.", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("author", results["hits"][0]["highlights"][1]["field"].get<std::string>());
ASSERT_EQ("David <mark>Pernell</mark>", results["hits"][0]["highlights"][1]["snippet"].get<std::string>());
// excluded fields are NOT respected if explicit highlight fields are provided
results = coll1->search("brown fox pernell", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}, 1, spp::sparse_hash_set<std::string>(),
{"description"}, 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 10000, true, false, true, "description,author").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(2, results["hits"][0]["highlights"].size());
ASSERT_FALSE(results["hits"][0]["document"].contains("description"));
ASSERT_EQ("description", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("author", results["hits"][0]["highlights"][1]["field"].get<std::string>());
// query_by not matching field selected for highlighting
results = coll1->search("fox", {"title", "author"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}, 1, spp::sparse_hash_set<std::string>(),
{"description"}, 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {1,1}, 10000, true, false, true, "description").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("description", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("A story about a brown <mark>fox</mark> who was fast.", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_FALSE(results["hits"][0]["highlights"][0].contains("value"));
// query_by not matching field selected for full highlighting
results = coll1->search("fox", {"title", "author"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}, 1, spp::sparse_hash_set<std::string>(),
{"description"}, 10, "", 30, 4, "description", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {1,1}, 10000, true, false, true, "description").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("description", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("A story about a brown <mark>fox</mark> who was fast.", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_TRUE(results["hits"][0]["highlights"][0].contains("value"));
ASSERT_EQ("A story about a brown <mark>fox</mark> who was fast.", results["hits"][0]["highlights"][0]["value"].get<std::string>());
// wildcard query with search field names
results = coll1->search("*", {"title", "author"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}, 1, spp::sparse_hash_set<std::string>(),
{"description"}, 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {1,1}, 10000, true, false, true, "description,author").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(0, results["hits"][0]["highlights"].size());
// wildcard query without search field names
results = coll1->search("*", {}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}, 1, spp::sparse_hash_set<std::string>(),
{"description"}, 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {1,1}, 10000, true, false, true, "description,author").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(0, results["hits"][0]["highlights"].size());
// highlight field that does not exist
results = coll1->search("brown fox pernell", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 10000, true, false, true, "not-found").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(0, results["hits"][0]["highlights"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, ExactSingleFieldMatch) {
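// an exact token match ("Charger") should rank above a typo match ("Chargex")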
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Fast Electric Charger";
doc1["description"] = "A product you should buy.";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Omega Chargex";
doc2["description"] = "Chargex is a great product.";
doc2["points"] = 200;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("charger", {"title", "description"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true, true}, 10,
spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(), 10, "", 30,
4, "title", 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["typo_prefix_score"]);
ASSERT_EQ(2, results["hits"][1]["text_match_info"]["typo_prefix_score"]);
// with typo_tokens_threshold = 1, only exact token match is fetched
results = coll1->search("charger", {"title", "description"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true, true}, 10,
spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(), 10, "", 30,
4, "title", 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["typo_prefix_score"]);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, CheckProgressiveTypoSearching) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
// two records, one with single typo and the other with 2 typos
// only the single typo record is returned with typo_tokens_threshold of 1
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Fast Conveniant Charger";
doc1["description"] = "A product you should buy.";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Omega";
doc2["description"] = "Conxeniant product.";
doc2["points"] = 200;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("convenient", {"title", "description"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true, true}, 10,
spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(), 10, "", 30,
4, "title", 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(2, results["hits"][0]["text_match_info"]["typo_prefix_score"]);
// with typo_tokens_threshold = 10, both matches are fetched
results = coll1->search("convenient", {"title", "description"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true, true}, 10,
spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(), 10, "", 30,
4, "title", 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(2, results["hits"][0]["text_match_info"]["typo_prefix_score"]);
ASSERT_EQ(4, results["hits"][1]["text_match_info"]["typo_prefix_score"]);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, OrderMultiFieldFuzzyMatch) {
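// by default the higher-points description match ranks first; a higher title weight pushes the title match ahead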
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Moto Insta Charge";
doc1["description"] = "Share information with this device.";
doc1["points"] = 50;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Portable USB Store";
doc2["description"] = "Use it to charge your phone.";
doc2["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("charger", {"title", "description"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true, true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
// use weights to push title matching ahead
results = coll1->search("charger", {"title", "description"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true, true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {2, 1}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, TypoBeforeDropTokens) {
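// a typo match is preferred over dropping a token; a higher typo_tokens_threshold also surfaces the dropped-token match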
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Josh Wexler";
doc1["points"] = 500;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Josh Lipson";
doc2["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("Josh Lixson", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("Josh Lixson", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, FieldWeighting) {
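// a higher weight on the description field ranks the description match above the title match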
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "The Quick Brown Fox";
doc1["description"] = "Share information with this device.";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Random Title";
doc2["description"] = "The Quick Brown Fox";
doc2["points"] = 50;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("brown fox", {"title", "description"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true, true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 4}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, MultiFieldArrayRepeatingTokens) {
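// query tokens spread across string and array fields should match only the relevant record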
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("attrs", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "E182-72/4";
doc1["description"] = "Nexsan Technologies 18 SAN Array - 18 x HDD Supported - 18 x HDD Installed";
doc1["attrs"] = {"Hard Drives Supported > 18", "Hard Drives Installed > 18", "SSD Supported > 18"};
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "RV345-K9-NA";
doc2["description"] = "Cisco RV345P Router - 18 Ports";
doc2["attrs"] = {"Number of Ports > 18", "Product Type > Router"};
doc2["points"] = 50;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("rv345 cisco 18", {"title", "description", "attrs"}, "", {}, {}, {1}, 10,
1, FREQUENCY, {true, true, true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, ExactMatchOnPrefix) {
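// an exact match ("GED") should rank above prefix matches even when it has fewer points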
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Yeshivah Gedolah High School";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "GED";
doc2["points"] = 50;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("ged", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 1).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, TypoPrefixSearchWithoutPrefixEnabled) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Cisco SG25026HP Gigabit Smart Switch";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("SG25026H", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, PrefixWithTypos) {
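// prefix search with typos should not produce spurious matches (expects no hits)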
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "PRÍNCIPE - Restaurante e Snack Bar";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("maria", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("maria", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, PrefixVsExactMatch) {
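// a prefix match on "Rational" should rank above typo-corrected matches on "Ratios"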
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Equivalent Ratios"},
{"Simplifying Ratios 1"},
{"Rational and Irrational Numbers"},
{"Simplifying Ratios 2"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("ration",
{"title"}, "", {}, {}, {1}, 10, 1, FREQUENCY, {true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_EQ(4, results["hits"].size());
ASSERT_STREQ("2", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("3", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][3]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, PrefixWithTypos2) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Av. Mal. Humberto Delgado 206, 4760-012 Vila Nova de Famalicão, Portugal";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("maria", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("maria", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, ImportDocumentWithIntegerID) {
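// a non-string "id" value must be rejected with a clear error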
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = 100;
doc1["title"] = "East India House on Wednesday evening";
doc1["points"] = 100;
auto add_op = coll1->add(doc1.dump());
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Document's `id` field should be a string.", add_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, CreateManyCollectionsAndDeleteOneOfThem) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
for(size_t i = 0; i <= 10; i++) {
const std::string& coll_name = "coll" + std::to_string(i);
collectionManager.drop_collection(coll_name);
ASSERT_TRUE(collectionManager.create_collection(coll_name, 1, fields, "points").ok());
}
auto coll1 = collectionManager.get_collection_unsafe("coll1");
auto coll10 = collectionManager.get_collection_unsafe("coll10");
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The quick brown fox was too fast.";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
ASSERT_TRUE(coll10->add(doc.dump()).ok());
collectionManager.drop_collection("coll1", true);
// Record with id "0" should exist in coll10
ASSERT_TRUE(coll10->get("0").ok());
for(size_t i = 0; i <= 10; i++) {
const std::string& coll_name = "coll" + std::to_string(i);
collectionManager.drop_collection(coll_name);
}
}
TEST_F(CollectionSpecificTest, DeleteOverridesAndSynonymsOnDiskDuringCollDrop) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
for (size_t i = 0; i <= 10; i++) {
const std::string& coll_name = "coll" + std::to_string(i);
collectionManager.drop_collection(coll_name);
ASSERT_TRUE(collectionManager.create_collection(coll_name, 1, fields, "points").ok());
}
auto coll1 = collectionManager.get_collection_unsafe("coll1");
nlohmann::json override_json = {
{"id", "exclude-rule"},
{
"rule", {
{"query", "of"},
{"match", override_t::MATCH_EXACT}
}
}
};
override_json["excludes"] = nlohmann::json::array();
override_json["excludes"][0] = nlohmann::json::object();
override_json["excludes"][0]["id"] = "4";
override_json["excludes"][1] = nlohmann::json::object();
override_json["excludes"][1]["id"] = "11";
override_t override;
override_t::parse(override_json, "", override);
coll1->add_override(override);
// add synonym
coll1->add_synonym(R"({"id": "ipod-synonyms", "synonyms": ["ipod", "i pod", "pod"]})"_json);
collectionManager.drop_collection("coll1");
// overrides should have been deleted from the store
std::vector<std::string> stored_values;
store->scan_fill(Collection::COLLECTION_OVERRIDE_PREFIX, std::string(Collection::COLLECTION_OVERRIDE_PREFIX) + "`",
stored_values);
ASSERT_TRUE(stored_values.empty());
// synonyms should also have been deleted from the store
store->scan_fill(SynonymIndex::COLLECTION_SYNONYM_PREFIX, std::string(SynonymIndex::COLLECTION_SYNONYM_PREFIX) + "`",
stored_values);
ASSERT_TRUE(stored_values.empty());
}
TEST_F(CollectionSpecificTest, SingleCharMatchFullFieldHighlight) {
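// even a single-character token match should be highlighted in both snippet and full value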
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Which of the following is a probable sign of infection?";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("a 3-month", {"title"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {false}, 1,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"title", 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("Which of the following is <mark>a</mark> probable sign of infection?",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("Which of the following is <mark>a</mark> probable sign of infection?",
results["hits"][0]["highlights"][0]["value"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, TokensSpreadAcrossFields) {
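// with a higher title weight, the record containing both tokens in the title ranks first despite lower points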
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Foo bar baz";
doc1["description"] = "Share information with this device.";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Foo Random";
doc2["description"] = "The Bar Fox";
doc2["points"] = 250;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("foo bar", {"title", "description"}, "", {}, {}, {0}, 10,
1, FREQUENCY, {false, false},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {4, 1}).get();
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, GuardAgainstIdFieldInSchema) {
// The "id" field, if defined in the schema, should be ignored
std::vector<field> fields = {field("title", field_types::STRING, false),
field("id", field_types::STRING, false),
field("points", field_types::INT32, false),};
nlohmann::json schema;
schema["name"] = "books";
schema["fields"] = nlohmann::json::array();
schema["fields"][0]["name"] = "title";
schema["fields"][0]["type"] = "string";
schema["fields"][1]["name"] = "id";
schema["fields"][1]["type"] = "string";
schema["fields"][2]["name"] = "points";
schema["fields"][2]["type"] = "int32";
Collection* coll1 = collectionManager.create_collection(schema).get();
ASSERT_EQ(0, coll1->get_schema().count("id"));
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, HandleBadCharactersInStringGracefully) {
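// malformed / truncated JSON input must be rejected without crashing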
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
std::string doc_str = "不推荐。\",\"price\":10.12,\"ratings\":5}";
auto add_op = coll1->add(doc_str);
ASSERT_FALSE(add_op.ok());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, HighlightSecondaryFieldWithPrefixMatch) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Functions and Equations";
doc1["description"] = "Use a function to solve an equation.";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Function of effort";
doc2["description"] = "Learn all about it.";
doc2["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("function", {"title", "description"}, "", {}, {}, {0}, 10,
1, FREQUENCY, {true, true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(2, results["hits"][1]["highlights"].size());
ASSERT_EQ("<mark>Function</mark>s and Equations",
results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("Use a <mark>function</mark> to solve an equation.",
results["hits"][1]["highlights"][1]["snippet"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, HighlightWithDropTokens) {
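// every matching query token should be highlighted, even in a long multi-token query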
std::vector<field> fields = {field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["description"] = "HPE Aruba AP-575 802.11ax Wireless Access Point - TAA Compliant - 2.40 GHz, "
"5 GHz - MIMO Technology - 1 x Network (RJ-45) - Gigabit Ethernet - Bluetooth 5";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("HPE Aruba AP-575 Technology Gigabit Bluetooth 5", {"description"}, "", {}, {}, {0}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "description", 40, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>HPE</mark> <mark>Aruba</mark> <mark>AP-575</mark> 802.11ax Wireless Access Point - "
"TAA Compliant - 2.40 GHz, <mark>5</mark> GHz - MIMO <mark>Technology</mark> - 1 x Network (RJ-45) - "
"<mark>Gigabit</mark> Ethernet - <mark>Bluetooth</mark> <mark>5</mark>",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, HighlightLongFieldWithDropTokens) {
std::vector<field> fields = {field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["description"] = "Tripp Lite USB C to VGA Multiport Video Adapter Converter w/ USB-A Hub, USB-C PD Charging "
"Port & Gigabit Ethernet Port, Thunderbolt 3 Compatible, USB Type C to VGA, USB-C, USB "
"Type-C - for Notebook/Tablet PC - 2 x USB Ports - 2 x USB 3.0 - "
"Network (RJ-45) - VGA - Wired";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("wired charging gigabit port", {"description"}, "", {}, {}, {0}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "description", 1, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("Tripp Lite USB C to VGA Multiport Video Adapter Converter w/ USB-A Hub, "
"USB-C PD <mark>Charging</mark> <mark>Port</mark> & <mark>Gigabit</mark> Ethernet "
"<mark>Port</mark>, Thunderbolt 3 Compatible, USB Type C to VGA, USB-C, USB Type-C - for "
"Notebook/Tablet PC - 2 x USB <mark>Port</mark>s - 2 x USB 3.0 - Network (RJ-45) - "
"VGA - <mark>Wired</mark>",
results["hits"][0]["highlights"][0]["value"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, HighlightWithTypoTokensAndPrefixSearch) {
std::vector<field> fields = {field("username", field_types::STRING, false),
field("name", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["username"] = "Pandaabear";
doc1["name"] = "Panda's Basement";
doc1["tags"] = {"Foobar", "Panda's Basement"};
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["username"] = "Pandaabear";
doc2["name"] = "Pandaabear Basic";
doc2["tags"] = {"Pandaabear Basic"};
doc2["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
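    // {2, 2} allows up to two typos per query_by field and {true, true} enables prefix matching on both
    // fields, which is what lets the partial token "bas" match "Basic" and "Basement" below.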
auto results = coll1->search("pandaabear bas", {"username", "name"},
"", {}, {}, {2, 2}, 10,
1, FREQUENCY, {true, true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(2, results["hits"][0]["highlights"].size());
ASSERT_EQ("<mark>Pandaabear</mark> <mark>Bas</mark>ic",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("<mark>Pandaabear</mark>",
results["hits"][0]["highlights"][1]["snippet"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(2, results["hits"][1]["highlights"].size());
ASSERT_EQ("<mark>Pandaabear</mark>",
results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("Panda's <mark>Bas</mark>ement",
results["hits"][1]["highlights"][1]["snippet"].get<std::string>());
results = coll1->search("pandaabear bas", {"username", "name"},
"", {}, {}, {2, 2}, 10,
1, FREQUENCY, {true, true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(2, results["hits"][0]["highlights"].size());
ASSERT_EQ("<mark>Pandaabear</mark> <mark>Bas</mark>ic",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("<mark>Pandaabear</mark>",
results["hits"][0]["highlights"][1]["snippet"].get<std::string>());
ASSERT_EQ(2, results["hits"][1]["highlights"].size());
ASSERT_EQ("<mark>Pandaabear</mark>",
results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("Panda's <mark>Bas</mark>ement",
results["hits"][1]["highlights"][1]["snippet"].get<std::string>());
results = coll1->search("pandaabear bas", {"username", "tags"},
"", {}, {}, {2, 2}, 10,
1, FREQUENCY, {true, true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(2, results["hits"][0]["highlights"].size());
ASSERT_EQ("<mark>Pandaabear</mark> <mark>Bas</mark>ic",
results["hits"][0]["highlights"][0]["snippets"][0].get<std::string>());
ASSERT_EQ("<mark>Pandaabear</mark>",
results["hits"][0]["highlights"][1]["snippet"].get<std::string>());
ASSERT_EQ(2, results["hits"][1]["highlights"].size());
ASSERT_EQ("<mark>Pandaabear</mark>",
results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("Panda's <mark>Bas</mark>ement",
results["hits"][1]["highlights"][1]["snippets"][0].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, PrefixSearchOnlyOnLastToken) {
std::vector<field> fields = {field("concat", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["concat"] = "SPZ005 SPACEPOLE Spz005 Space Pole Updated!!! Accessories Stands & Equipment Cabinets POS "
"Terminal Stand Spacepole 0 SPZ005";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("spz space", {"concat"},
"", {}, {}, {1}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "concat", 20, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(0, results["hits"][0]["highlights"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, TokenStartingWithSameLetterAsPrevToken) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "John Jack";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "John Williams";
doc2["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("john j", {"name"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, CrossFieldMatchingExactMatchOnSingleField) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "John";
doc1["description"] = "Vegetable Farmer";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "John";
doc2["description"] = "Organic Vegetable Farmer";
doc2["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("john vegetable farmer", {"name", "description"},
"", {}, {}, {0, 0}, 10,
1, FREQUENCY, {true, true},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["num_tokens_dropped"]);
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(0, results["hits"][1]["text_match_info"]["num_tokens_dropped"]);
results = coll1->search("john vegatable farmer", {"name", "description"},
"", {}, {}, {1, 1}, 10,
1, FREQUENCY, {true, true},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["num_tokens_dropped"]);
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(0, results["hits"][1]["text_match_info"]["num_tokens_dropped"]);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, HighlightEmptyArray) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "John";
doc1["tags"] = std::vector<std::string>();
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("john", {"name", "tags"},
"", {}, {}, {0, 0}, 10,
1, FREQUENCY, {true, true},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("name", results["hits"][0]["highlights"][0]["field"]);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, CustomSeparators) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "points", 0, "", {}, {"-"}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "alpha-beta-gamma-omega-zeta";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("gamma", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>",{}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("name", results["hits"][0]["highlights"][0]["field"]);
ASSERT_EQ("alpha-beta-<mark>gamma</mark>-omega-zeta", results["hits"][0]["highlights"][0]["snippet"]);
results = coll1->search("gamma-omega", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>",{}, 1000,
true, false, true, "", false).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("name", results["hits"][0]["highlights"][0]["field"]);
ASSERT_EQ("alpha-beta-<mark>gamma</mark>-<mark>omega</mark>-zeta", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
    // ensure that token separators are validated
nlohmann::json coll_def;
coll_def["fields"] = {
{{"name", "foo"}, {"type", "string"}, {"facet", false}}
};
coll_def["name"] = "foo";
coll_def["token_separators"] = {"foo"};
auto coll_op = collectionManager.create_collection(coll_def);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("`token_separators` should be an array of character symbols.", coll_op.error());
coll_def["token_separators"] = "f";
coll_op = collectionManager.create_collection(coll_def);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("`token_separators` should be an array of character symbols.", coll_op.error());
coll_def["token_separators"] = 123;
coll_op = collectionManager.create_collection(coll_def);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("`token_separators` should be an array of character symbols.", coll_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, CustomSymbolsForIndexing) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "points", 0, "", {"+"}, {}
).get();
nlohmann::json coll_summary = coll1->get_summary_json();
ASSERT_EQ(1, coll_summary["symbols_to_index"].size());
ASSERT_EQ(0, coll_summary["token_separators"].size());
ASSERT_EQ("+", coll_summary["symbols_to_index"][0].get<std::string>());
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Yes, C++ is great!";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Yes, C is great!";
doc2["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("c++", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>",{}, 1000,
true, false, true, "", false).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("name", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("Yes, <mark>C++</mark> is great!", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
// without custom symbols, + should not be indexed, so the "C" record will show up first
Collection* coll2 = collectionManager.create_collection("coll2", 1, fields, "points", 0, "").get();
ASSERT_TRUE(coll2->add(doc1.dump()).ok());
ASSERT_TRUE(coll2->add(doc2.dump()).ok());
results = coll2->search("c++", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>",{}, 1000,
true, false, true, "", false).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
// ensure that symbols are validated
nlohmann::json coll_def;
coll_def["fields"] = {
{{"name", "foo"}, {"type", "string"}, {"facet", false}}
};
coll_def["name"] = "foo";
coll_def["symbols_to_index"] = {"foo"};
auto coll_op = collectionManager.create_collection(coll_def);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("`symbols_to_index` should be an array of character symbols.", coll_op.error());
coll_def["symbols_to_index"] = "f";
coll_op = collectionManager.create_collection(coll_def);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("`symbols_to_index` should be an array of character symbols.", coll_op.error());
coll_def["symbols_to_index"] = 123;
coll_op = collectionManager.create_collection(coll_def);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("`symbols_to_index` should be an array of character symbols.", coll_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, CustomSeparatorsHandleQueryVariations) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "points", 0, "", {}, {"-", ".", "*", "&", "/"}
).get();
nlohmann::json coll_summary = coll1->get_summary_json();
ASSERT_EQ(0, coll_summary["symbols_to_index"].size());
ASSERT_EQ(5, coll_summary["token_separators"].size());
ASSERT_EQ("-", coll_summary["token_separators"][0].get<std::string>());
ASSERT_EQ(".", coll_summary["token_separators"][1].get<std::string>());
ASSERT_EQ("*", coll_summary["token_separators"][2].get<std::string>());
ASSERT_EQ("&", coll_summary["token_separators"][3].get<std::string>());
ASSERT_EQ("/", coll_summary["token_separators"][4].get<std::string>());
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "1&1 Internet Limited";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "bofrost*dienstl";
doc2["points"] = 100;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "just...grilled";
doc3["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
auto results = coll1->search("bofrost*dienstl", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>",{}, 1000,
true, false, true, "", false).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>bofrost</mark>*<mark>dienstl</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("bofrost * dienstl", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>",{}, 1000,
true, false, true, "", false).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>bofrost</mark>*<mark>dienstl</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("bofrost dienstl", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>",{}, 1000,
true, false, true, "", false).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>bofrost</mark>*<mark>dienstl</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("1&1", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>",{}, 1000,
true, false, true, "", false).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>1</mark>&<mark>1</mark> Internet Limited", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("1 & 1", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>",{}, 1000,
true, false, true, "", false).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>1</mark>&<mark>1</mark> Internet Limited", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("just grilled", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>",{}, 1000,
true, false, true, "", false).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>just</mark>...<mark>grilled</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, TypoCorrectionWithFaceting) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("brand", field_types::STRING, true),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "points", 0, "", {}, {}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Salt";
doc1["brand"] = "Salpices";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Turmeric";
doc2["brand"] = "Salpices";
doc2["points"] = 100;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Turmeric";
doc3["brand"] = "Salpices";
doc3["points"] = 100;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["name"] = "Tomato";
doc4["brand"] = "Saltato";
doc4["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
auto results = coll1->search("salt", {"name", "brand"},
"", {"brand"}, {}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(3, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
results = coll1->search("salt", {"name", "brand"},
"brand: Salpices", {"brand"}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(3, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
    // without exhaustive search (the trailing boolean flipped to false below), the count will be just 1 for the non-filtered query
results = coll1->search("salt", {"name", "brand"},
"", {"brand"}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", false).get();
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, MultiFieldVerbatimMatchesShouldBeWeighted) {
    // 2 exact matches on low-weighted fields should not overpower a single exact match on a high-weighted field
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, false),
field("label", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Twin";
doc1["category"] = "kids";
doc1["label"] = "kids";
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Kids";
doc2["category"] = "children";
doc2["label"] = "children";
doc2["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
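    // the {6, 1, 1} argument after the highlight tags supplies per-field query weights for name, category
    // and label respectively, so the single exact match on the heavily weighted "name" field should win.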
auto results = coll1->search("kids", {"name", "category", "label"},
"", {}, {}, {0, 0, 0}, 10,
1, FREQUENCY, {false, false, false},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {6, 1, 1}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, ZeroWeightedField) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Energy Kids";
doc1["category"] = "kids";
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Amazing Twin";
doc2["category"] = "kids";
doc2["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
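    // per-field query weights {1, 0}: "category" is weighted 1 and "name" 0, following the order of the
    // query_by fields passed below.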
auto results = coll1->search("kids", {"category", "name"},
"", {}, {}, {0, 0}, 10,
1, FREQUENCY, {false, false},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 0}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, ImportDocumentWithRepeatingIDInTheSameBatch) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Levis";
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "0";
doc2["name"] = "Amazing from Levis";
doc2["points"] = 5;
std::vector<std::string> import_records;
import_records.push_back(doc1.dump());
import_records.push_back(doc2.dump());
nlohmann::json document;
nlohmann::json import_response = coll1->add_many(import_records, document);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(1, import_response["num_imported"].get<int>());
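    // add_many() rewrites each entry of import_records in place with a per-document JSON result, which is
    // why the entries are re-parsed below to inspect the individual success/error fields.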
ASSERT_TRUE(nlohmann::json::parse(import_records[0])["success"].get<bool>());
ASSERT_FALSE(nlohmann::json::parse(import_records[1])["success"].get<bool>());
ASSERT_EQ("A document with id 0 already exists.",
nlohmann::json::parse(import_records[1])["error"].get<std::string>());
auto results = coll1->search("levis", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {0},
1000, true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("Levis", results["hits"][0]["document"]["name"].get<std::string>());
// should allow updates though
import_records.clear();
import_records.push_back(doc1.dump());
import_records.push_back(doc2.dump());
import_response = coll1->add_many(import_records, document, index_operation_t::UPDATE);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(2, import_response["num_imported"].get<int>());
// should allow upserts also
import_records.clear();
import_records.push_back(doc1.dump());
import_records.push_back(doc2.dump());
import_response = coll1->add_many(import_records, document, index_operation_t::UPSERT);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(2, import_response["num_imported"].get<int>());
// repeated ID is NOT rejected if the first ID is not indexed due to some error
import_records.clear();
doc1.erase("name");
doc1["id"] = "100";
doc2["id"] = "100";
import_records.push_back(doc1.dump());
import_records.push_back(doc2.dump());
import_response = coll1->add_many(import_records, document);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(1, import_response["num_imported"].get<int>());
ASSERT_FALSE(nlohmann::json::parse(import_records[0])["success"].get<bool>());
ASSERT_EQ("Field `name` has been declared in the schema, but is not found in the document.",
nlohmann::json::parse(import_records[0])["error"].get<std::string>());
ASSERT_TRUE(nlohmann::json::parse(import_records[1])["success"].get<bool>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, UpdateOfTwoDocsWithSameIdWithinSameBatch) {
std::vector<field> fields = {field("last_chance", field_types::BOOL, false, true),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
    // second update should reflect the result of the first update
std::vector<std::string> updates = {
R"({"id": "0", "last_chance": false})",
R"({"id": "0", "points": 200})",
};
nlohmann::json update_doc;
auto import_response = coll1->add_many(updates, update_doc, UPDATE);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(2, import_response["num_imported"].get<int>());
auto results = coll1->search("*", {},
"", {}, {}, {0}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, UpsertOfTwoDocsWithSameIdWithinSameBatch) {
std::vector<field> fields = {field("last_chance", field_types::BOOL, false, true),
field("points", field_types::INT32, false, true),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
    // each upsert replaces the whole document, so only the fields of the last upsert ("points") should remain
std::vector<std::string> upserts = {
R"({"id": "0", "last_chance": true})",
R"({"id": "0", "points": 200})",
};
nlohmann::json update_doc;
auto import_response = coll1->add_many(upserts, update_doc, UPSERT);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(2, import_response["num_imported"].get<int>());
auto results = coll1->search("*", {},
"", {}, {}, {0}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_TRUE(results["hits"][0]["document"].contains("points"));
ASSERT_FALSE(results["hits"][0]["document"].contains("last_chance"));
ASSERT_EQ(200, results["hits"][0]["document"]["points"].get<int32_t>());
ASSERT_EQ(1, coll1->_get_index()->_get_numerical_index().at("points")->size());
ASSERT_EQ(0, coll1->_get_index()->_get_numerical_index().at("last_chance")->size());
// update without doc id
upserts = {
R"({"last_chance": true})",
};
import_response = coll1->add_many(upserts, update_doc, UPDATE);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(0, import_response["num_imported"].get<int>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, UpdateUpsertOfDocWithMissingFields) {
std::vector<field> fields = {field("last_chance", field_types::BOOL, false, true),
field("points", field_types::INT32, false, true),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["last_chance"] = true;
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
    // upsert a doc with missing fields: the missing fields should be removed from the index
std::vector<std::string> upserts = {
R"({"id": "0"})"
};
nlohmann::json update_doc;
auto import_response = coll1->add_many(upserts, update_doc, UPSERT);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(1, import_response["num_imported"].get<int>());
auto results = coll1->search("*", {},
"", {}, {}, {0}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(1, results["hits"][0]["document"].size());
ASSERT_EQ(0, coll1->_get_index()->_get_numerical_index().at("points")->size());
ASSERT_EQ(0, coll1->_get_index()->_get_numerical_index().at("last_chance")->size());
// put the original doc back
ASSERT_TRUE(coll1->add(doc1.dump(), UPSERT).ok());
results = coll1->search("*", {},
"", {}, {}, {0}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(3, results["hits"][0]["document"].size());
// update doc with missing fields: existing fields should NOT be removed
upserts = {
R"({"id": "0"})"
};
import_response = coll1->add_many(upserts, update_doc, UPDATE);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(1, import_response["num_imported"].get<int>());
results = coll1->search("*", {},
"", {}, {}, {0}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(3, results["hits"][0]["document"].size());
ASSERT_EQ(1, coll1->_get_index()->_get_numerical_index().at("points")->size());
ASSERT_EQ(1, coll1->_get_index()->_get_numerical_index().at("last_chance")->size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, FacetParallelizationVerification) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
// choose a number that's not a multiple of 4
for(size_t i = 0; i < 18; i++) {
nlohmann::json doc1;
doc1["id"] = std::to_string(i);
doc1["name"] = "Levis";
doc1["category"] = "jeans";
doc1["points"] = 3;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
}
auto results = coll1->search("levis", {"name"},
"", {"category"}, {}, {0}, 10,
1, FREQUENCY, {false},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {0},
1000, true).get();
ASSERT_STREQ("category", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(18, (int) results["facet_counts"][0]["counts"][0]["count"]);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, VerbatimMatchShouldConsiderTokensMatchedAcrossAllFields) {
// dropped tokens on a single field cannot be deemed as verbatim match
std::vector<field> fields = {field("name", field_types::STRING, false),
field("brand", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Hamburger";
doc1["brand"] = "Burger King";
doc1["points"] = 10;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Hamburger Bun";
doc2["brand"] = "Trader Joe’s";
doc2["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("hamburger trader", {"name", "brand"},
"", {}, {}, {0, 0}, 10,
1, FREQUENCY, {false, false},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1},
1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Potato Wedges";
doc3["brand"] = "McDonalds";
doc3["points"] = 10;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["name"] = "Hot Potato Wedges";
doc4["brand"] = "KFC Inc.";
doc4["points"] = 5;
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
results = coll1->search("potato wedges kfc", {"name", "brand"},
"", {}, {}, {0, 0}, 10,
1, FREQUENCY, {false, false},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1},
1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, CustomNumTyposConfiguration) {
    // the minimum token lengths required for 1-typo and 2-typo matching can be customized per search
std::vector<field> fields = {field("name", field_types::STRING, false),
field("brand", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Hamburger";
doc1["brand"] = "Burger and King";
doc1["points"] = 10;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
    // by default, a typo on a 3-char token is ignored (a minimum token length of 4 is needed)
auto results = coll1->search("asd", {"brand"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {false},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {1},
1000, true, false, true, "", false, 60000*100).get();
ASSERT_EQ(0, results["hits"].size());
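    // the trailing (3, 7) arguments lower the minimum token lengths required for 1-typo and 2-typo
    // matching, so the 3-char query is now allowed to match with a single typo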
results = coll1->search("asd", {"brand"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {false},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {1},
1000, true, false, true, "", false, 60000*100, 3, 7).get();
ASSERT_EQ(1, results["hits"].size());
    // by default, 2 typos are not tolerated on a 6-letter word
results = coll1->search("bixger", {"brand"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {false},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {1},
1000, true, false, true, "", false, 60000*100).get();
ASSERT_EQ(0, results["hits"].size());
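    // lowering the 2-typo minimum length to 6 (the trailing 3, 6 arguments) lets the 6-letter query
    // "bixger" match "burger" with two typos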
results = coll1->search("bixger", {"brand"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {false},
2, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {1},
1000, true, false, true, "", false, 60000*100, 3, 6).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, RepeatingStringArrayTokens) {
std::vector<std::string> tags;
    // case where even the first document containing a token cannot fit into a compact posting list
for(size_t i = 0; i < 200; i++) {
tags.emplace_back("spools");
}
std::vector<field> fields = {field("tags", field_types::STRING_ARRAY, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["tags"] = tags;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("spools", {"tags"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
    // case where the second document containing a token cannot fit into a compact posting list
tags = {"foobar"};
doc["tags"] = tags;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
for(size_t i = 0; i < 200; i++) {
tags.emplace_back("foobar");
}
doc["tags"] = tags;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
results = coll1->search("foobar", {"tags"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, HighlightOnPrefixRegression) {
    // regression: a short query matching the leading token should still return the hit with highlighting
std::vector<field> fields = {field("title", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["title"] = "And then there were a storm.";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("and", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, DroppedTokensShouldNotBeUsedForPrefixSearch) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Dog Shoemaker";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Shoe and Sock";
doc2["points"] = 200;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("shoe cat", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("cat shoe", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, SearchShouldSplitAndJoinTokens) {
std::vector<field> fields = {field("title", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["title"] = "The nonstick pressure cooker is a great invention.";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("non stick", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("pressurecooker", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("nonstick pressurecooker", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("the pressurecooker", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("pressurecooker great", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
// splitting requires tokens to co-occur as a phrase in the dataset
results = coll1->search("the pressureis", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("greatcooker", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("t h e", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("c o o k e r", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
// three word split won't work
results = coll1->search("nonstickpressurecooker", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(0, results["hits"].size());
// only first 5 words of the query are used for concat/split
results = coll1->search("nonstick pressure cooker is a greatinvention", {"title"}, "", {}, {}, {0}, 10, 1,
FREQUENCY, {false}, 0).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("nonstick pressure cooker is a gr eat", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{false}, 0).get();
ASSERT_EQ(0, results["hits"].size());
// don't join when feature is disabled
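    // (the trailing `off` value is what turns off the split/join fallback for this query)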
results = coll1->search("non stick", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000*1000, 4, 7, off).get();
ASSERT_EQ(0, results["hits"].size());
// don't split when feature is disabled
results = coll1->search("pressurecooker", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000*1000, 4, 7, off).get();
ASSERT_EQ(0, results["hits"].size());
    // token dropping should not kick in for tokens produced by splitting
doc["title"] = "Pressure Copper vessel";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
results = coll1->search("pressurecopper", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("Pressure Copper vessel", results["hits"][0]["document"]["title"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, SplitJoinTokenAlways) {
// even if verbatim matches are found, do split+join anyway
std::vector<field> fields = {field("title", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["title"] = "Non stick cookware";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "Nonstick cookware";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "Non cookware stick";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
    // in the default (fallback) mode, only docs that match the query tokens directly are returned,
    // since direct matches exist
auto results = coll1->search("non stick", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
results = coll1->search("nonstick", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
    // in `always` mode, the split/join variants are returned in addition to the direct matches
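    // (`always` is the same trailing split/join argument that was passed as `off` in the previous test)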
results = coll1->search("non stick", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000*1000, 4, 7, always).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
results = coll1->search("nonstick", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000*1000, 4, 7, always).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, TokenCountOfWordsFarApart) {
    // word proximity is calculated using a moving window of X tokens. If only 1 token is present in the best
    // matched window, proximity ends up being perfect. So we have to ensure that scoring uses the total number
    // of tokens found, and not just the tokens found within the window.
std::vector<field> fields = {field("title", field_types::STRING, false),
field("author", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
std::vector<std::vector<std::string>> records = {
{"Central Arizona Project. - Hearing, Eighty-eighth Congress, Second Session, on H.R. 6796, H.R. 6797, "
"H.R. 6798. November 9, 1964, Phoenix, Ariz", "JK"},
{"Project Phoenix", "JK"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["author"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("Phoenix project)", {"title", "author"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, SingleFieldTokenCountOfWordsFarApart) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("author", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
std::vector<std::vector<std::string>> records = {
{"Central Arizona Project. - Hearing, Eighty-eighth Congress, Second Session, on H.R. 6796, H.R. 6797, "
"H.R. 6798. November 9, 1964, Phoenix, Ariz", "JK"},
{"Project Aim Arizona", "JK"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["author"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("Phoenix project)", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {false},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
// without drop tokens
results = coll1->search("Phoenix project)", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {false},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, SingleHyphenInQueryNotToBeTreatedAsExclusion) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Saturday Short - Thrive (with Audio Descriptions + Open Captions)";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("Saturday Short - Thrive (with Audio Descriptions + Open Captions)", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, DuplicateFieldsNotAllowed) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("title", field_types::INT32, true),};
Option<Collection*> create_op = collectionManager.create_collection("collection", 1, fields);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ(create_op.error(), "There are duplicate field names in the schema.");
ASSERT_EQ(create_op.code(), 400);
// with dynamic field
fields = {field("title_.*", field_types::STRING, false, true),
field("title_.*", field_types::INT32, true, true),};
create_op = collectionManager.create_collection("collection", 1, fields);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ(create_op.error(), "There are duplicate field names in the schema.");
ASSERT_EQ(create_op.code(), 400);
    // but a `string*` field combined with its resolved concrete field is allowed
fields = {field("title", "string*", false, true),
field("title", field_types::STRING, true),};
create_op = collectionManager.create_collection("collection", 1, fields);
ASSERT_TRUE(create_op.ok());
}
TEST_F(CollectionSpecificTest, EmptyArrayShouldBeAcceptedAsFirstValue) {
Collection *coll1;
std::vector<field> fields = {field("tags", field_types::STRING_ARRAY, false, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "");
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
nlohmann::json doc;
doc["company_name"] = "Amazon Inc.";
doc["tags"] = nlohmann::json::array();
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, SimplePrefixQueryHighlight) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "The Hound of the Baskervilles";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("basker", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("The Hound of the <mark>Basker</mark>villes", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("The Hound of the <mark>Basker</mark>villes", results["hits"][0]["highlights"][0]["value"].get<std::string>());
results = coll1->search("bassker", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("The Hound of the <mark>Baskerv</mark>illes", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("The Hound of the <mark>Baskerv</mark>illes", results["hits"][0]["highlights"][0]["value"].get<std::string>());
// multiple tokens with typo in prefix
results = coll1->search("hound of bassker", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("The <mark>Hound</mark> <mark>of</mark> the <mark>Baskerv</mark>illes", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("The <mark>Hound</mark> <mark>of</mark> the <mark>Baskerv</mark>illes", results["hits"][0]["highlights"][0]["value"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, PhraseSearch) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, true),
field("points", field_types::INT32, false, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "");
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
std::vector<std::vector<std::string>> records = {
{"Then and there by the down"},
{"Down There by the Train"},
{"The State Trooper"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// without phrase search
auto results = coll1->search(R"(down there by)", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
    // with phrase search (with padded spaces before and after the double quotes)
results = coll1->search(R"(" down there by ")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>Down</mark> <mark>There</mark> <mark>by</mark> the Train", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
// phrase search with exclusion
results = coll1->search(R"("by the" -train)", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("Then and there <mark>by</mark> <mark>the</mark> down", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
// exclusion of an entire phrase
results = coll1->search(R"(-"by the down")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(0, results["hits"][0]["highlights"].size());
ASSERT_EQ(0, results["hits"][1]["highlights"].size());
results = coll1->search(R"(-"by the")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
    // phrase search containing a token that matches no document
results = coll1->search(R"("by the dinosaur")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(0, results["hits"].size());
// phrase search with no matching document
results = coll1->search(R"("by the state")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(0, results["hits"].size());
// phrase search with filter condition
results = coll1->search(R"("there by the")", {"title"}, "points:>=1", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
    // excluding a phrase whose tokens don't match any document
results = coll1->search(R"(-"by the dinosaur")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(3, results["hits"].size());
// phrase with normal non-matching token
results = coll1->search(R"("by the" state)", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search(R"("by the" and)", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
// phrase with normal matching token
results = coll1->search(R"("by the" and)", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// order of tokens in phrase must be respected
results = coll1->search(R"("train by the")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search(R"("train the by")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search(R"("train the")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search(R"("trooper state")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(0, results["hits"].size());
// two phrases
results = coll1->search(R"("by the" "then and")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>Then</mark> <mark>and</mark> there <mark>by</mark> <mark>the</mark> down", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search(R"("by the" "there by")", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(2, results["hits"].size());
// two phrases with filter
results = coll1->search(R"("by the" "there by")", {"title"}, "points:>=1", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
// single token phrase
results = coll1->search(R"("trooper")", {"title"}, "points:>=1", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, PhraseSearchMultiBlockToken) {
std::vector<field> fields = {field("title", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
// the posting list for the word "train" will span more than one block (since 300 docs contain it)
for(size_t i = 0; i < 300; i++) {
nlohmann::json doc1;
doc1["title"] = "Train was here.";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
}
nlohmann::json doc2;
doc2["title"] = "Train is coming.";
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search(R"("is train")", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search(R"("train is")", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("300", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, PhraseSearchMultipleFields) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Dog Shoemaker";
doc1["description"] = "A book about a dog and a shoemaker";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Dog and cat";
doc2["description"] = "A book about two animals.";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search(R"("dog shoemaker")", {"title", "description"},
"", {}, {}, {2, 2}, 10, 1, FREQUENCY, {true, true}, 10).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search(R"("dog and cat")", {"title", "description"},
"", {}, {}, {2, 2}, 10, 1, FREQUENCY, {true, true}, 10).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search(R"("dog and cat")", {"title", "description"},
"description: about", {}, {}, {2, 2}, 10, 1, FREQUENCY, {true, true}, 10).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, PhraseSearchMultipleFieldsWithWeights) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "And then there were none";
doc1["description"] = "A tale about prisioners stuck in an island";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Mystery Island";
doc2["description"] = "And then there were none - a novel";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
// weight title more than description
auto results = coll1->search(R"("there were none")", {"title", "description"},
"", {}, {}, {2, 2}, 10, 1, FREQUENCY, {true, true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {10, 2}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
// weight description more than title
results = coll1->search(R"("there were none")", {"title", "description"},
"", {}, {}, {2, 2}, 10, 1, FREQUENCY, {true, true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {2, 10}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, HandleLargeWeights) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "foo same";
doc1["description"] = "bar same";
doc1["points"] = 200;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "bar same";
doc2["description"] = "foo same";
doc2["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("foo same", {"title", "description"},
"", {}, {}, {2, 2}, 10,
1, FREQUENCY, {true, true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 2}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_TRUE(results["hits"][0]["text_match"].get<size_t>() > results["hits"][1]["text_match"].get<size_t>());
results = coll1->search("foo same", {"title", "description"},
"", {}, {}, {2, 2}, 10,
1, FREQUENCY, {true, true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {128, 130}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_TRUE(results["hits"][0]["text_match"].get<size_t>() > results["hits"][1]["text_match"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, VerbatimMatchShouldOverpowerHigherWeightedField) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Basketball Shoes";
doc1["description"] = "Basketball";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Nike Jordan";
doc2["description"] = "Shoes";
doc2["points"] = 200;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("shoes", {"title", "description"},
"", {}, {}, {2, 2}, 10,
1, FREQUENCY, {true, true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {4, 1}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, DropTokensTillOneToken) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Harry";
doc1["description"] = "Malcolm Roscow";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Malcolm";
doc2["description"] = "Something 2";
nlohmann::json doc3;
doc3["id"] = "2";
doc3["title"] = "Roscow";
doc3["description"] = "Something 3";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
auto results = coll1->search("harry malcolm roscow", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
// with drop tokens threshold of 1
results = coll1->search("harry malcolm roscow", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, NegationOfTokens) {
std::vector<field> fields = {field("title", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::vector<std::string>> records = {
{"Samsung Galaxy Buds 2 White"},
{"Samsung Galaxy Note20 Ultra Cover EF-ZN985CAEGEE, Bronze"},
{"Samsung Galaxy S21+ cover EF-NG996PJEGEE, bijeli"},
{"Samsung Galaxy S21+ Kožna maska EF-VG996LBEGWW, crna"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("galaxy -buds -maska -cover", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(0, results["found"].get<size_t>());
results = coll1->search("-white -bronze -bijeli", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, PhraseSearchOnLongText) {
std::vector<field> fields = {field("title", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::vector<std::string>> records = {
{"He goes, Sir, why don't you drive? (Laughter) I don't know where we're going. Neither do I. It will be an adventure, sir. (Laughter) The Middle East has been an adventure the past couple of years. It is going crazy with the Arab Spring and revolution and all this. Are there any Lebanese here tonight, by applause? (Cheering) Lebanese, yeah. The Middle East is going crazy. You know the Middle East is going crazy when Lebanon is the most peaceful place in the region. (Laughter) (Applause) Who would have thought? (Laughter) Oh my gosh."},
{"Bear in mind this was an ultrasound, so it would have been moving images. It is a reflex of the autonomic nervous system. Now, this is the part of the nervous system that deals with the things that we don't consciously control."},
{"So there will be a shared autonomy fleet where you buy your car and you can choose to use that car exclusively, you could choose to have it be used only by friends and family."}
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("\"have it be\"", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, RepeatedTokensInArray) {
// should have same text match score
std::vector<field> fields = {field("tags", field_types::STRING_ARRAY, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["tags"] = {"Harry Mark"};
nlohmann::json doc2;
doc2["id"] = "1";
doc2["tags"] = {"Harry is random", "Harry Simpson"};
nlohmann::json doc3;
doc3["id"] = "2";
doc3["tags"] = {"Harry is Harry"};
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
auto results = coll1->search("harry", {"tags"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(results["hits"][0]["text_match"].get<size_t>(), results["hits"][1]["text_match"].get<size_t>());
ASSERT_EQ(results["hits"][1]["text_match"].get<size_t>(), results["hits"][2]["text_match"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, NonIndexField) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false, true, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Harry";
doc1["description"] = "A book.";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("harry", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, coll1->_get_index()->_get_search_index().size());
std::map<std::string, std::string> req_params = {
{"collection", "coll1"},
{"q", "*"},
{"include_fields", "*, "}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
results = nlohmann::json::parse(json_res);
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(3, results["hits"][0].at("document").size());
ASSERT_EQ(1, results["hits"][0].at("document").count("description"));
req_params = {
{"collection", "coll1"},
{"q", "*"},
{"include_fields", "*, title"} // Adding a field name overrides include all wildcard
};
collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
results = nlohmann::json::parse(json_res);
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["hits"][0].at("document").size());
ASSERT_EQ(1, results["hits"][0].at("document").count("title"));
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, HighlightPrefixProperly) {
std::vector<field> fields = {field("title", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::vector<std::string>> records = {
{"Cinderella: the story."},
{"The story of a girl."},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("cindrella o", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>Cinderella</mark>: the story.", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, DontHighlightPunctuation) {
std::vector<field> fields = {field("title", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::vector<std::string>> records = {
{"??Ensure! readability, use a legible font."},
{"Too much clutter-- use readability.js to clean up the page."},
{"'DMonte Harris"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("readability", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("??Ensure! <mark>readability</mark>, use a legible font.", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("Too much clutter-- use <mark>readability</mark>.js to clean up the page.", results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("clutter", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("Too much <mark>clutter</mark>-- use readability.js to clean up the page.", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("ensure", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("??<mark>Ensure</mark>! readability, use a legible font.", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("dmonte", {"title"},
"", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("'<mark>DMonte</mark> Harris", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificTest, ExactMatchWithoutClosingSymbol) {
std::vector<field> fields = {field("title", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::vector<std::string>> records = {
{"Hampi"},
{"Mahabalipuram"},
{"Taj Mahal"},
{"Mysore Palace"}
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::map<std::string, std::string> req_params = {
{"collection", "coll1"},
{"q", "\"Hamp"},
{"query_by", "title"},
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
nlohmann::json result = nlohmann::json::parse(json_res);
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ("0", result["hits"][0]["document"]["id"]);
ASSERT_EQ("Hampi", result["hits"][0]["document"]["title"]);
req_params = {
{"collection", "coll1"},
{"q", "\"Mah"},
{"query_by", "title"},
};
now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
result = nlohmann::json::parse(json_res);
ASSERT_EQ(2, result["hits"].size());
ASSERT_EQ("2", result["hits"][0]["document"]["id"]);
ASSERT_EQ("Taj Mahal", result["hits"][0]["document"]["title"]);
ASSERT_EQ("1", result["hits"][1]["document"]["id"]);
ASSERT_EQ("Mahabalipuram", result["hits"][1]["document"]["title"]);
}
| 133,557 | C++ | .cpp | 2,309 | 46.778259 | 554 | 0.541324 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

3,699 | auth_manager_test.cpp | typesense_typesense/test/auth_manager_test.cpp |
#include <gtest/gtest.h>
#include <stdlib.h>
#include <iostream>
#include <http_data.h>
#include "auth_manager.h"
#include "core_api.h"
#include <collection_manager.h>
static const size_t FUTURE_TS = 64723363199; // far-future unix timestamp, used for keys that should never expire during these tests
class AuthManagerTest : public ::testing::Test {
protected:
Store *store;
AuthManager auth_manager;
CollectionManager& collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/auth_manager_test_db";
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
auth_manager.init(store, "bootstrap-key");
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
delete store;
}
};
TEST_F(AuthManagerTest, CreateListDeleteAPIKeys) {
auto list_op = auth_manager.list_keys();
ASSERT_TRUE(list_op.ok());
ASSERT_EQ(0, list_op.get().size());
auto get_op = auth_manager.get_key(0);
ASSERT_FALSE(get_op.ok());
ASSERT_EQ(404, get_op.code());
// test inserts
api_key_t api_key1("abcd1", "test key 1", {"read", "write"}, {"collection1", "collection2"}, FUTURE_TS);
api_key_t api_key2("abcd2", "test key 2", {"admin"}, {"*"}, FUTURE_TS);
ASSERT_EQ("abcd1", api_key1.value);
ASSERT_EQ("abcd2", api_key2.value);
auto insert_op = auth_manager.create_key(api_key1);
ASSERT_TRUE(insert_op.ok());
ASSERT_EQ(5, insert_op.get().value.size());
insert_op = auth_manager.create_key(api_key2);
ASSERT_TRUE(insert_op.ok());
ASSERT_EQ(5, insert_op.get().value.size());
// reject on conflict
insert_op = auth_manager.create_key(api_key2);
ASSERT_FALSE(insert_op.ok());
ASSERT_EQ(409, insert_op.code());
ASSERT_EQ("API key generation conflict.", insert_op.error());
api_key2.value = "bootstrap-key";
insert_op = auth_manager.create_key(api_key2);
ASSERT_FALSE(insert_op.ok());
ASSERT_EQ(409, insert_op.code());
ASSERT_EQ("API key generation conflict.", insert_op.error());
// get an individual key
get_op = auth_manager.get_key(0);
ASSERT_TRUE(get_op.ok());
const api_key_t &key1 = get_op.get();
ASSERT_EQ(4, key1.value.size());
ASSERT_EQ("test key 1", key1.description);
ASSERT_EQ(2, key1.actions.size());
EXPECT_STREQ("read", key1.actions[0].c_str());
EXPECT_STREQ("write", key1.actions[1].c_str());
ASSERT_EQ(2, key1.collections.size());
EXPECT_STREQ("collection1", key1.collections[0].c_str());
EXPECT_STREQ("collection2", key1.collections[1].c_str());
get_op = auth_manager.get_key(1);
ASSERT_TRUE(get_op.ok());
ASSERT_EQ(4, get_op.get().value.size());
ASSERT_EQ("test key 2", get_op.get().description);
get_op = auth_manager.get_key(1, false);
ASSERT_TRUE(get_op.ok());
ASSERT_NE(4, get_op.get().value.size());
get_op = auth_manager.get_key(2, false);
ASSERT_FALSE(get_op.ok());
// listing keys
list_op = auth_manager.list_keys();
ASSERT_TRUE(list_op.ok());
ASSERT_EQ(2, list_op.get().size());
ASSERT_EQ("test key 1", list_op.get()[0].description);
ASSERT_EQ("abcd", list_op.get()[0].value);
ASSERT_EQ("test key 2", list_op.get()[1].description);
ASSERT_EQ("abcd", list_op.get()[1].value);
// delete key
auto del_op = auth_manager.remove_key(1);
ASSERT_TRUE(del_op.ok());
del_op = auth_manager.remove_key(1000);
ASSERT_FALSE(del_op.ok());
ASSERT_EQ(404, del_op.code());
}
TEST_F(AuthManagerTest, CheckRestoreOfAPIKeys) {
api_key_t api_key1("abcd1", "test key 1", {"read", "write"}, {"collection1", "collection2"}, FUTURE_TS);
api_key_t api_key2("abcd2", "test key 2", {"admin"}, {"*"}, FUTURE_TS);
std::string key_value1 = auth_manager.create_key(api_key1).get().value;
std::string key_value2 = auth_manager.create_key(api_key2).get().value;
AuthManager auth_manager2;
auth_manager2.init(store, "bootstrap-key");
// list keys
auto list_op = auth_manager.list_keys();
ASSERT_TRUE(list_op.ok());
ASSERT_EQ(2, list_op.get().size());
ASSERT_EQ("test key 1", list_op.get()[0].description);
ASSERT_EQ("abcd", list_op.get()[0].value);
ASSERT_STREQ(key_value1.substr(0, 4).c_str(), list_op.get()[0].value.c_str());
ASSERT_EQ(FUTURE_TS, list_op.get()[0].expires_at);
ASSERT_EQ("test key 2", list_op.get()[1].description);
ASSERT_EQ("abcd", list_op.get()[1].value);
ASSERT_STREQ(key_value2.substr(0, 4).c_str(), list_op.get()[1].value.c_str());
ASSERT_EQ(FUTURE_TS, list_op.get()[1].expires_at);
}
TEST_F(AuthManagerTest, VerifyAuthentication) {
std::map<std::string, std::string> sparams;
std::vector<nlohmann::json> embedded_params(2);
// when no keys are present at all
ASSERT_FALSE(auth_manager.authenticate("", {collection_key_t("", "jdlaslasdasd")}, sparams, embedded_params));
// wildcard permission
api_key_t wildcard_all_key = api_key_t("abcd1", "wildcard all key", {"*"}, {"*"}, FUTURE_TS);
auth_manager.create_key(wildcard_all_key);
ASSERT_FALSE(auth_manager.authenticate("documents:create", {collection_key_t("collection1", "jdlaslasdasd")}, sparams, embedded_params));
ASSERT_TRUE(auth_manager.authenticate("metrics:get", {collection_key_t("", wildcard_all_key.value)}, sparams, embedded_params));
// long API key
std::string long_api_key_str = StringUtils::randstring(50);
api_key_t long_api_key = api_key_t(long_api_key_str, "long api key", {"*"}, {"*"}, FUTURE_TS);
auth_manager.create_key(long_api_key);
ASSERT_TRUE(auth_manager.authenticate("metrics:get", {collection_key_t(long_api_key_str, wildcard_all_key.value)}, sparams, embedded_params));
// wildcard on a collection
api_key_t wildcard_coll_key = api_key_t("abcd2", "wildcard coll key", {"*"}, {"collection1"}, FUTURE_TS);
auth_manager.create_key(wildcard_coll_key);
ASSERT_FALSE(auth_manager.authenticate("documents:create", {collection_key_t("collection1", "adasda")}, sparams, embedded_params));
ASSERT_TRUE(auth_manager.authenticate("documents:get", {collection_key_t("collection1", wildcard_coll_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:get", {collection_key_t("collection2", wildcard_coll_key.value)}, sparams, embedded_params));
// wildcard on multiple collections
api_key_t wildcard_colls_key = api_key_t("abcd3", "wildcard coll key", {"*"}, {"collection1", "collection2", "collection3"}, FUTURE_TS);
auth_manager.create_key(wildcard_colls_key);
ASSERT_TRUE(auth_manager.authenticate("documents:get", {collection_key_t("collection1", wildcard_colls_key.value)}, sparams, embedded_params));
ASSERT_TRUE(auth_manager.authenticate("documents:search", {collection_key_t("collection2", wildcard_colls_key.value)}, sparams, embedded_params));
ASSERT_TRUE(auth_manager.authenticate("documents:create", {collection_key_t("collection3", wildcard_colls_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:get", {collection_key_t("collection4", wildcard_colls_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:get", {collection_key_t("*", wildcard_colls_key.value)}, sparams, embedded_params));
// only 1 action on multiple collections
api_key_t one_action_key = api_key_t("abcd4", "one action key", {"documents:search"}, {"collection1", "collection2"}, FUTURE_TS);
auth_manager.create_key(one_action_key);
ASSERT_TRUE(auth_manager.authenticate("documents:search", {collection_key_t("collection1", one_action_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:get", {collection_key_t("collection2", one_action_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("collection5", one_action_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("*", {collection_key_t("collection2", one_action_key.value)}, sparams, embedded_params));
// multiple actions on multiple collections
api_key_t mul_acoll_key = api_key_t("abcd5", "multiple action/collection key",
{"documents:get", "collections:list"}, {"metacollection", "collection2"}, FUTURE_TS);
auth_manager.create_key(mul_acoll_key);
ASSERT_TRUE(auth_manager.authenticate("documents:get", {collection_key_t("metacollection", mul_acoll_key.value)}, sparams, embedded_params));
ASSERT_TRUE(auth_manager.authenticate("collections:list", {collection_key_t("collection2", mul_acoll_key.value)}, sparams, embedded_params));
ASSERT_TRUE(auth_manager.authenticate("collections:list", {collection_key_t("metacollection", mul_acoll_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("collection2", mul_acoll_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:get", {collection_key_t("collection5", mul_acoll_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("*", {collection_key_t("*", mul_acoll_key.value)}, sparams, embedded_params));
// regexp match
api_key_t regexp_colls_key1 = api_key_t("abcd6", "regexp coll key", {"*"}, {"coll.*"}, FUTURE_TS);
auth_manager.create_key(regexp_colls_key1);
ASSERT_TRUE(auth_manager.authenticate("collections:list", {collection_key_t("collection2", regexp_colls_key1.value)}, sparams, embedded_params));
ASSERT_TRUE(auth_manager.authenticate("documents:get", {collection_key_t("collection5", regexp_colls_key1.value)}, sparams, embedded_params));
api_key_t regexp_colls_key2 = api_key_t("abcd7", "regexp coll key", {"*"}, {".*meta.*"}, FUTURE_TS);
auth_manager.create_key(regexp_colls_key2);
ASSERT_TRUE(auth_manager.authenticate("collections:list", {collection_key_t("metacollection", regexp_colls_key2.value)}, sparams, embedded_params));
ASSERT_TRUE(auth_manager.authenticate("collections:list", {collection_key_t("ametacollection", regexp_colls_key2.value)}, sparams, embedded_params));
// check for expiry
api_key_t expired_key1 = api_key_t("abcd8", "expiry key", {"*"}, {"*"}, 1606542716);
auth_manager.create_key(expired_key1);
ASSERT_FALSE(auth_manager.authenticate("collections:list", {collection_key_t("collection", expired_key1.value)}, sparams, embedded_params));
api_key_t unexpired_key1 = api_key_t("abcd9", "expiry key", {"*"}, {"*"}, 2237712220);
auth_manager.create_key(unexpired_key1);
ASSERT_TRUE(auth_manager.authenticate("collections:list", {collection_key_t("collection", unexpired_key1.value)}, sparams, embedded_params));
// wildcard action on any collection
api_key_t wildcard_action_coll_key = api_key_t("abcd10", "wildcard coll action key", {"collections:*"}, {"*"}, FUTURE_TS);
auth_manager.create_key(wildcard_action_coll_key);
ASSERT_TRUE(auth_manager.authenticate("collections:create", {collection_key_t("collection1", wildcard_action_coll_key.value)}, sparams, embedded_params));
ASSERT_TRUE(auth_manager.authenticate("collections:delete", {collection_key_t("collection1", wildcard_action_coll_key.value), collection_key_t("collection2", wildcard_action_coll_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:create", {collection_key_t("collection1", wildcard_action_coll_key.value)}, sparams, embedded_params));
// create action on a specific collection
api_key_t create_action_coll_key = api_key_t("abcd11", "create action+coll key", {"collections:create"}, {"collection1"}, FUTURE_TS);
auth_manager.create_key(create_action_coll_key);
ASSERT_TRUE(auth_manager.authenticate("collections:create", {collection_key_t("collection1", create_action_coll_key.value)}, sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("collections:create", {collection_key_t("collection2", create_action_coll_key.value)}, sparams, embedded_params));
// two keys against 2 different collections: both should be valid
api_key_t coll_a_key = api_key_t("coll_a", "one action key", {"documents:search"}, {"collectionA"}, FUTURE_TS);
api_key_t coll_b_key = api_key_t("coll_b", "one action key", {"documents:search"}, {"collectionB"}, FUTURE_TS);
auth_manager.create_key(coll_a_key);
auth_manager.create_key(coll_b_key);
ASSERT_TRUE(auth_manager.authenticate("documents:search",
{collection_key_t("collectionA", coll_a_key.value),
collection_key_t("collectionB", coll_b_key.value)},
sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:search",
{collection_key_t("collectionA", coll_a_key.value),
collection_key_t("collection1", coll_b_key.value)},
sparams, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:search",
{collection_key_t("collection1", coll_a_key.value),
collection_key_t("collectionB", coll_b_key.value)},
sparams, embedded_params));
// bad collection allow regexp
api_key_t coll_c_key = api_key_t("coll_c", "one action key", {"documents:search"}, {"*coll_c"}, FUTURE_TS);
auth_manager.create_key(coll_c_key);
ASSERT_FALSE(auth_manager.authenticate("documents:search",
{collection_key_t("coll_c", coll_c_key.value),},
sparams, embedded_params));
}
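// Illustrative sketch, not AuthManager's actual implementation: the collection allow-patterns
// exercised above ("coll.*", ".*meta.*") behave like regular expressions, and a pattern with a
// leading quantifier such as "*coll_c" is not a valid regex, which would explain why the final
// assertion expects authentication to fail. The helper name below is hypothetical.
#include <regex>
[[maybe_unused]] static bool collection_pattern_matches_sketch(const std::string& pattern,
                                                               const std::string& collection) {
    try {
        return std::regex_match(collection, std::regex(pattern));
    } catch(const std::regex_error&) {
        return false;  // malformed pattern: treat it as matching no collection
    }
}
// e.g. collection_pattern_matches_sketch("coll.*", "collection2") == true, while
//      collection_pattern_matches_sketch("*coll_c", "coll_c") == false (the regex constructor throws).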
TEST_F(AuthManagerTest, GenerationOfAPIAction) {
route_path rpath_search = route_path("GET", {"collections", ":collection", "documents", "search"}, nullptr, false, false);
route_path rpath_multi_search = route_path("POST", {"multi_search"}, nullptr, false, false);
route_path rpath_coll_create = route_path("POST", {"collections"}, nullptr, false, false);
route_path rpath_coll_get = route_path("GET", {"collections", ":collection"}, nullptr, false, false);
route_path rpath_coll_list = route_path("GET", {"collections"}, nullptr, false, false);
route_path rpath_coll_import = route_path("POST", {"collections", ":collection", "documents", "import"}, nullptr, false, false);
route_path rpath_coll_export = route_path("GET", {"collections", ":collection", "documents", "export"}, nullptr, false, false);
route_path rpath_keys_post = route_path("POST", {"keys"}, nullptr, false, false);
route_path rpath_doc_delete = route_path("DELETE", {"collections", ":collection", "documents", ":id"}, nullptr, false, false);
route_path rpath_override_upsert = route_path("PUT", {"collections", ":collection", "overrides", ":id"}, nullptr, false, false);
route_path rpath_doc_patch = route_path("PATCH", {"collections", ":collection", "documents", ":id"}, nullptr, false, false);
route_path rpath_analytics_rules_list = route_path("GET", {"analytics", "rules"}, nullptr, false, false);
route_path rpath_analytics_rules_get = route_path("GET", {"analytics", "rules", ":id"}, nullptr, false, false);
route_path rpath_analytics_rules_put = route_path("PUT", {"analytics", "rules", ":id"}, nullptr, false, false);
route_path rpath_ops_cache_clear_post = route_path("POST", {"operations", "cache", "clear"}, nullptr, false, false);
route_path rpath_conv_models_list = route_path("GET", {"conversations", "models"}, nullptr, false, false);
ASSERT_STREQ("documents:search", rpath_search._get_action().c_str());
ASSERT_STREQ("documents:search", rpath_multi_search._get_action().c_str());
ASSERT_STREQ("collections:create", rpath_coll_create._get_action().c_str());
ASSERT_STREQ("collections:get", rpath_coll_get._get_action().c_str());
ASSERT_STREQ("documents:import", rpath_coll_import._get_action().c_str());
ASSERT_STREQ("documents:export", rpath_coll_export._get_action().c_str());
ASSERT_STREQ("collections:list", rpath_coll_list._get_action().c_str());
ASSERT_STREQ("keys:create", rpath_keys_post._get_action().c_str());
ASSERT_STREQ("documents:delete", rpath_doc_delete._get_action().c_str());
ASSERT_STREQ("overrides:upsert", rpath_override_upsert._get_action().c_str());
ASSERT_STREQ("documents:update", rpath_doc_patch._get_action().c_str());
ASSERT_STREQ("analytics/rules:list", rpath_analytics_rules_list._get_action().c_str());
ASSERT_STREQ("analytics/rules:get", rpath_analytics_rules_get._get_action().c_str());
ASSERT_STREQ("analytics/rules:upsert", rpath_analytics_rules_put._get_action().c_str());
ASSERT_STREQ("operations/cache/clear:create", rpath_ops_cache_clear_post._get_action().c_str());
ASSERT_STREQ("conversations/models:list", rpath_conv_models_list._get_action().c_str());
}
TEST_F(AuthManagerTest, ScopedAPIKeys) {
std::map<std::string, std::string> params;
params["filter_by"] = "country:USA";
std::vector<nlohmann::json> embedded_params(2);
// create an API key bound to the search scope and a given collection
api_key_t key_search_coll1("KeyVal", "test key", {"documents:search"}, {"coll1"}, FUTURE_TS);
auth_manager.create_key(key_search_coll1);
std::string scoped_key = StringUtils::base64_encode(
R"(IvjqWNZ5M5ElcvbMoXj45BxkQrZG4ZKEaNQoRioCx2s=KeyV{"filter_by": "user_id:1080"})"
);
ASSERT_TRUE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", scoped_key)}, params, embedded_params));
ASSERT_EQ("user_id:1080", embedded_params[0]["filter_by"].get<std::string>());
// should scope to collection bound by the parent key
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("coll2", scoped_key)}, params, embedded_params));
// should scope to search action only
ASSERT_FALSE(auth_manager.authenticate("documents:create", {collection_key_t("coll1", scoped_key)}, params, embedded_params));
// check with corrupted key
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", "asdasasd")}, params, embedded_params));
// with multiple collections, all should be authenticated
ASSERT_FALSE(auth_manager.authenticate("documents:search",
{collection_key_t("coll1", scoped_key),
collection_key_t("coll2", scoped_key)},
params, embedded_params));
// send both regular key and scoped key
ASSERT_TRUE(auth_manager.authenticate("documents:search",
{collection_key_t("coll1", key_search_coll1.value),
collection_key_t("coll1", scoped_key)},
params, embedded_params));
// when params is empty, embedded param should be set
std::map<std::string, std::string> empty_params;
embedded_params.clear();
embedded_params.push_back(nlohmann::json::object());
ASSERT_TRUE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", scoped_key)}, empty_params, embedded_params));
ASSERT_EQ("user_id:1080", embedded_params[0]["filter_by"].get<std::string>());
// when more than a single key prefix matches, must pick the correct underlying key
embedded_params.clear();
embedded_params.push_back(nlohmann::json::object());
api_key_t key_search_coll2("KeyVal2", "test key", {"documents:search"}, {"coll2"}, FUTURE_TS);
auth_manager.create_key(key_search_coll2);
ASSERT_TRUE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", scoped_key)}, empty_params, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("coll2", scoped_key)}, empty_params, embedded_params));
// scoped key generated from key_search_coll2
embedded_params.clear();
embedded_params.push_back(nlohmann::json::object());
std::string scoped_key_prefix2 = "QmNlNXdkUThaeDJFZXNiOXB4VUFCT1BmN01GSEJnRUdiMng2aTJESjJqND1LZXlWeyJmaWx0ZXJfYnkiOiAidXNlcl9pZDoxMDgwIn0=";
ASSERT_TRUE(auth_manager.authenticate("documents:search", {collection_key_t("coll2", scoped_key_prefix2)}, empty_params, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", scoped_key_prefix2)}, empty_params, embedded_params));
// should only allow scoped API keys derived from parent key with documents:search action
embedded_params.clear();
embedded_params.push_back(nlohmann::json::object());
api_key_t key_search_admin("AdminKey", "admin key", {"*"}, {"*"}, FUTURE_TS);
auth_manager.create_key(key_search_admin);
std::string scoped_key2 = StringUtils::base64_encode(
"BXbsk+xLT1gxOjDyip6+PE4MtOzOm/H7kbkN1d/j/s4=Admi{\"filter_by\": \"user_id:1080\"}"
);
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("coll2", scoped_key2)}, empty_params, embedded_params));
// expiration of scoped api key
// {"filter_by": "user_id:1080", "expires_at": 2237712220} (NOT expired)
api_key_t key_expiry("ExpireKey", "expire key", {"documents:search"}, {"*"}, FUTURE_TS);
auth_manager.create_key(key_expiry);
embedded_params.clear();
embedded_params.push_back(nlohmann::json::object());
std::string scoped_key3 = "K1M2STRDelZYNHpxNGVWUTlBTGpOWUl4dk8wNU8xdnVEZi9aSUcvZE5tcz1FeHBpeyJmaWx0ZXJfYnkiOi"
"AidXNlcl9pZDoxMDgwIiwgImV4cGlyZXNfYXQiOiAyMjM3NzEyMjIwfQ==";
ASSERT_TRUE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", scoped_key3)}, empty_params, embedded_params));
ASSERT_EQ("user_id:1080", embedded_params[0]["filter_by"].get<std::string>());
ASSERT_EQ(1, embedded_params.size());
// {"filter_by": "user_id:1080", "expires_at": 1606563316} (expired)
api_key_t key_expiry2("ExpireKey2", "expire key", {"documents:search"}, {"*"}, FUTURE_TS);
auth_manager.create_key(key_expiry2);
embedded_params.clear();
embedded_params.push_back(nlohmann::json::object());
std::string scoped_key4 = "SXFKNldZZWRiWkVKVmI2RCt3OTlKNHpBZ24yWlRUbEdJdERtTy9IZ2REZz1FeHBpeyJmaWx0ZXJfYnkiOiAidXN"
"lcl9pZDoxMDgwIiwgImV4cGlyZXNfYXQiOiAxNjA2NTYzMzE2fQ==";
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", scoped_key4)}, empty_params, embedded_params));
// {"filter_by": "user_id:1080", "expires_at": 64723363200} (greater than parent key expiry)
// embedded key's param cannot exceed parent's expiry
api_key_t key_expiry3("ExpireKey3", "expire key", {"documents:search"}, {"*"}, 1606563841);
auth_manager.create_key(key_expiry3);
embedded_params.clear();
embedded_params.push_back(nlohmann::json::object());
std::string scoped_key5 = "V3JMNFJlZHRMVStrZHphNFVGZDh4MWltSmx6Yzk2R3QvS2ZwSE8weGRWQT1FeHBpeyJmaWx0ZXJfYnkiOiAidX"
"Nlcl9pZDoxMDgwIiwgImV4cGlyZXNfYXQiOiA2NDcyMzM2MzIwMH0=";
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", scoped_key5)}, empty_params, embedded_params));
// bad scoped API key
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", " XhsdBdhehdDheruyhvbdhwjhHdhgyeHbfheR")}, empty_params, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", "cXYPvkNKRlQrBzVTEgY4a3FrZfZ2MEs4kFJ6all3eldwM GhKZnRId3Y3TT1RZmxZeYJmaWx0ZXJfYnkiOkJ1aWQ6OElVm1lUVm15SG9ZOHM4NUx2VFk4S2drNHJIMiJ9")}, empty_params, embedded_params));
ASSERT_FALSE(auth_manager.authenticate("documents:search", {collection_key_t("coll1", "SXZqcVdOWjVNNUVsY3ZiTW9YajQ1QnhrUXJaRzRaS0VhTlFvUmlvQ3gycz1LZXlWeyJmaWx0ZXJfYnkiOiAidXNlcl9pZDoxMDgw In0=")}, empty_params, embedded_params));
}
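// Illustrative sketch, not the production key-generation code: the hard-coded scoped keys used
// above appear to follow the layout
//   base64( <base64 HMAC-SHA256 digest> + <first 4 chars of parent key> + <embedded params JSON> )
// e.g. "IvjqWNZ5M5ElcvbMoXj45BxkQrZG4ZKEaNQoRioCx2s=" + "KeyV" + {"filter_by": "user_id:1080"}.
// The digest is taken as a precomputed input here so the sketch needs no crypto dependency;
// the helper name is hypothetical.
[[maybe_unused]] static std::string make_scoped_key_sketch(const std::string& digest_b64,
                                                           const std::string& parent_key,
                                                           const std::string& params_json) {
    return StringUtils::base64_encode(digest_b64 + parent_key.substr(0, 4) + params_json);
}
// e.g. make_scoped_key_sketch("IvjqWNZ5M5ElcvbMoXj45BxkQrZG4ZKEaNQoRioCx2s=", "KeyVal",
//                             R"({"filter_by": "user_id:1080"})") yields the same string as the
// first scoped_key constructed in the test above.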
TEST_F(AuthManagerTest, ValidateBadKeyProperties) {
nlohmann::json key_obj1;
key_obj1["description"] = "desc";
key_obj1["actions"].push_back("*");
key_obj1["collections"].push_back(1);
Option<uint32_t> validate_op = api_key_t::validate(key_obj1);
ASSERT_FALSE(validate_op.ok());
ASSERT_STREQ("Wrong format for `collections`. It should be an array of string.", validate_op.error().c_str());
key_obj1["actions"].push_back(1);
key_obj1["collections"].push_back("*");
validate_op = api_key_t::validate(key_obj1);
ASSERT_FALSE(validate_op.ok());
ASSERT_STREQ("Wrong format for `actions`. It should be an array of string.", validate_op.error().c_str());
key_obj1["actions"] = 1;
key_obj1["collections"] = {"*"};
validate_op = api_key_t::validate(key_obj1);
ASSERT_FALSE(validate_op.ok());
ASSERT_STREQ("Wrong format for `actions`. It should be an array of string.", validate_op.error().c_str());
nlohmann::json key_obj2;
key_obj2["description"] = "desc";
key_obj2["actions"] = {"*"};
key_obj2["collections"] = {"foobar"};
key_obj2["expires_at"] = -100;
validate_op = api_key_t::validate(key_obj2);
ASSERT_FALSE(validate_op.ok());
ASSERT_STREQ("Wrong format for `expires_at`. It should be an unsigned integer.", validate_op.error().c_str());
key_obj2["expires_at"] = "expiry_ts";
validate_op = api_key_t::validate(key_obj2);
ASSERT_FALSE(validate_op.ok());
ASSERT_STREQ("Wrong format for `expires_at`. It should be an unsigned integer.", validate_op.error().c_str());
key_obj2["expires_at"] = 1606539880;
validate_op = api_key_t::validate(key_obj2);
ASSERT_TRUE(validate_op.ok());
// check for valid value
nlohmann::json key_obj3;
key_obj3["description"] = "desc";
key_obj3["actions"] = {"*"};
key_obj3["collections"] = {"foobar"};
key_obj3["value"] = 100;
validate_op = api_key_t::validate(key_obj3);
ASSERT_FALSE(validate_op.ok());
ASSERT_STREQ("Key value must be a string.", validate_op.error().c_str());
// check for valid description
nlohmann::json key_obj4;
key_obj4["description"] = 42;
key_obj4["actions"] = {"*"};
key_obj4["collections"] = {"foobar"};
key_obj4["value"] = "abcd";
validate_op = api_key_t::validate(key_obj4);
ASSERT_FALSE(validate_op.ok());
ASSERT_STREQ("Key description must be a string.", validate_op.error().c_str());
}
TEST_F(AuthManagerTest, AutoDeleteKeysOnExpiry) {
auto list_op = auth_manager.list_keys();
ASSERT_TRUE(list_op.ok());
ASSERT_EQ(0, list_op.get().size());
// regular key (future ts)
api_key_t api_key1("abcd", "test key 1", {"read", "write"}, {"collection1", "collection2"}, FUTURE_TS);
// key is expired (past ts)
uint64_t PAST_TS = uint64_t(std::time(0)) - 100;
api_key_t api_key2("wxyz", "test key 2", {"admin"}, {"*"}, PAST_TS, true);
auto insert_op = auth_manager.create_key(api_key1);
ASSERT_TRUE(insert_op.ok());
ASSERT_EQ(4, insert_op.get().value.size());
insert_op = auth_manager.create_key(api_key2);
ASSERT_TRUE(insert_op.ok());
ASSERT_EQ(4, insert_op.get().value.size());
list_op = auth_manager.list_keys();
ASSERT_TRUE(list_op.ok());
auto keys = list_op.get();
ASSERT_EQ(2, keys.size());
ASSERT_EQ("abcd", keys[0].value);
ASSERT_EQ("wxyz", keys[1].value);
auth_manager.do_housekeeping();
list_op = auth_manager.list_keys();
ASSERT_TRUE(list_op.ok());
keys = list_op.get();
ASSERT_EQ(1, keys.size());
ASSERT_EQ("abcd", keys[0].value);
}
TEST_F(AuthManagerTest, CollectionsByScope) {
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
auto scoped_key_json = R"({
"description": "Write key",
"actions": [ "collections:*", "documents:*", "synonyms:*" ],
"collections": [ "collection_.*" ],
"value": "3859c47b98"
})"_json;
req->body = scoped_key_json.dump();
ASSERT_TRUE(post_create_key(req, res));
auto schema1 = R"({
"name": "collection_1",
"fields": [
{"name": "title", "type": "string", "locale": "en"},
{"name": "points", "type": "int32"}
]
})"_json;
collectionManager.create_collection(schema1);
auto schema2 = R"({
"name": "collection2",
"fields": [
{"name": "title", "type": "string", "locale": "en"},
{"name": "points", "type": "int32"}
]
})"_json;
collectionManager.create_collection(schema2);
req->api_auth_key = "3859c47b98";
get_collections(req, res);
auto result_json = nlohmann::json::parse(res->body);
ASSERT_EQ(1, result_json.size());
ASSERT_EQ("collection_1", result_json[0]["name"]);
req->api_auth_key.clear();
get_collections(req, res);
result_json = nlohmann::json::parse(res->body);
ASSERT_EQ(2, result_json.size());
ASSERT_EQ("collection2", result_json[0]["name"]);
ASSERT_EQ("collection_1", result_json[1]["name"]);
scoped_key_json = R"({
"description": "Write key",
"actions": [ "collections:*", "documents:*", "synonyms:*" ],
"collections": [ "collection2" ],
"value": "b78a573a1a"
})"_json;
req->body = scoped_key_json.dump();
ASSERT_TRUE(post_create_key(req, res));
req->api_auth_key = "b78a573a1a";
get_collections(req, res);
result_json = nlohmann::json::parse(res->body);
ASSERT_EQ(1, result_json.size());
ASSERT_EQ("collection2", result_json[0]["name"]);
scoped_key_json = R"({
"description": "Write key",
"actions": [ "collections:*", "documents:*", "synonyms:*" ],
"collections": [ "*" ],
"value": "00071e2108"
})"_json;
req->body = scoped_key_json.dump();
ASSERT_TRUE(post_create_key(req, res));
req->api_auth_key = "00071e2108";
get_collections(req, res);
result_json = nlohmann::json::parse(res->body);
ASSERT_EQ(2, result_json.size());
ASSERT_EQ("collection2", result_json[0]["name"]);
ASSERT_EQ("collection_1", result_json[1]["name"]);
}
| 30,549 | C++ | .cpp | 479 | 56.82881 | 257 | 0.669561 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

3,700 | store_test.cpp | typesense_typesense/test/store_test.cpp |
#include <gtest/gtest.h>
#include <vector>
#include <store.h>
#include <string_utils.h>
TEST(StoreTest, GetUpdatesSince) {
std::string primary_store_path = "/tmp/typesense_test/primary_store_test";
LOG(INFO) << "Truncating and creating: " << primary_store_path;
system(("rm -rf "+primary_store_path+" && mkdir -p "+primary_store_path).c_str());
// add some records, get the updates and restore them in a new store
Store primary_store(primary_store_path, 24*60*60, 1024, false);
// on a fresh store, sequence number is 0
Option<std::vector<std::string>*> updates_op = primary_store.get_updates_since(0, 10);
ASSERT_TRUE(updates_op.ok());
ASSERT_EQ(0, updates_op.get()->size());
ASSERT_EQ(0, primary_store.get_latest_seq_number());
delete updates_op.get();
// get_updates_since(1) == get_updates_since(0)
updates_op = primary_store.get_updates_since(1, 10);
ASSERT_TRUE(updates_op.ok());
ASSERT_EQ(0, updates_op.get()->size());
ASSERT_EQ(0, primary_store.get_latest_seq_number());
delete updates_op.get();
// querying for a seq_num > 1 on a fresh store
updates_op = primary_store.get_updates_since(2, 10);
ASSERT_FALSE(updates_op.ok());
ASSERT_EQ("Unable to fetch updates. Master's latest sequence number is 0 but "
"requested sequence number is 2", updates_op.error());
// get_updates_since(1) == get_updates_since(0) even after inserting a record
primary_store.insert("foo1", "bar1");
ASSERT_EQ(1, primary_store.get_latest_seq_number());
updates_op = primary_store.get_updates_since(1, 10);
std::cout << updates_op.error() << std::endl;
ASSERT_TRUE(updates_op.ok());
ASSERT_EQ(1, updates_op.get()->size());
delete updates_op.get();
updates_op = primary_store.get_updates_since(0, 10);
ASSERT_TRUE(updates_op.ok());
ASSERT_EQ(1, updates_op.get()->size());
delete updates_op.get();
// add more records
primary_store.insert("foo2", "bar2");
primary_store.insert("foo3", "bar3");
ASSERT_EQ(3, primary_store.get_latest_seq_number());
updates_op = primary_store.get_updates_since(0, 10);
ASSERT_EQ(3, updates_op.get()->size());
delete updates_op.get();
updates_op = primary_store.get_updates_since(1, 10);
ASSERT_EQ(3, updates_op.get()->size());
delete updates_op.get();
updates_op = primary_store.get_updates_since(3, 10);
ASSERT_EQ(1, updates_op.get()->size());
delete updates_op.get();
std::string replica_store_path = "/tmp/typesense_test/replica_store_test";
LOG(INFO) << "Truncating and creating: " << replica_store_path;
system(("rm -rf "+replica_store_path+" && mkdir -p "+replica_store_path).c_str());
Store replica_store(replica_store_path, 24*60*60, 1024, false);
rocksdb::DB* replica_db = replica_store._get_db_unsafe();
updates_op = primary_store.get_updates_since(0, 10);
for(const std::string & update: *updates_op.get()) {
// Do Base64 encoding and decoding as we would in the API layer
const std::string update_encoded = StringUtils::base64_encode(update);
const std::string update_decoded = StringUtils::base64_decode(update_encoded);
rocksdb::WriteBatch write_batch(update_decoded);
replica_db->Write(rocksdb::WriteOptions(), &write_batch);
}
delete updates_op.get();
std::string value;
for(auto i=1; i<=3; i++) {
replica_store.get(std::string("foo")+std::to_string(i), value);
ASSERT_EQ(std::string("bar")+std::to_string(i), value);
}
// Ensure that updates are limited to max_updates argument
updates_op = primary_store.get_updates_since(0, 10);
ASSERT_EQ(3, updates_op.get()->size());
delete updates_op.get();
// sequence numbers 0 and 1 are the same
updates_op = primary_store.get_updates_since(0, 10);
ASSERT_EQ(3, updates_op.get()->size());
delete updates_op.get();
updates_op = primary_store.get_updates_since(1, 10);
ASSERT_EQ(3, updates_op.get()->size());
delete updates_op.get();
updates_op = primary_store.get_updates_since(3, 100);
ASSERT_TRUE(updates_op.ok());
ASSERT_EQ(1, updates_op.get()->size());
delete updates_op.get();
updates_op = primary_store.get_updates_since(4, 100);
ASSERT_TRUE(updates_op.ok());
ASSERT_EQ(0, updates_op.get()->size());
delete updates_op.get();
updates_op = primary_store.get_updates_since(50, 100);
ASSERT_FALSE(updates_op.ok());
ASSERT_EQ("Unable to fetch updates. Master's latest sequence number is 3 but "
"requested sequence number is 50", updates_op.error());
}
TEST(StoreTest, GetUpdateSinceInvalidIterator) {
std::string primary_store_path = "/tmp/typesense_test/primary_store_test";
LOG(INFO) << "Truncating and creating: " << primary_store_path;
system(("rm -rf "+primary_store_path+" && mkdir -p "+primary_store_path).c_str());
// add some records, get the updates and restore them in a new store
Store primary_store(primary_store_path, 0, 0, true); // disable WAL
primary_store.insert("foo1", "bar1");
primary_store.insert("foo2", "bar2");
primary_store.insert("foo3", "bar3");
primary_store.insert("foo4", "bar4");
primary_store.flush();
Option<std::vector<std::string>*> updates_op = primary_store.get_updates_since(2, 10);
ASSERT_FALSE(updates_op.ok());
ASSERT_EQ("Invalid iterator. Master's latest sequence number is 4 but updates are requested from sequence number 2. "
"The master's WAL entries might have expired (they are kept only for 24 hours).", updates_op.error());
}
TEST(StoreTest, Contains) {
std::string primary_store_path = "/tmp/typesense_test/primary_store_test";
LOG(INFO) << "Truncating and creating: " << primary_store_path;
system(("rm -rf "+primary_store_path+" && mkdir -p "+primary_store_path).c_str());
// add some records, flush and try to query
Store primary_store(primary_store_path, 0, 0, true); // disable WAL
primary_store.insert("foo1", "bar1");
primary_store.insert("foo2", "bar2");
primary_store.flush();
ASSERT_EQ(true, primary_store.contains("foo1"));
ASSERT_EQ(true, primary_store.contains("foo2"));
ASSERT_EQ(false, primary_store.contains("foo"));
ASSERT_EQ(false, primary_store.contains("foo3"));
// add more records, flush and query again
primary_store.insert("foo3", "bar1");
primary_store.insert("foo4", "bar2");
primary_store.flush();
ASSERT_EQ(true, primary_store.contains("foo1"));
ASSERT_EQ(true, primary_store.contains("foo3"));
ASSERT_EQ(true, primary_store.contains("foo4"));
ASSERT_EQ(false, primary_store.contains("foo"));
ASSERT_EQ(false, primary_store.contains("foo5"));
}
| 6,803 | C++ | .cpp | 134 | 45.328358 | 124 | 0.668324 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

3,701 | collection_nested_fields_test.cpp | typesense_typesense/test/collection_nested_fields_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionNestedFieldsTest : public ::testing::Test {
protected:
Store* store;
CollectionManager& collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector <std::string> query_fields;
std::vector <sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_nested";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf " + state_dir_path + " && mkdir -p " + state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
tsl::htrie_map<char, field> get_nested_map(const std::vector<field>& nested_fields) {
tsl::htrie_map<char, field> map;
for(const auto& f: nested_fields) {
map.emplace(f.name, f);
}
return map;
}
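// Illustrative sketch, not the production field::flatten_doc: one way dot-joined flat keys such
// as "locations.address.city" can be derived from a nested document. This simplified version
// accumulates every leaf into an array keyed by its dot path; the real implementation (as the
// expected JSON in the test below shows) keeps plain scalars for paths that never pass through
// an array. The helper name is hypothetical.
[[maybe_unused]] static void flatten_sketch(const nlohmann::json& node, const std::string& prefix,
                                            nlohmann::json& out) {
    if(node.is_object()) {
        for(auto it = node.begin(); it != node.end(); ++it) {
            flatten_sketch(it.value(), prefix.empty() ? it.key() : prefix + "." + it.key(), out);
        }
    } else if(node.is_array()) {
        for(const auto& elem: node) {
            flatten_sketch(elem, prefix, out);  // array elements keep the parent's key, values accumulate
        }
    } else {
        if(!out.contains(prefix)) {
            out[prefix] = nlohmann::json::array();
        }
        out[prefix].push_back(node);  // e.g. out["locations.address.city"] = ["Beaverton","Thornhill"]
    }
}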
TEST_F(CollectionNestedFieldsTest, FlattenJSONObject) {
auto json_str = R"({
"company": {"name": "nike"},
"employees": { "num": 1200 },
"locations": [
{ "pincode": 100, "country": "USA",
"address": { "street": "One Bowerman Drive", "city": "Beaverton", "products": ["shoes", "tshirts"] }
},
{ "pincode": 200, "country": "Canada",
"address": { "street": "175 Commerce Valley", "city": "Thornhill", "products": ["sneakers", "shoes"] }
}
]}
)";
std::vector<field> nested_fields = {
field("locations", field_types::OBJECT_ARRAY, false)
};
// array of objects
std::vector<field> flattened_fields;
nlohmann::json doc = nlohmann::json::parse(json_str);
ASSERT_TRUE(field::flatten_doc(doc, get_nested_map(nested_fields), {}, false, flattened_fields).ok());
ASSERT_EQ(5, flattened_fields.size());
for(const auto& f: flattened_fields) {
ASSERT_TRUE(f.is_array());
}
auto expected_json = R"(
{
".flat": ["locations.address.city","locations.address.products","locations.address.street",
"locations.country", "locations.pincode"],
"company":{"name":"nike"},
"employees":{"num":1200},
"locations":[
{"address":{"city":"Beaverton","products":["shoes","tshirts"],
"street":"One Bowerman Drive"},"country":"USA","pincode":100},
{"address":{"city":"Thornhill","products":["sneakers","shoes"],
"street":"175 Commerce Valley"},"country":"Canada","pincode":200}
],
"locations.address.city":["Beaverton","Thornhill"],
"locations.address.products":["shoes","tshirts","sneakers","shoes"],
"locations.address.street":["One Bowerman Drive","175 Commerce Valley"],
"locations.country":["USA","Canada"],
"locations.pincode":[100,200]
}
)";
// handle order of generation differences between compilers (due to iteration of unordered map)
auto expected_flat_fields = doc[".flat"].get<std::vector<std::string>>();
std::sort(expected_flat_fields.begin(), expected_flat_fields.end());
doc[".flat"] = expected_flat_fields;
ASSERT_EQ(doc.dump(), nlohmann::json::parse(expected_json).dump());
// plain object
flattened_fields.clear();
doc = nlohmann::json::parse(json_str);
nested_fields = {
field("company", field_types::OBJECT, false)
};
ASSERT_TRUE(field::flatten_doc(doc, get_nested_map(nested_fields), {}, false, flattened_fields).ok());
expected_json = R"(
{
".flat": ["company.name"],
"company":{"name":"nike"},
"company.name":"nike",
"employees":{"num":1200},
"company.name":"nike",
"locations":[
{"address":{"city":"Beaverton","products":["shoes","tshirts"],
"street":"One Bowerman Drive"},"country":"USA","pincode":100},
{"address":{"city":"Thornhill","products":["sneakers","shoes"],"street":"175 Commerce Valley"},
"country":"Canada","pincode":200}
]
}
)";
ASSERT_EQ(doc.dump(), nlohmann::json::parse(expected_json).dump());
// plain object inside an array
flattened_fields.clear();
doc = nlohmann::json::parse(json_str);
nested_fields = {
field("locations.address", field_types::OBJECT, false)
};
ASSERT_FALSE(field::flatten_doc(doc, get_nested_map(nested_fields), {}, false, flattened_fields).ok()); // must be of type object_array
nested_fields = {
field("locations.address", field_types::OBJECT_ARRAY, false)
};
flattened_fields.clear();
ASSERT_TRUE(field::flatten_doc(doc, get_nested_map(nested_fields), {}, false, flattened_fields).ok());
expected_json = R"(
{
".flat": ["locations.address.city","locations.address.products","locations.address.street"],
"company":{"name":"nike"},
"employees":{"num":1200},
"locations":[
{"address":{"city":"Beaverton","products":["shoes","tshirts"],
"street":"One Bowerman Drive"},"country":"USA","pincode":100},
{"address":{"city":"Thornhill","products":["sneakers","shoes"],"street":"175 Commerce Valley"},
"country":"Canada","pincode":200}
],
"locations.address.city":["Beaverton","Thornhill"],
"locations.address.products":["shoes","tshirts","sneakers","shoes"],
"locations.address.street":["One Bowerman Drive","175 Commerce Valley"]
}
)";
// handle order of generation differences between compilers (due to iteration of unordered map)
expected_flat_fields = doc[".flat"].get<std::vector<std::string>>();
std::sort(expected_flat_fields.begin(), expected_flat_fields.end());
doc[".flat"] = expected_flat_fields;
ASSERT_EQ(doc.dump(), nlohmann::json::parse(expected_json).dump());
// primitive inside nested object
flattened_fields.clear();
doc = nlohmann::json::parse(json_str);
nested_fields = {
field("company.name", field_types::STRING, false)
};
ASSERT_TRUE(field::flatten_doc(doc, get_nested_map(nested_fields), {}, false, flattened_fields).ok());
expected_json = R"(
{
".flat": ["company.name"],
"company":{"name":"nike"},
"company.name":"nike",
"employees":{"num":1200},
"locations":[
{"address":{"city":"Beaverton","products":["shoes","tshirts"],
"street":"One Bowerman Drive"},"country":"USA","pincode":100},
{"address":{"city":"Thornhill","products":["sneakers","shoes"],"street":"175 Commerce Valley"},
"country":"Canada","pincode":200}
]
}
)";
ASSERT_EQ(doc.dump(), nlohmann::json::parse(expected_json).dump());
}
TEST_F(CollectionNestedFieldsTest, TestNestedArrayField) {
auto json_str = R"({
"company": {"name": "nike"},
"employees": {
"num": 1200,
"detail": {
"num_tags": 2,
"tags": ["plumber", "electrician"]
},
"details": [{
"num_tags": 2,
"tags": ["plumber", "electrician"]
}]
},
"locations": [
{ "pincode": 100, "country": "USA",
"address": { "street": "One Bowerman Drive", "city": "Beaverton", "products": ["shoes", "tshirts"] }
},
{ "pincode": 200, "country": "Canada",
"address": { "street": "175 Commerce Valley", "city": "Thornhill", "products": ["sneakers", "shoes"] }
}
]}
)";
std::vector<field> nested_fields = {
field("locations", field_types::OBJECT_ARRAY, false)
};
// array of objects
std::vector<field> flattened_fields;
nlohmann::json doc = nlohmann::json::parse(json_str);
ASSERT_TRUE(field::flatten_doc(doc, get_nested_map(nested_fields), {}, false, flattened_fields).ok());
ASSERT_EQ(5, flattened_fields.size());
for(const auto& f: flattened_fields) {
ASSERT_TRUE(f.is_array());
ASSERT_TRUE(f.nested_array);
}
flattened_fields.clear();
// test against whole object
nested_fields = {
field("employees", field_types::OBJECT, false)
};
ASSERT_TRUE(field::flatten_doc(doc, get_nested_map(nested_fields), {}, false, flattened_fields).ok());
ASSERT_EQ(5, flattened_fields.size());
for(const auto& f: flattened_fields) {
if(StringUtils::begins_with(f.name, "employees.details")) {
ASSERT_TRUE(f.nested_array);
} else {
ASSERT_FALSE(f.nested_array);
}
}
// test against deep paths
flattened_fields.clear();
doc = nlohmann::json::parse(json_str);
nested_fields = {
field("employees.details.num_tags", field_types::INT32_ARRAY, false),
field("employees.details.tags", field_types::STRING_ARRAY, false),
field("employees.detail.tags", field_types::STRING_ARRAY, false),
};
ASSERT_TRUE(field::flatten_doc(doc, get_nested_map(nested_fields), {}, false, flattened_fields).ok());
ASSERT_EQ(3, flattened_fields.size());
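// sort by field name so the positional assertions below are deterministic
// regardless of the flattening order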
std::sort(flattened_fields.begin(), flattened_fields.end(), [](field& a, field& b) {
return a.name < b.name;
});
ASSERT_EQ("employees.detail.tags",flattened_fields[0].name);
ASSERT_FALSE(flattened_fields[0].nested_array);
ASSERT_EQ("employees.details.num_tags",flattened_fields[1].name);
ASSERT_TRUE(flattened_fields[1].nested_array);
ASSERT_EQ("employees.details.tags",flattened_fields[2].name);
ASSERT_TRUE(flattened_fields[2].nested_array);
}
TEST_F(CollectionNestedFieldsTest, FlattenJSONObjectHandleErrors) {
auto json_str = R"({
"company": {"name": "nike"},
"employees": { "num": 1200 }
})";
std::vector<field> nested_fields = {
field("locations", field_types::OBJECT_ARRAY, false)
};
std::vector<field> flattened_fields;
nlohmann::json doc = nlohmann::json::parse(json_str);
auto flatten_op = field::flatten_doc(doc, get_nested_map(nested_fields), {}, false, flattened_fields);
ASSERT_FALSE(flatten_op.ok());
ASSERT_EQ("Field `locations` not found.", flatten_op.error());
nested_fields = {
field("company", field_types::INT32, false)
};
flattened_fields.clear();
flatten_op = field::flatten_doc(doc, get_nested_map(nested_fields), {}, false, flattened_fields);
ASSERT_FALSE(flatten_op.ok());
ASSERT_EQ("Field `company` has an incorrect type.", flatten_op.error());
}
TEST_F(CollectionNestedFieldsTest, FlattenStoredDoc) {
auto stored_doc = R"({
"employees": { "num": 1200 },
"foo": "bar",
"details": [{"name": "foo", "year": 2000}]
})"_json;
tsl::htrie_map<char, field> schema;
schema.emplace("employees.num", field("employees.num", field_types::INT32, false));
schema.emplace("details.name", field("details.name", field_types::STRING_ARRAY, false));
schema.emplace("details.year", field("details.year", field_types::INT32_ARRAY, false));
std::vector<field> flattened_fields;
field::flatten_doc(stored_doc, schema, {}, true, flattened_fields);
ASSERT_EQ(3, stored_doc[".flat"].size());
ASSERT_EQ(7, stored_doc.size());
ASSERT_EQ(1, stored_doc.count("employees.num"));
ASSERT_EQ(1, stored_doc.count("details.name"));
ASSERT_EQ(1, stored_doc.count("details.year"));
}
TEST_F(CollectionNestedFieldsTest, CompactNestedFields) {
auto stored_doc = R"({
"company_name": "Acme Corp",
"display_address": {
"city": "LA",
"street": "Lumbard St"
},
"id": "314",
"location_addresses": [
{
"city": "Columbus",
"street": "Yale St"
},
{
"city": "Soda Springs",
"street": "5th St"
}
],
"num_employees": 10,
"primary_address": {
"city": "Los Angeles",
"street": "123 Lumbard St"
}
})"_json;
tsl::htrie_map<char, field> schema;
schema.emplace("location_addresses.city", field("location_addresses.city", field_types::STRING_ARRAY, true));
schema.emplace("location_addresses", field("location_addresses", field_types::OBJECT_ARRAY, true));
schema.emplace("primary_address", field("primary_address", field_types::OBJECT, true));
schema.emplace("primary_address.city", field("primary_address.city", field_types::STRING, true));
schema.emplace("location_addresses.street", field("location_addresses.street", field_types::STRING_ARRAY, true));
schema.emplace("primary_address.street", field("primary_address.street", field_types::STRING, true));
field::compact_nested_fields(schema);
ASSERT_EQ(2, schema.size());
ASSERT_EQ(1, schema.count("primary_address"));
ASSERT_EQ(1, schema.count("location_addresses"));
std::vector<field> flattened_fields;
field::flatten_doc(stored_doc, schema, {}, true, flattened_fields);
ASSERT_EQ(2, stored_doc["location_addresses.city"].size());
ASSERT_EQ(2, stored_doc["location_addresses.street"].size());
}
TEST_F(CollectionNestedFieldsTest, SearchOnFieldsOnWildcardSchema) {
std::vector<field> fields = {field(".*", field_types::AUTO, false, true)};
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO, {}, {}, true);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc = R"({
"id": "0",
"company": {"name": "Nike Inc."},
"employees": {
"num": 1200,
"tags": ["senior plumber", "electrician"]
},
"locations": [
{ "pincode": 100, "country": "USA",
"address": { "street": "One Bowerman Drive", "city": "Beaverton", "products": ["shoes", "tshirts"] }
},
{ "pincode": 200, "country": "Canada",
"address": { "street": "175 Commerce Valley", "city": "Thornhill", "products": ["sneakers", "shoes"] }
}
]
})"_json;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json create_res = add_op.get();
ASSERT_EQ(doc.dump(), create_res.dump());
auto results = coll1->search("electrician", {"employees"}, "", {}, sort_fields,
{0}, 10, 1, FREQUENCY, {true}).get();
auto highlight_doc = R"({
"employees": {
"num": {
"matched_tokens": [],
"snippet": "1200"
},
"tags": [
{
"matched_tokens": [],
"snippet": "senior plumber"
},
{
"matched_tokens": [
"electrician"
],
"snippet": "<mark>electrician</mark>"
}
]
}
})"_json;
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
// search both simply nested and deeply nested array-of-objects
results = coll1->search("electrician commerce", {"employees", "locations"}, "", {}, sort_fields,
{0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(doc, results["hits"][0]["document"]);
highlight_doc = R"({
"employees": {
"num": {
"matched_tokens": [],
"snippet": "1200"
},
"tags": [
{
"matched_tokens": [],
"snippet": "senior plumber"
},
{
"matched_tokens": [
"electrician"
],
"snippet": "<mark>electrician</mark>"
}
]
},
"locations": [
{
"address": {
"city": {
"matched_tokens": [],
"snippet": "Beaverton"
},
"products": [
{
"matched_tokens": [],
"snippet": "shoes"
},
{
"matched_tokens": [],
"snippet": "tshirts"
}
],
"street": {
"matched_tokens": [],
"snippet": "One Bowerman Drive"
}
},
"country": {
"matched_tokens": [],
"snippet": "USA"
},
"pincode": {
"matched_tokens": [],
"snippet": "100"
}
},
{
"address": {
"city": {
"matched_tokens": [],
"snippet": "Thornhill"
},
"products": [
{
"matched_tokens": [],
"snippet": "sneakers"
},
{
"matched_tokens": [],
"snippet": "shoes"
}
],
"street": {
"matched_tokens": [
"Commerce"
],
"snippet": "175 <mark>Commerce</mark> Valley"
}
},
"country": {
"matched_tokens": [],
"snippet": "Canada"
},
"pincode": {
"matched_tokens": [],
"snippet": "200"
}
}
]
})"_json;
// ensure that flat fields are not returned in response
ASSERT_EQ(0, results["hits"][0].count(".flat"));
ASSERT_EQ(0, results["hits"][0].count("employees.tags"));
// raw document in the store will not have the .flat meta key or actual flat fields
nlohmann::json raw_doc;
coll1->get_document_from_store(0, raw_doc, true);
ASSERT_EQ(0, raw_doc.count(".flat"));
ASSERT_EQ(0, raw_doc.count("employees.tags"));
ASSERT_EQ(4, raw_doc.size());
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
ASSERT_EQ(0, results["hits"][0]["highlights"].size());
// after an update, the .flat meta key and the flattened fields should still not be present on disk
doc["employees"]["tags"][0] = "senior plumber 2";
auto update_op = coll1->add(doc.dump(), UPSERT);
ASSERT_TRUE(update_op.ok());
raw_doc.clear();
coll1->get_document_from_store(0, raw_doc, true);
ASSERT_EQ(0, raw_doc.count(".flat"));
ASSERT_EQ(0, raw_doc.count("employees.tags"));
ASSERT_EQ(4, raw_doc.size());
// search specific nested fields, only matching field is highlighted by default
results = coll1->search("one shoe", {"locations.address.street", "employees.tags"}, "", {}, sort_fields,
{0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(doc, results["hits"][0]["document"]);
highlight_doc = R"({
"locations":[
{
"address":{
"street":{
"matched_tokens":[
"One"
],
"snippet":"<mark>One</mark> Bowerman Drive"
}
}
},
{
"address":{
"street":{
"matched_tokens":[],
"snippet":"175 Commerce Valley"
}
}
}
]
})"_json;
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
ASSERT_EQ(0, results["hits"][0]["highlights"].size());
// try to search nested fields that don't exist
auto res_op = coll1->search("one shoe", {"locations.address.str"}, "", {}, sort_fields,
{0}, 10, 1, FREQUENCY, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `locations.address.str` in the schema.", res_op.error());
res_op = coll1->search("one shoe", {"locations.address.foo"}, "", {}, sort_fields,
{0}, 10, 1, FREQUENCY, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `locations.address.foo` in the schema.", res_op.error());
res_op = coll1->search("one shoe", {"locations.foo.street"}, "", {}, sort_fields,
{0}, 10, 1, FREQUENCY, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `locations.foo.street` in the schema.", res_op.error());
}
TEST_F(CollectionNestedFieldsTest, IncludeExcludeFieldsPruning) {
auto doc_str = R"({
"company": {"name": "Nike Inc."},
"employees": {
"num": 1200,
"tags": ["senior plumber", "electrician"]
},
"employee": true,
"locations": [
{ "pincode": 100, "country": "USA",
"address": { "street": "One Bowerman Drive", "city": "Beaverton", "products": ["shoes", "tshirts"] }
},
{ "pincode": 200, "country": "Canada",
"address": { "street": "175 Commerce Valley", "city": "Thornhill", "products": ["sneakers", "shoes"] }
}
],
"one_obj_arr": [{"foo": "bar"}]
})";
auto doc = nlohmann::json::parse(doc_str);
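// excluding the only sub-field of an object array should not drop the array itself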
Collection::prune_doc(doc, tsl::htrie_set<char>(), {"one_obj_arr.foo"});
ASSERT_EQ(1, doc.count("one_obj_arr"));
ASSERT_EQ(1, doc["one_obj_arr"].size());
// handle non-existing exclude field
doc = nlohmann::json::parse(doc_str);
Collection::prune_doc(doc, {"employees.num", "employees.tags"}, {"foobar"});
ASSERT_EQ(1, doc.size());
ASSERT_EQ(1, doc.count("employees"));
ASSERT_EQ(2, doc["employees"].size());
// select a specific field within nested array object
doc = nlohmann::json::parse(doc_str);
Collection::prune_doc(doc, {"locations.address.city"}, tsl::htrie_set<char>());
ASSERT_EQ(R"({"locations":[{"address":{"city":"Beaverton"}},{"address":{"city":"Thornhill"}}]})", doc.dump());
// select 2 fields within nested array object
doc = nlohmann::json::parse(doc_str);
Collection::prune_doc(doc, {"locations.address.city", "locations.address.products"}, tsl::htrie_set<char>());
ASSERT_EQ(R"({"locations":[{"address":{"city":"Beaverton","products":["shoes","tshirts"]}},{"address":{"city":"Thornhill","products":["sneakers","shoes"]}}]})", doc.dump());
// exclusion takes preference
doc = nlohmann::json::parse(doc_str);
Collection::prune_doc(doc, {"locations.address.city"}, {"locations.address.city"});
ASSERT_EQ(R"({"locations":[{},{}]})", doc.dump());
// include object, exclude sub-fields
doc = nlohmann::json::parse(doc_str);
Collection::prune_doc(doc, {"locations.address.city", "locations.address.products"}, {"locations.address.city"});
ASSERT_EQ(R"({"locations":[{"address":{"products":["shoes","tshirts"]}},{"address":{"products":["sneakers","shoes"]}}]})", doc.dump());
}
TEST_F(CollectionNestedFieldsTest, ShouldNotPruneEmptyFields) {
auto doc_str = R"({
"name": "Foo",
"obj": {},
"obj_arr": [{}],
"price": {
"per_unit": {},
"items": [{}]
}
})";
auto doc = nlohmann::json::parse(doc_str);
auto expected_doc = doc;
Collection::prune_doc(doc, tsl::htrie_set<char>(), tsl::htrie_set<char>());
ASSERT_EQ(expected_doc.dump(), doc.dump());
}
TEST_F(CollectionNestedFieldsTest, IncludeFieldsSearch) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "name", "type": "object" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"name": {"first": "John", "last": "Smith"}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("*", {},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, {"name.first"},
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(1, results["hits"][0]["document"].size());
ASSERT_EQ(1, results["hits"][0]["document"].count("name"));
ASSERT_EQ(1, results["hits"][0]["document"]["name"].size());
}
TEST_F(CollectionNestedFieldsTest, HighlightNestedFieldFully) {
std::vector<field> fields = {field(".*", field_types::AUTO, false, true)};
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO, {}, {}, true);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc = R"({
"company_names": ["Space Corp. LLC", "Drive One Inc."],
"company": {"names": ["Space Corp. LLC", "Drive One Inc."]},
"locations": [
{ "pincode": 100, "country": "USA",
"address": { "street": "One Bowerman Drive", "city": "Beaverton", "products": ["shoes", "tshirts"] }
},
{ "pincode": 200, "country": "Canada",
"address": { "street": "175 Commerce Drive", "city": "Thornhill", "products": ["sneakers", "shoes"] }
}
]
})"_json;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("One", {"locations.address"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "locations.address").get();
ASSERT_EQ(1, results["hits"].size());
auto highlight_doc = R"({
"locations":[
{
"address":{
"city":{
"matched_tokens":[
],
"snippet":"Beaverton",
"value":"Beaverton"
},
"products":[
{
"matched_tokens":[
],
"snippet":"shoes",
"value":"shoes"
},
{
"matched_tokens":[
],
"snippet":"tshirts",
"value":"tshirts"
}
],
"street":{
"matched_tokens":[
"One"
],
"snippet":"<mark>One</mark> Bowerman Drive",
"value":"<mark>One</mark> Bowerman Drive"
}
}
},
{
"address":{
"city":{
"matched_tokens":[
],
"snippet":"Thornhill",
"value":"Thornhill"
},
"products":[
{
"matched_tokens":[
],
"snippet":"sneakers",
"value":"sneakers"
},
{
"matched_tokens":[
],
"snippet":"shoes",
"value":"shoes"
}
],
"street":{
"matched_tokens":[
],
"snippet":"175 Commerce Drive",
"value":"175 Commerce Drive"
}
}
}
]
})"_json;
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
ASSERT_EQ(0, results["hits"][0]["highlights"].size());
// repeating token
results = coll1->search("drive", {"locations.address"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "locations.address").get();
ASSERT_EQ(1, results["hits"].size());
highlight_doc = R"({
"locations":[
{
"address":{
"city":{
"matched_tokens":[
],
"snippet":"Beaverton",
"value":"Beaverton"
},
"products":[
{
"matched_tokens":[
],
"snippet":"shoes",
"value":"shoes"
},
{
"matched_tokens":[
],
"snippet":"tshirts",
"value":"tshirts"
}
],
"street":{
"matched_tokens":[
"Drive"
],
"snippet":"One Bowerman <mark>Drive</mark>",
"value":"One Bowerman <mark>Drive</mark>"
}
}
},
{
"address":{
"city":{
"matched_tokens":[
],
"snippet":"Thornhill",
"value":"Thornhill"
},
"products":[
{
"matched_tokens":[
],
"snippet":"sneakers",
"value":"sneakers"
},
{
"matched_tokens":[
],
"snippet":"shoes",
"value":"shoes"
}
],
"street":{
"matched_tokens":[
"Drive"
],
"snippet":"175 Commerce <mark>Drive</mark>",
"value":"175 Commerce <mark>Drive</mark>"
}
}
}
]
})"_json;
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
ASSERT_EQ(0, results["hits"][0]["highlights"].size());
// nested array of array, highlighting parent of searched nested field
results = coll1->search("shoes", {"locations.address.products"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "locations.address",
20, {}, {}, {}, 0, "<mark>", "</mark>", {}, 1000, true, false, true,
"locations.address").get();
ASSERT_EQ(1, results["hits"].size());
highlight_doc = R"({
"locations":[
{
"address":{
"city":{
"matched_tokens":[
],
"snippet":"Beaverton",
"value":"Beaverton"
},
"products":[
{
"matched_tokens":[
"shoes"
],
"snippet":"<mark>shoes</mark>",
"value":"<mark>shoes</mark>"
},
{
"matched_tokens":[
],
"snippet":"tshirts",
"value":"tshirts"
}
],
"street":{
"matched_tokens":[
],
"snippet":"One Bowerman Drive",
"value":"One Bowerman Drive"
}
}
},
{
"address":{
"city":{
"matched_tokens":[
],
"snippet":"Thornhill",
"value":"Thornhill"
},
"products":[
{
"matched_tokens":[
],
"snippet":"sneakers",
"value":"sneakers"
},
{
"matched_tokens":[
"shoes"
],
"snippet":"<mark>shoes</mark>",
"value":"<mark>shoes</mark>"
}
],
"street":{
"matched_tokens":[
],
"snippet":"175 Commerce Drive",
"value":"175 Commerce Drive"
}
}
}
]
})"_json;
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
// full highlighting only one of the 3 highlight fields
results = coll1->search("drive", {"company.names", "company_names", "locations.address"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "locations.address",
20, {}, {}, {}, 0, "<mark>", "</mark>", {}, 1000, true, false, true,
"company.names,company_names,locations.address").get();
highlight_doc = R"({
"company": {
"names": [
{
"matched_tokens": [],
"snippet": "Space Corp. LLC"
},
{
"matched_tokens": [
"Drive"
],
"snippet": "<mark>Drive</mark> One Inc."
}
]
},
"company_names": [
{
"matched_tokens": [],
"snippet": "Space Corp. LLC"
},
{
"matched_tokens": [
"Drive"
],
"snippet": "<mark>Drive</mark> One Inc."
}
],
"locations": [
{
"address": {
"city": {
"matched_tokens": [],
"snippet": "Beaverton",
"value": "Beaverton"
},
"products": [
{
"matched_tokens": [],
"snippet": "shoes",
"value": "shoes"
},
{
"matched_tokens": [],
"snippet": "tshirts",
"value": "tshirts"
}
],
"street": {
"matched_tokens": [
"Drive"
],
"snippet": "One Bowerman <mark>Drive</mark>",
"value": "One Bowerman <mark>Drive</mark>"
}
}
},
{
"address": {
"city": {
"matched_tokens": [],
"snippet": "Thornhill",
"value": "Thornhill"
},
"products": [
{
"matched_tokens": [],
"snippet": "sneakers",
"value": "sneakers"
},
{
"matched_tokens": [],
"snippet": "shoes",
"value": "shoes"
}
],
"street": {
"matched_tokens": [
"Drive"
],
"snippet": "175 Commerce <mark>Drive</mark>",
"value": "175 Commerce <mark>Drive</mark>"
}
}
}
]
})"_json;
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
// if highlight fields not provided, only matching sub-fields should appear in highlight
results = coll1->search("space", {"company.names", "company_names", "locations.address"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
highlight_doc = R"({
"company":{
"names":[
{
"matched_tokens":[
"Space"
],
"snippet":"<mark>Space</mark> Corp. LLC"
},
{
"matched_tokens":[],
"snippet":"Drive One Inc."
}
]
},
"company_names":[
{
"matched_tokens":[
"Space"
],
"snippet":"<mark>Space</mark> Corp. LLC"
},
{
"matched_tokens":[],
"snippet":"Drive One Inc."
}
]
})"_json;
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
// only a single highlight full field provided
results = coll1->search("space", {"company.names", "company_names", "locations.address"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "company.names").get();
highlight_doc = R"({
"company": {
"names": [
{
"matched_tokens": [
"Space"
],
"snippet": "<mark>Space</mark> Corp. LLC",
"value": "<mark>Space</mark> Corp. LLC"
},
{
"matched_tokens": [],
"snippet": "Drive One Inc.",
"value": "Drive One Inc."
}
]
},
"company_names": [
{
"matched_tokens": [
"Space"
],
"snippet": "<mark>Space</mark> Corp. LLC"
},
{
"matched_tokens": [],
"snippet": "Drive One Inc."
}
]
})"_json;
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
// try to highlight `id` field
results = coll1->search("shoes", {"locations.address.products"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "id",
20, {}, {}, {}, 0, "<mark>", "</mark>", {}, 1000, true, false, true,
"id").get();
ASSERT_TRUE(results["hits"][0]["highlight"].empty());
}
TEST_F(CollectionNestedFieldsTest, FieldsWithExplicitSchema) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "details", "type": "object", "optional": false },
{"name": "company.name", "type": "string", "optional": false, "facet": true },
{"name": "locations", "type": "object[]", "optional": false }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json coll_summary = coll1->get_summary_json();
ASSERT_EQ(1, coll_summary.count("enable_nested_fields"));
for(auto& f: coll_summary["fields"]) {
ASSERT_EQ(0, f.count(fields::nested));
ASSERT_EQ(0, f.count(fields::nested_array));
}
auto doc = R"({
"company_names": ["Quick brown fox jumped.", "The red fox was not fast."],
"details": {
"description": "Quick set, go.",
"names": ["Quick brown fox jumped.", "The red fox was not fast."]
},
"company": {"name": "Quick and easy fix."},
"locations": [
{
"address": { "street": "Brown Shade Avenue" }
},
{
"address": { "street": "Graywolf Lane" }
}
]
})"_json;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
ASSERT_TRUE(coll1->get_schema()["company.name"].facet);
ASSERT_FALSE(coll1->get_schema()["company.name"].optional);
// search both simply nested and deeply nested array-of-objects
auto results = coll1->search("brown fox", {"details", "locations"},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
auto snippet_doc = R"({
"details": {
"description": {
"matched_tokens": [],
"snippet": "Quick set, go."
},
"names": [
{
"matched_tokens": [
"brown",
"fox"
],
"snippet": "Quick <mark>brown</mark> <mark>fox</mark> jumped."
},
{
"matched_tokens": [
"fox"
],
"snippet": "The red <mark>fox</mark> was not fast."
}
]
},
"locations": [
{
"address": {
"street": {
"matched_tokens": [
"Brown"
],
"snippet": "<mark>Brown</mark> Shade Avenue"
}
}
},
{
"address": {
"street": {
"matched_tokens": [],
"snippet": "Graywolf Lane"
}
}
}
]
})"_json;
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(snippet_doc.dump(), results["hits"][0]["highlight"].dump());
results = coll1->search("fix", {"company.name"},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(1, results["hits"].size());
// explicit nested array field (locations.address.street)
schema = R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": "details", "type": "object", "optional": false },
{"name": "company.name", "type": "string", "optional": false },
{"name": "locations.address.street", "type": "string[]", "optional": false }
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
add_op = coll2->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
results = coll2->search("brown", {"locations.address.street"},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(1, results["hits"].size());
snippet_doc = R"({
"locations": [
{
"address": {
"street": {
"matched_tokens": [
"Brown"
],
"snippet": "<mark>Brown</mark> Shade Avenue"
}
}
},
{
"address": {
"street": {
"matched_tokens": [],
"snippet": "Graywolf Lane"
}
}
}
]
})"_json;
ASSERT_EQ(snippet_doc.dump(), results["hits"][0]["highlight"].dump());
// explicit partial array object field in the schema
schema = R"({
"name": "coll3",
"enable_nested_fields": true,
"fields": [
{"name": "details", "type": "object", "optional": false },
{"name": "company.name", "type": "string", "optional": false },
{"name": "locations.address", "type": "object[]", "optional": false }
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll3 = op.get();
add_op = coll3->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
results = coll3->search("brown", {"locations.address"},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(1, results["hits"].size());
snippet_doc = R"({
"locations": [
{
"address": {
"street": {
"matched_tokens": [
"Brown"
],
"snippet": "<mark>Brown</mark> Shade Avenue"
}
}
},
{
"address": {
"street": {
"matched_tokens": [],
"snippet": "Graywolf Lane"
}
}
}
]
})"_json;
ASSERT_EQ(snippet_doc.dump(), results["hits"][0]["highlight"].dump());
// non-optional object field validation (details)
auto doc2 = R"({
"company_names": ["Quick brown fox jumped.", "The red fox was not fast."],
"company": {"name": "Quick and easy fix."},
"locations": [
{
"address": { "street": "Foo bar street" }
}
]
})"_json;
add_op = coll3->add(doc2.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `details` not found.", add_op.error());
// check fields and their properties
auto coll_fields = coll1->get_fields();
ASSERT_EQ(6, coll_fields.size());
for(size_t i = 0; i < coll_fields.size(); i++) {
auto& coll_field = coll_fields[i];
if(i <= 2) {
// original 3 explicit fields will be non-optional, but the sub-properties will be optional
ASSERT_FALSE(coll_field.optional);
} else {
ASSERT_TRUE(coll_field.optional);
}
}
// delete the doc from coll1 and query again
coll1->remove("0");
results = coll1->search("brown", {"locations.address"},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// use remove_if_found API
coll2->remove_if_found(0);
results = coll2->search("brown", {"locations.address"},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(0, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, ExplicitSchemaOptionalFieldValidation) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "details", "type": "object", "optional": true },
{"name": "company.name", "type": "string", "optional": true },
{"name": "locations", "type": "object[]", "optional": true },
{"name": "blocks.text.description", "type": "string[]", "optional": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
// when a nested field is null it should be allowed
auto doc1 = R"({
"company": {"name": null}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// check the same with nested array type
doc1 = R"({
"blocks": {"text": [{"description": null}]}
})"_json;
add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// no optional field is present and that should be allowed
doc1 = R"({
"foo": "bar"
})"_json;
add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// some parts of an optional field are present in a subsequently indexed doc
auto doc2 = R"({
"details": {"name": "foo"}
})"_json;
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto doc3 = R"({
"details": {"age": 30}
})"_json;
add_op = coll1->add(doc3.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// check fields and their properties
auto coll_fields = coll1->get_fields();
ASSERT_EQ(6, coll_fields.size());
for(auto& coll_field : coll_fields) {
ASSERT_TRUE(coll_field.optional);
}
}
TEST_F(CollectionNestedFieldsTest, ExplicitSchemaForNestedArrayTypeValidation) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "blocks.text", "type": "object[]"},
{"name": "blocks.text.description", "type": "string"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"blocks": {"text": [{"description": "Hello world."}]}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `blocks.text.description` has an incorrect type. "
"Hint: field inside an array of objects must be an array type as well.", add_op.error());
}
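// Counterpart of the validation above: a string field nested inside an object[] must
// itself be declared as an array type (string[]), as the following schema does.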
TEST_F(CollectionNestedFieldsTest, NestedStringArrayHighlight) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "passages", "type": "object[]"},
{"name": "passages.text", "type": "string[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc_str = std::string (R"({"passages": [{"text": "In January 1880, two of Tesla's uncles put together enough money to help him )") +
"leave Gospić for Prague, where he was to study. He arrived too late to enroll at Charles-Ferdinand " +
"University; he had never studied Greek, a required subject; and he was illiterate in Czech, another " +
"required subject. Tesla did, however, attend lectures in philosophy at the university as an auditor " +
"but he did not receive grades for the courses." + R"("}]})";
auto doc1 = nlohmann::json::parse(doc_str);
coll1->add(doc1.dump(), CREATE);
auto results = coll1->search("grades", {"passages.text"},
"", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ("he did not receive <mark>grades</mark> for the courses.",
results["hits"][0]["highlight"]["passages"][0]["text"]["snippet"].get<std::string>());
}
TEST_F(CollectionNestedFieldsTest, OptionalNestedOptionalObjectArrStringField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"facet":true,"name":"data","optional":false,"type":"object"},
{"facet":false,"name":"data.locations.stateShort","optional":true,"type":"string[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"data": {
"locations": [
{
"stateShort": null
}
]
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc1 = R"({
"data": {
"locations": [
{
"stateShort": null
},
{
"stateShort": "NY"
}
]
}
})"_json;
coll1->add(doc1.dump(), CREATE);
auto results = coll1->search("ny", {"data.locations.stateShort"},
"", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, OptionalNestedNonOptionalObjectArrStringField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"facet":true,"name":"data","type":"object"},
{"facet":false,"name":"data.locations.stateShort","type":"string[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"data": {
"locations": [
{
"stateShort": null
}
]
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `data.locations.stateShort` has been declared in the schema, but is not found in the document.",
add_op.error());
doc1 = R"({
"data": {
"locations": [
{
"stateShort": null
},
{
"stateShort": "NY"
}
]
}
})"_json;
coll1->add(doc1.dump(), CREATE);
auto results = coll1->search("ny", {"data.locations.stateShort"},
"", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, UnindexedNestedFieldShouldNotClutterSchema) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "block", "type": "object", "optional": true, "index": false}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"block": {"text": "Hello world."}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// child fields should not become part of schema
ASSERT_EQ(1, coll1->get_fields().size());
}
TEST_F(CollectionNestedFieldsTest, UnindexedNonOptionalFieldShouldBeAllowed) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "block", "type": "object", "index": false}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"block": {"text": "Hello world."}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// child fields should not become part of schema
ASSERT_EQ(1, coll1->get_fields().size());
}
TEST_F(CollectionNestedFieldsTest, SortByNestedField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "details", "type": "object", "optional": false },
{"name": "company.num_employees", "type": "int32", "optional": false }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"details": {"count": 1000},
"company": {"num_employees": 2000}
})"_json;
auto doc2 = R"({
"details": {"count": 2000},
"company": {"num_employees": 1000}
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
ASSERT_TRUE(coll1->add(doc2.dump(), CREATE).ok());
std::vector<sort_by> sort_fields = { sort_by("details.count", "ASC") };
auto results = coll1->search("*", {},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
sort_fields = { sort_by("company.num_employees", "ASC") };
results = coll1->search("*", {},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
// with auto schema
schema = R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
ASSERT_TRUE(coll2->add(doc1.dump(), CREATE).ok());
ASSERT_TRUE(coll2->add(doc2.dump(), CREATE).ok());
sort_fields = { sort_by("details.count", "ASC") };
results = coll2->search("*", {},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
sort_fields = { sort_by("company.num_employees", "ASC") };
results = coll2->search("*", {},
"", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
}
TEST_F(CollectionNestedFieldsTest, OnlyExplicitSchemaFieldMustBeIndexedInADoc) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "company.num_employees", "type": "int32", "optional": false },
{"name": "company.founded", "type": "int32", "optional": false }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"company": {"num_employees": 2000, "founded": 1976, "year": 2000}
})"_json;
auto create_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(create_op.ok());
auto fs = coll1->get_fields();
ASSERT_EQ(2, coll1->get_fields().size());
}
TEST_F(CollectionNestedFieldsTest, VerifyDisableOfNestedFields) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"company": {"num_employees": 2000, "founded": 1976, "year": 2000},
"company_num_employees": 2000,
"company_founded": 1976
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto fs = coll1->get_fields();
ASSERT_EQ(3, coll1->get_fields().size());
// explicit schema
schema = R"({
"name": "coll2",
"fields": [
{"name": "company_num_employees", "type": "int32"},
{"name": "company_founded", "type": "int32"}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
ASSERT_TRUE(coll2->add(doc1.dump(), CREATE).ok());
fs = coll2->get_fields();
ASSERT_EQ(2, coll2->get_fields().size());
}
TEST_F(CollectionNestedFieldsTest, ExplicitDotSeparatedFieldsShouldHavePrecedence) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"company": {"num_employees": 1000, "ids": [1,2]},
"details": [{"name": "bar"}],
"company.num_employees": 2000,
"company.ids": [10],
"details.name": "foo"
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto fs = coll1->get_fields();
ASSERT_EQ(6, coll1->get_fields().size());
// simple nested object
auto results = coll1->search("*", {}, "company.num_employees: 2000", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("*", {}, "company.num_employees: 1000", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// nested array object
results = coll1->search("foo", {"details.name"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("bar", {"details.name"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// nested simple array
results = coll1->search("*", {}, "company.ids: 10", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("*", {}, "company.ids: 1", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// WITH EXPLICIT SCHEMA
schema = R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": "company.num_employees", "type": "int32"},
{"name": "company.ids", "type": "int32[]"},
{"name": "details.name", "type": "string[]"}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
auto doc2 = R"({
"company": {"num_employees": 1000, "ids": [1,2]},
"details": [{"name": "bar"}],
"company.num_employees": 2000,
"company.ids": [10],
"details.name": ["foo"]
})"_json;
ASSERT_TRUE(coll2->add(doc2.dump(), CREATE).ok());
// simple nested object
results = coll2->search("*", {}, "company.num_employees: 2000", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll2->search("*", {}, "company.num_employees: 1000", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// nested array object
results = coll2->search("foo", {"details.name"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll2->search("bar", {"details.name"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// nested simple array
results = coll2->search("*", {}, "company.ids: 10", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll2->search("*", {}, "company.ids: 1", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, NestedFieldWithExplicitWeight) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"company": {"num_employees": 2000, "founded": 1976},
"studies": [{"name": "College 1", "location": "USA"}]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto results = coll1->search("college", {"studies"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "category", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {1}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, ObjectArrayAllowEmpty) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "addresses", "type": "object[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"addresses": []
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
doc1 = R"({
"addresses": [{"street": "foobar"}]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
doc1 = R"({
"addresses": []
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
}
TEST_F(CollectionNestedFieldsTest, NestedFieldWithGeopointArray) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "addresses.geoPoint", "type": "geopoint[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"addresses": [{"geoPoint": [1.91, 23.5]}, {"geoPoint": [12.91, 23.5]}]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("*", {}, "addresses.geoPoint: (12.911, 23.5, 1 mi)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// with nested geopoint array
auto doc2 = R"({
"addresses": [{"geoPoint": [[1.91, 23.5]]}, {"geoPoint": [[1.91, 23.5], [1.95, 24.5]]}]
})"_json;
ASSERT_TRUE(coll1->add(doc2.dump(), CREATE).ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(2, results["found"].get<size_t>());
// simply nested geopoint array
auto doc3 = R"({
"addresses": {"geoPoint": [[1.91, 23.5]]}
})"_json;
ASSERT_TRUE(coll1->add(doc3.dump(), CREATE).ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(3, results["found"].get<size_t>());
// simply nested geopoint
// this technically should not be allowed, but it's tricky to detect, so we allow it
auto doc4 = R"({
"addresses": {"geoPoint": [1.91, 23.5]}
})"_json;
auto simple_geopoint_op = coll1->add(doc4.dump(), CREATE);
ASSERT_TRUE(simple_geopoint_op.ok());
// data validation
auto bad_doc = R"({
"addresses": [{"geoPoint": [1.91, "x"]}]
})"_json;
auto create_op = coll1->add(bad_doc.dump(), CREATE);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Field `addresses.geoPoint` has an incorrect type.", create_op.error());
bad_doc = R"({
"addresses": [{"geoPoint": [[1.91, "x"]]}]
})"_json;
create_op = coll1->add(bad_doc.dump(), CREATE);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Field `addresses.geoPoint` must be an array of geopoint.", create_op.error());
}
TEST_F(CollectionNestedFieldsTest, NestedFieldWithGeopoint) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "address.geoPoint", "type": "geopoint"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({"address": {"geoPoint": [19.07283, 72.88261]}})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("*", {}, "address.geoPoint: (19.07, 72.882, 1 mi)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// data validation
// with integer values
doc1 = R"({"address": {"geoPoint": [19, 72.88261]}})"_json;
add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc1 = R"({"address": {"geoPoint": [19.12, 72]}})"_json;
add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc1 = R"({"address": {"geoPoint": [19, 72]}})"_json;
add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto bad_doc = R"({
"address": {"geoPoint": [1.91, "x"]}
})"_json;
auto create_op = coll1->add(bad_doc.dump(), CREATE);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Field `address.geoPoint` has an incorrect type.", create_op.error());
bad_doc = R"({
"address": {"geoPoint": [[1.91, "x"]]}
})"_json;
create_op = coll1->add(bad_doc.dump(), CREATE);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Field `address.geoPoint` must be a 2 element array: [lat, lng].", create_op.error());
// with nested array field
bad_doc = R"({
"address": [
{"geoPoint": [1.91, 2.56]},
{"geoPoint": [2.91, 3.56]}
]
})"_json;
create_op = coll1->add(bad_doc.dump(), CREATE);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Field `address.geoPoint` has an incorrect type. "
"Hint: field inside an array of objects must be an array type as well.", create_op.error());
}
TEST_F(CollectionNestedFieldsTest, NestedFieldWithParentAndChildSchema) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "addresses.street", "type": "string[]"},
{"name": "addresses", "type": "object[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
// only parent field should be present
ASSERT_EQ(1, coll1->get_nested_fields().size());
ASSERT_EQ(1, coll1->get_nested_fields().count("addresses"));
// with children after object field
schema = R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": "addresses", "type": "object[]"},
{"name": "addresses.street", "type": "string[]"}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
ASSERT_EQ(1, coll2->get_nested_fields().size());
ASSERT_EQ(1, coll2->get_nested_fields().count("addresses"));
// only object in schema
schema = R"({
"name": "coll3",
"enable_nested_fields": true,
"fields": [
{"name": "addresses", "type": "object[]"}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll3 = op.get();
auto doc1 = R"({"addresses": [{"street": "foobar"}]})"_json;
auto add_op = coll3->add(doc1.dump(), CREATE);
ASSERT_EQ(1, coll3->get_nested_fields().size());
ASSERT_EQ(1, coll3->get_nested_fields().count("addresses"));
}
TEST_F(CollectionNestedFieldsTest, GroupByOnNestedFieldsWithWildcardSchema) {
std::vector<field> fields = {field(".*", field_types::AUTO, false, true),
field("education.name", field_types::STRING_ARRAY, true, true),
field("employee.num", field_types::INT32, true, true)};
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO, {}, {},
true);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"employee": {"num": 5000},
"education": [
{"name": "X High School", "type": "school"},
{"name": "Y University", "type": "undergraduate"}
]
})"_json;
auto doc2 = R"({
"employee": {"num": 1000},
"education": [
{"name": "X High School", "type": "school"},
{"name": "Z University", "type": "undergraduate"}
]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
ASSERT_TRUE(coll1->add(doc2.dump(), CREATE).ok());
// group on a field inside array of objects
auto results = coll1->search("school", {"education"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10,
spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(), 10, "", 30,
5, "", 10, {}, {}, {"education.name"}, 2).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["grouped_hits"].size());
ASSERT_EQ(1, results["grouped_hits"][0]["group_key"].size());
ASSERT_EQ(2, results["grouped_hits"][0]["group_key"][0].size());
ASSERT_EQ("X High School", results["grouped_hits"][0]["group_key"][0][0].get<std::string>());
ASSERT_EQ("Z University", results["grouped_hits"][0]["group_key"][0][1].get<std::string>());
ASSERT_EQ(1, results["grouped_hits"][0]["hits"].size());
ASSERT_EQ("1", results["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(1, results["grouped_hits"][1]["group_key"].size());
ASSERT_EQ(2, results["grouped_hits"][1]["group_key"][0].size());
ASSERT_EQ("X High School", results["grouped_hits"][1]["group_key"][0][0].get<std::string>());
ASSERT_EQ("Y University", results["grouped_hits"][1]["group_key"][0][1].get<std::string>());
ASSERT_EQ(1, results["grouped_hits"][1]["hits"].size());
ASSERT_EQ("0", results["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>());
// group on plain nested field
results = coll1->search("school", {"education"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 10,
spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(), 10, "", 30,
5, "", 10, {}, {}, {"employee.num"}, 2).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["grouped_hits"].size());
ASSERT_EQ(1, results["grouped_hits"][0]["group_key"].size());
ASSERT_EQ(1, results["grouped_hits"][0]["group_key"][0].size());
ASSERT_EQ(1000, results["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_EQ(1, results["grouped_hits"][0]["hits"].size());
ASSERT_EQ("1", results["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(1, results["grouped_hits"][1]["group_key"].size());
ASSERT_EQ(1, results["grouped_hits"][1]["group_key"][0].size());
ASSERT_EQ(5000, results["grouped_hits"][1]["group_key"][0].get<size_t>());
ASSERT_EQ(1, results["grouped_hits"][1]["hits"].size());
ASSERT_EQ("0", results["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionNestedFieldsTest, WildcardWithExplicitSchema) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": ".*", "type": "auto"},
{"name": "company.id", "type": "int32"},
{"name": "studies.year", "type": "int32[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"company": {"id": 1000, "name": "Foo"},
"studies": [{"name": "College 1", "year": 1997}]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto results = coll1->search("*", {}, "company.id: 1000", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("*", {}, "studies.year: 1997", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, DynamicFieldWithExplicitSchema) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "spec", "type": "object"},
{"name": "spec\\..*\\.value", "type": "float"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"spec": {"number": {"value": 100}}
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto field_vec = coll1->get_fields();
ASSERT_EQ(3, field_vec.size());
ASSERT_EQ(field_types::FLOAT, field_vec[2].type);
// with only the nested dynamic type declared explicitly (parent covered by the wildcard auto field)
schema = R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": ".*", "type": "auto"},
{"name": "spec\\..*\\.value", "type": "float"}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
ASSERT_TRUE(coll2->add(doc1.dump(), CREATE).ok());
field_vec = coll2->get_fields();
ASSERT_EQ(4, field_vec.size());
ASSERT_EQ(field_types::FLOAT, field_vec[3].type);
}
TEST_F(CollectionNestedFieldsTest, UpdateOfNestFields) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name":"name", "type": "string", "index": false, "optional": true},
{"name":"brand","type":"object","optional":true},
{"name":"brand.id","type":"int32","sort":false},
{"name":"brand.name","type":"string","index":false,"sort":false,"optional":true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
auto doc1 = R"({
"id": "b4db5f0456a93320428365f92c2a54ce15df2d0a",
"product_id": 63992305,
"name": "Chips",
"link": "http://wicked-uncle.biz",
"meta": {
"valid": true
},
"brand": {
"id": 34002,
"name": "Hodkiewicz - Rempel"
}
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
// action=update - `brand.name` changed, `brand.id` not sent
// the `brand.id` field should not be deleted
auto doc_update = R"({
"id": "b4db5f0456a93320428365f92c2a54ce15df2d0a",
"brand": {
"name": "Rempel"
}
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPDATE).ok());
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(6, results["hits"][0]["document"].size());
ASSERT_EQ(2, results["hits"][0]["document"]["brand"].size());
ASSERT_EQ("Rempel", results["hits"][0]["document"]["brand"]["name"].get<std::string>());
// action=emplace
doc_update = R"({
"id": "b4db5f0456a93320428365f92c2a54ce15df2d0a",
"brand": {
"name": "The Rempel"
}
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), EMPLACE).ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(6, results["hits"][0]["document"].size());
ASSERT_EQ(2, results["hits"][0]["document"]["brand"].size());
ASSERT_EQ("The Rempel", results["hits"][0]["document"]["brand"]["name"].get<std::string>());
// action=upsert requires the full document
doc_update = R"({
"id": "b4db5f0456a93320428365f92c2a54ce15df2d0a",
"brand": {
"name": "Xomel"
}
})"_json;
auto add_op = coll1->add(doc_update.dump(), UPSERT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `brand.id` has been declared in the schema, but is not found in the document.", add_op.error());
doc_update = R"({
"id": "b4db5f0456a93320428365f92c2a54ce15df2d0a",
"name": "Chips",
"brand": {
"id": 34002,
"name": "Xomel"
}
})"_json;
add_op = coll1->add(doc_update.dump(), UPSERT);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"][0]["document"].size());
ASSERT_EQ(2, results["hits"][0]["document"]["brand"].size());
ASSERT_EQ("Xomel", results["hits"][0]["document"]["brand"]["name"].get<std::string>());
// upsert with brand.name missing is allowed because it's optional
doc_update = R"({
"id": "b4db5f0456a93320428365f92c2a54ce15df2d0a",
"name": "Potato Chips",
"brand": {
"id": 34002
}
})"_json;
add_op = coll1->add(doc_update.dump(), UPSERT);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"][0]["document"].size());
ASSERT_EQ(1, results["hits"][0]["document"]["brand"].size());
ASSERT_EQ(34002, results["hits"][0]["document"]["brand"]["id"].get<size_t>());
}
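// --- illustrative sketch (not part of the original test) ---
// The assertions above exercise three write modes: UPDATE and EMPLACE merge the partial document
// into the stored one, while UPSERT replaces it and therefore needs every required field.
// A minimal deep-merge sketch of the UPDATE/EMPLACE behaviour using only nlohmann::json
// (the `deep_merge_sketch` name is hypothetical, not the engine's code):
static void deep_merge_sketch(nlohmann::json& stored, const nlohmann::json& partial) {
for(auto it = partial.begin(); it != partial.end(); ++it) {
if(it.value().is_object() && stored.contains(it.key()) && stored[it.key()].is_object()) {
deep_merge_sketch(stored[it.key()], it.value());   // merge nested objects key by key
} else {
stored[it.key()] = it.value();                     // scalars and arrays are overwritten
}
}
}
// e.g. merging {"brand": {"name": "Rempel"}} into the stored doc keeps `brand.id` intact, which is
// what the UPDATE assertions above verify; an UPSERT instead replaces the whole document.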
TEST_F(CollectionNestedFieldsTest, UpdateOfNestFieldsWithWildcardSchema) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"company": {"num_employees": 2000, "founded": 1976},
"studies": [{"name": "College 1"}]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto doc_update = R"({
"id": "0",
"company": {"num_employees": 2000, "founded": 1976, "year": 2000},
"studies": [{"name": "College Alpha", "year": 1967},{"name": "College Beta", "year": 1978}]
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPDATE).ok());
auto results = coll1->search("*", {}, "company.year: 2000", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("*", {}, "studies.year: 1967", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("*", {}, "studies.year: 1978", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("alpha", {"studies.name"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("beta", {"studies.name"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// try removing fields via upsert, dropping "company.year"
doc_update = R"({
"id": "0",
"company": {"num_employees": 4000, "founded": 1976},
"studies": [{"name": "College Alpha"}]
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPSERT).ok());
results = coll1->search("*", {}, "company.year: 2000", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
results = coll1->search("*", {}, "studies.year: 1967", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
results = coll1->search("*", {}, "studies.year: 1978", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"][0]["document"].size());
ASSERT_EQ(2, results["hits"][0]["document"]["company"].size());
ASSERT_EQ(4000, results["hits"][0]["document"]["company"]["num_employees"].get<size_t>());
ASSERT_EQ(1976, results["hits"][0]["document"]["company"]["founded"].get<size_t>());
ASSERT_EQ(1, results["hits"][0]["document"]["studies"].size());
ASSERT_EQ(1, results["hits"][0]["document"]["studies"][0].size());
ASSERT_EQ("College Alpha", results["hits"][0]["document"]["studies"][0]["name"].get<std::string>());
// via update (should not remove, since document can be partial)
doc_update = R"({
"id": "0",
"company": {"num_employees": 2000},
"studies": [{"name": "College Alpha"}]
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPDATE).ok());
results = coll1->search("*", {}, "company.founded: 1976", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"][0]["document"].size());
ASSERT_EQ(2, results["hits"][0]["document"]["company"].size());
ASSERT_EQ(2000, results["hits"][0]["document"]["company"]["num_employees"].get<size_t>());
ASSERT_EQ(1976, results["hits"][0]["document"]["company"]["founded"].get<size_t>());
ASSERT_EQ(1, results["hits"][0]["document"]["studies"].size());
ASSERT_EQ(1, results["hits"][0]["document"]["studies"][0].size());
ASSERT_EQ("College Alpha", results["hits"][0]["document"]["studies"][0]["name"].get<std::string>());
// via emplace (should not remove, since document can be partial)
doc_update = R"({
"id": "0",
"company": {},
"studies": [{"name": "College Alpha", "year": 1977}]
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), EMPLACE).ok());
results = coll1->search("*", {}, "company.num_employees: 2000", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"][0]["document"].size());
ASSERT_EQ(2, results["hits"][0]["document"]["company"].size());
ASSERT_EQ(2000, results["hits"][0]["document"]["company"]["num_employees"].get<size_t>());
ASSERT_EQ(1976, results["hits"][0]["document"]["company"]["founded"].get<size_t>());
ASSERT_EQ(1, results["hits"][0]["document"]["studies"].size());
ASSERT_EQ(2, results["hits"][0]["document"]["studies"][0].size());
ASSERT_EQ("College Alpha", results["hits"][0]["document"]["studies"][0]["name"].get<std::string>());
ASSERT_EQ(1977, results["hits"][0]["document"]["studies"][0]["year"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, NestedSchemaWithSingularType) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "studies.year", "type": "int32", "optional": false}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"studies": [{"name": "College 1", "year": 1997}]
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `studies.year` has an incorrect type. "
"Hint: field inside an array of objects must be an array type as well.", add_op.error());
// even when field is optional, there should be an error
schema = R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": "studies.year", "type": "int32", "optional": true}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
add_op = coll2->add(doc1.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `studies.year` has an incorrect type. "
"Hint: field inside an array of objects must be an array type as well.", add_op.error());
// allow the optional field to be missing when the parent value is a singular object
doc1 = R"({
"id": "0",
"studies": {"name": "College 1"}
})"_json;
add_op = coll2->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
}
TEST_F(CollectionNestedFieldsTest, NestedSchemaAutoAndFacet) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "person.*", "type": "auto", "facet": true},
{"name": "schools.*", "type": "auto", "facet": true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"person": {"name": "Tony Stark"},
"schools": [{"name": "Primary School"}]
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto fields = coll1->get_fields();
for(const auto& f : fields) {
ASSERT_TRUE(f.facet);
}
ASSERT_TRUE(coll1->get_schema()["schools.name"].optional);
}
TEST_F(CollectionNestedFieldsTest, NestedObjectOfObjectEnableFacet) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "variants", "type": "object"},
{"name": "variants\\..*\\.price", "type": "int64", "facet": true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"variants": {
"store_1": {"price": 100},
"store_2": {"price": 200}
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
ASSERT_TRUE(coll1->get_schema()["variants.store_1.price"].facet);
ASSERT_TRUE(coll1->get_schema()["variants.store_2.price"].facet);
}
TEST_F(CollectionNestedFieldsTest, ArrayOfObjectsFaceting) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "location_addresses", "type": "object[]", "facet": true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"company_name": "Acme Corp",
"display_address": {
"city": "LA",
"street": "Lumbard St"
},
"location_addresses": [
{
"city": "Columbus",
"street": "Yale St"
},
{
"city": "Soda Springs",
"street": "5th St"
}
],
"num_employees": 10,
"primary_address": {
"city": "Los Angeles",
"street": "123 Lumbard St"
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("*", {}, "", {"location_addresses.city"}, {},
{0}, 10, 1, FREQUENCY, {false}).get();
// add same doc again
doc1["id"] = "1";
add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {}, "", {"location_addresses.city"}, {},
{0}, 10, 1, FREQUENCY, {false}).get();
// each facet value should now have a count of 2
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Columbus", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(2, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_EQ("Soda Springs", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
ASSERT_EQ(2, results["facet_counts"][0]["counts"][1]["count"].get<size_t>());
}
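// --- illustrative sketch (not part of the original test) ---
// Faceting on `location_addresses.city` above counts a value once per occurrence across the
// indexed documents. A minimal counting sketch using nlohmann::json and std::map (assumes <map>;
// `count_city_facets_sketch` is a hypothetical helper, not the real faceting code):
static std::map<std::string, size_t> count_city_facets_sketch(const std::vector<nlohmann::json>& docs) {
std::map<std::string, size_t> counts;
for(const auto& doc : docs) {
for(const auto& address : doc.at("location_addresses")) {
counts[address.at("city").get<std::string>()]++;
}
}
return counts;
}
// With the two identical documents indexed above this yields {"Columbus": 2, "Soda Springs": 2},
// matching the facet_counts assertions.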
TEST_F(CollectionNestedFieldsTest, HighlightArrayInsideArrayOfObj) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "studies", "type": "auto"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"studies": [
{"name": "College 1", "tags": ["foo", "bar"]},
{"name": "College 1", "tags": ["alpha", "beta"]}
]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto results = coll1->search("beta", {"studies"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
nlohmann::json highlight_meta_doc = R"({
"studies": [
{
"name": {
"matched_tokens": [],
"snippet": "College 1"
},
"tags": [
{
"matched_tokens": [],
"snippet": "foo"
},
{
"matched_tokens": [],
"snippet": "bar"
}
]
},
{
"name": {
"matched_tokens": [],
"snippet": "College 1"
},
"tags": [
{
"matched_tokens": [],
"snippet": "alpha"
},
{
"matched_tokens": [
"beta"
],
"snippet": "<mark>beta</mark>"
}
]
}
]
})"_json;
ASSERT_EQ(highlight_meta_doc.dump(), results["hits"][0]["highlight"].dump());
}
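// --- illustrative sketch (not part of the original test) ---
// The expected highlight above wraps each matched token of a leaf value in <mark>...</mark>,
// while untouched leaves keep empty matched_tokens. A tiny string-based sketch of the wrapping
// step (hypothetical helper; real snippeting also windows the surrounding text, which this
// does not attempt):
static std::string mark_token_sketch(const std::string& text, const std::string& token) {
std::string out = text;
size_t pos = out.find(token);
if(pos != std::string::npos) {
out.insert(pos + token.size(), "</mark>");   // insert the closing tag first so `pos` stays valid
out.insert(pos, "<mark>");
}
return out;
}
// mark_token_sketch("beta", "beta") == "<mark>beta</mark>", as asserted in the snippet above.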
TEST_F(CollectionNestedFieldsTest, ErrorWhenObjectTypeUsedWithoutEnablingNestedFields) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "details", "type": "object", "optional": false }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Type `object` or `object[]` can be used only when nested fields are enabled by setting` "
"enable_nested_fields` to true.", op.error());
schema = R"({
"name": "coll1",
"fields": [
{"name": "details", "type": "object[]", "optional": false }
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Type `object` or `object[]` can be used only when nested fields are enabled by setting` "
"enable_nested_fields` to true.", op.error());
}
TEST_F(CollectionNestedFieldsTest, FieldsWithDotsButNotNested) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "name.first", "type": "string"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"name.first": "Alpha Beta Gamma"
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("beta", {"name.first"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ("Alpha <mark>Beta</mark> Gamma",
results["hits"][0]["highlight"]["name.first"]["snippet"].get<std::string>());
}
TEST_F(CollectionNestedFieldsTest, NullValuesWithExplicitSchema) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "name", "type": "object"},
{"name": "name.first", "type": "string"},
{"name": "name.last", "type": "string", "optional": true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"name": {"last": null, "first": "Jack"}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("jack", {"name.first"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"][0]["document"].size()); // id, name
ASSERT_EQ(1, results["hits"][0]["document"]["name"].size()); // name.first
ASSERT_EQ("Jack", results["hits"][0]["document"]["name"]["first"].get<std::string>());
}
TEST_F(CollectionNestedFieldsTest, EmplaceWithNullValueOnRequiredField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name":"currency", "type":"object"},
{"name":"currency.eu", "type":"int32", "optional": false}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
auto doc_with_null = R"({
"id": "0",
"currency": {
"eu": null
}
})"_json;
auto add_op = coll1->add(doc_with_null.dump(), EMPLACE);
ASSERT_FALSE(add_op.ok());
add_op = coll1->add(doc_with_null.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
auto doc1 = R"({
"id": "0",
"currency": {
"eu": 12000
}
})"_json;
add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// now update with null value -- should not be allowed
auto update_doc = R"({
"id": "0",
"currency": {
"eu": null
}
})"_json;
auto update_op = coll1->add(update_doc.dump(), EMPLACE);
ASSERT_FALSE(update_op.ok());
ASSERT_EQ("Field `currency.eu` must be an int32.", update_op.error());
}
TEST_F(CollectionNestedFieldsTest, EmplaceWithNullValueOnOptionalField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name":"currency", "type":"object"},
{"name":"currency.eu", "type":"int32", "optional": true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
auto doc1 = R"({
"id": "0",
"currency": {
"eu": 12000
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// now update with null value -- should be allowed since field is optional
auto update_doc = R"({
"id": "0",
"currency": {
"eu": null
}
})"_json;
auto update_op = coll1->add(update_doc.dump(), EMPLACE);
ASSERT_TRUE(update_op.ok());
// try to fetch the document to see the stored value
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"][0]["document"].size()); // id, currency
ASSERT_EQ(0, results["hits"][0]["document"]["currency"].size());
}
TEST_F(CollectionNestedFieldsTest, UpsertWithNullValueOnOptionalField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "status", "type": "object"},
{"name": "title", "type": "string"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"title": "Title Alpha",
"status": {"name": "Foo"}
})"_json;
auto add_op = coll1->add(doc1.dump(), UPSERT);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("alpha", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"][0]["document"].size()); // id, title, status
ASSERT_EQ(1, results["hits"][0]["document"]["status"].size());
results = coll1->search("foo", {"status"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// upsert again with null value
doc1 = R"({
"id": "0",
"title": "Title Alpha",
"status": {"name": null}
})"_json;
add_op = coll1->add(doc1.dump(), UPSERT);
ASSERT_TRUE(add_op.ok());
results = coll1->search("alpha", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"][0]["document"].size()); // id, title, status
ASSERT_EQ(0, results["hits"][0]["document"]["status"].size());
results = coll1->search("foo", {"status"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, EmplaceWithMissingArrayValueOnOptionalField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name":"currency", "type":"object[]"},
{"name":"currency.eu", "type":"int32[]", "optional": true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
auto doc1 = R"({
"id": "0",
"currency": [
{"eu": 12000},
{"us": 10000}
]
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// now update with the `eu` value missing entirely -- should be allowed since the field is optional
auto update_doc = R"({
"id": "0",
"currency": [
{"us": 10000}
]
})"_json;
auto update_op = coll1->add(update_doc.dump(), EMPLACE);
ASSERT_TRUE(update_op.ok());
// try to fetch the document to see the stored value
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"][0]["document"].size()); // id, currency
ASSERT_EQ(1, results["hits"][0]["document"]["currency"].size());
ASSERT_EQ(10000, results["hits"][0]["document"]["currency"][0]["us"].get<uint32_t>());
}
TEST_F(CollectionNestedFieldsTest, UpdateNestedDocument) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "contributors", "type": "object", "optional": false},
{"name": "title", "type": "string", "optional": false}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"title": "Title Alpha",
"contributors": {"first_name": "John", "last_name": "Galt"}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// update document partially
doc1 = R"({
"id": "0",
"title": "Title Beta"
})"_json;
add_op = coll1->add(doc1.dump(), UPDATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("beta", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// emplace document partially
doc1 = R"({
"id": "0",
"title": "Title Gamma"
})"_json;
add_op = coll1->add(doc1.dump(), EMPLACE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("gamma", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// update a sub-field of an object
doc1 = R"({
"id": "0",
"contributors": {"last_name": "Shaw"}
})"_json;
add_op = coll1->add(doc1.dump(), UPDATE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("shaw", {"contributors"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("john", {"contributors.first_name"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// should not be able to find the old name
results = coll1->search("galt", {"contributors"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, UpdateNestedDocumentAutoSchema) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"price": {"now": 3000, "country": "US"}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// update document partially
doc1 = R"({
"id": "0",
"price": {"now": 4000}
})"_json;
add_op = coll1->add(doc1.dump(), UPDATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("us", {"price.country"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, UpdateNestedDocumentWithOptionalNullValue) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "contributors", "type": "object", "optional": true},
{"name": "title", "type": "string", "optional": false}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"title": "Title Alpha",
"contributors": {"first_name": "John", "last_name": null}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// update document partially
doc1 = R"({
"id": "0",
"title": "Title Beta",
"contributors": {"first_name": "Jack", "last_name": null}
})"_json;
add_op = coll1->add(doc1.dump(), UPDATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("beta", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// emplace document partially
doc1 = R"({
"id": "0",
"title": "Title Gamma",
"contributors": {"first_name": "Jim", "last_name": null}
})"_json;
add_op = coll1->add(doc1.dump(), EMPLACE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("gamma", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// remove the document whose optional field holds a null value
auto del_op = coll1->remove("0");
ASSERT_TRUE(del_op.ok());
results = coll1->search("gamma", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, ImproveErrorMessageForNestedArrayNumericalFields) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "variants", "type": "object[]", "facet": true, "index": true},
{"name": "variants.sellingPrice", "type": "int32", "facet": true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"variants": [
{
"sellingPrice": 2300,
"timestamp": 10000,
"is_deleted": false,
"price": 50.50
},
{
"sellingPrice": 1200,
"timestamp": 10000,
"is_deleted": false,
"price": 150.50
}
]
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `variants.sellingPrice` has an incorrect type. "
"Hint: field inside an array of objects must be an array type as well.", add_op.error());
schema = R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": "variants", "type": "object[]", "facet": true, "index": true},
{"name": "variants.timestamp", "type": "int64", "facet": true}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
add_op = coll2->add(doc1.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `variants.timestamp` has an incorrect type. "
"Hint: field inside an array of objects must be an array type as well.", add_op.error());
schema = R"({
"name": "coll3",
"enable_nested_fields": true,
"fields": [
{"name": "variants", "type": "object[]", "facet": true, "index": true},
{"name": "variants.is_deleted", "type": "bool", "facet": true}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll3 = op.get();
add_op = coll3->add(doc1.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `variants.is_deleted` has an incorrect type. "
"Hint: field inside an array of objects must be an array type as well.", add_op.error());
// float field inside an array of objects
schema = R"({
"name": "coll4",
"enable_nested_fields": true,
"fields": [
{"name": "variants", "type": "object[]", "facet": true, "index": true},
{"name": "variants.price", "type": "float", "facet": true}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll4 = op.get();
add_op = coll4->add(doc1.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `variants.price` has an incorrect type. "
"Hint: field inside an array of objects must be an array type as well.", add_op.error());
}
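// --- illustrative sketch (not part of the original test) ---
// Every failure above stems from the same rule: a child declared under an `object[]` parent is
// collected across all array elements, so its declared type must itself be an array type
// (int32[], int64[], bool[], float[]) rather than the singular form. A tiny sketch of that rule
// over type-name strings (hypothetical helper, not the real schema validator):
static bool nested_array_child_type_ok_sketch(const std::string& parent_type,
const std::string& child_type) {
if(parent_type != "object[]") {
return true;                                                    // rule only applies under object[]
}
return child_type.size() > 2 &&
child_type.compare(child_type.size() - 2, 2, "[]") == 0; // declared type must end in "[]"
}
// nested_array_child_type_ok_sketch("object[]", "int32")   == false -> the errors asserted above
// nested_array_child_type_ok_sketch("object[]", "int32[]") == true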
TEST_F(CollectionNestedFieldsTest, HighlightArrayOfObjects) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"details": [
{"foo": "John Smith"},
{"name": "James Peterson"},
{"bar": "John Galt"}
]
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("james", {"details.name"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"][0]["highlight"]["details"].size());
ASSERT_EQ(0, results["hits"][0]["highlight"]["details"][0].size());
ASSERT_EQ(1, results["hits"][0]["highlight"]["details"][1].size());
ASSERT_EQ(0, results["hits"][0]["highlight"]["details"][2].size());
results = coll1->search("james", {"details.name"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 10000, true, false, true, "details.name").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(0, results["hits"][0]["highlight"]["details"][0].size());
ASSERT_EQ(1, results["hits"][0]["highlight"]["details"][1].size());
ASSERT_EQ(0, results["hits"][0]["highlight"]["details"][2].size());
results = coll1->search("james", {"details.name"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 10000, true, false, true, "details").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"][0]["highlight"]["details"].size());
ASSERT_EQ(1, results["hits"][0]["highlight"]["details"][0].size());
ASSERT_EQ(1, results["hits"][0]["highlight"]["details"][1].size());
ASSERT_EQ(1, results["hits"][0]["highlight"]["details"][2].size());
}
TEST_F(CollectionNestedFieldsTest, DeepNestedOptionalArrayValue) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{
"facet": false,
"index": true,
"infix": false,
"locale": "",
"name": "items.name",
"optional": true,
"sort": false,
"type": "string[]"
},
{
"facet": false,
"index": true,
"infix": false,
"locale": "",
"name": "items.description",
"optional": true,
"sort": false,
"type": "string[]"
},
{
"facet": false,
"index": true,
"infix": false,
"locale": "",
"name": "items.nested_items.name",
"optional": true,
"sort": false,
"type": "string[]"
}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"items": [
{
"description": "random description.",
"name": "foobar",
"nested_items": [
{
"isAvailable": true
},
{
"description": "nested description here",
"isAvailable": true,
"name": "naruto"
},
{
"description": "description again",
"isAvailable": true,
"name": "dragon ball"
}
]
}
]
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("naruto", {"items.nested_items.name"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, FloatInsideNestedObject) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "price.*", "type": "float"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
auto doc1 = R"({
"price": {
"USD": 75.40
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// should also accept whole numbers
schema = R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": "price.*", "type": "float"}
]
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll2 = op.get();
auto doc2 = R"({
"price": {
"USD": 75
}
})"_json;
add_op = coll2->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto fs = coll2->get_fields();
ASSERT_EQ(3, fs.size());
add_op = coll2->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
}
TEST_F(CollectionNestedFieldsTest, NestedFieldWithRegexName) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name":"titles", "type":"object"},
{"name": "titles\\..*", "type":"string"},
{"name":"start_date", "type":"object"},
{"name":"start_date\\..*", "type":"int32", "facet":true, "optional":true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
auto doc1 = R"({
"titles": {
"en": "Foobar baz"
},
"start_date": {
"year": 2020,
"month": 2,
"day": 3
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("foobar", {"titles.en"}, "start_date.year: 2020", {}, {}, {2}, 10,
1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionNestedFieldsTest, HighlightOnFlatFieldWithSnippeting) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("body", field_types::STRING, false)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "pimples keep popping up on chin";
doc1["body"] = "on left side of chin under the corner of my mouth i keep getting huge pimples. they’ll go away for "
"a few days but come back every time and i don’t quit it. I have oily skin and acne prone. i "
"also just started using twice a week";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("pimples", {"title", "body"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true}).get();
auto highlight_doc = R"({
"body": {
"matched_tokens": [
"pimples"
],
"snippet": "i keep getting huge <mark>pimples</mark>. they’ll go away for"
},
"title": {
"matched_tokens": [
"pimples"
],
"snippet": "<mark>pimples</mark> keep popping up on chin"
}
})"_json;
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
// with full highlighting
highlight_doc = R"({
"body": {
"matched_tokens": [
"pimples"
],
"snippet": "i keep getting huge <mark>pimples</mark>. they’ll go away for",
"value": ""
},
"title": {
"matched_tokens": [
"pimples"
],
"snippet": "<mark>pimples</mark> keep popping up on chin",
"value": "<mark>pimples</mark> keep popping up on chin"
}
})"_json;
highlight_doc["body"]["value"] = "on left side of chin under the corner of my mouth i keep getting huge "
"<mark>pimples</mark>. they’ll go away for a few days but come back every time "
"and i don’t quit it. I have oily skin and acne prone. i also just started "
"using twice a week";
results = coll1->search("pimples", {"title", "body"}, "", {}, {}, {2}, 10,
1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title,body").get();
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
}
TEST_F(CollectionNestedFieldsTest, NestedObjecEnableSortOnString) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "status", "type": "object"},
{"name": "status\\..*", "type": "string", "sort": true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"status": {
"1": "ACCEPTED"
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
ASSERT_TRUE(coll1->get_schema()["status.1"].sort);
}
| 121,656 | C++ | .cpp | 3,048 | 30.677493 | 177 | 0.5234 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,702 | array_test.cpp | typesense_typesense/test/array_test.cpp |
#include <gtest/gtest.h>
#include "array.h"
#include <vector>
TEST(ArrayTest, Append) {
array arr;
int SIZE = 10 * 1000;
EXPECT_EQ(arr.getLength(), 0);
// First try inserting sorted ints
for(int i=0; i < SIZE; i++) {
arr.append(i);
}
EXPECT_EQ(arr.getLength(), SIZE);
for(int i=0; i < SIZE; i++) {
EXPECT_EQ(arr.at(i), i);
EXPECT_EQ(arr.indexOf(i), i);
EXPECT_EQ(arr.contains(i), true);
}
EXPECT_EQ(arr.contains(SIZE), false);
EXPECT_EQ(arr.indexOf(SIZE), SIZE);
EXPECT_EQ(arr.indexOf(SIZE+1), SIZE);
// Insert in unsorted fashion
array arr2;
std::vector<uint32_t> unsorted;
for(int i=0; i < SIZE; i++) {
uint32_t r = (uint32_t) rand();
unsorted.push_back(r);
arr2.append(r);
}
EXPECT_EQ(arr2.getLength(), SIZE);
for(int i=0; i < SIZE; i++) {
uint32_t value = unsorted.at(i);
EXPECT_EQ(arr2.at(i), value);
}
}
TEST(ArrayTest, InsertValues) {
std::vector<uint32_t> eles = {10, 1, 4, 5, 7};
array arr;
for(size_t i=0; i < eles.size(); i++) {
arr.append(eles[i]);
}
uint32_t insert_arr[2] = {2, 3};
arr.insert(2, insert_arr, 2);
eles = {10, 1, 2, 3, 4, 5, 7};
for(size_t i=0; i < eles.size(); i++) {
ASSERT_EQ(eles[i], arr.at(i));
}
uint32_t insert_arr2[2] = {20, 25};
arr.insert(6, insert_arr2, 2);
eles = {10, 1, 2, 3, 4, 5, 20, 25, 7};
for(size_t i=0; i < eles.size(); i++) {
ASSERT_EQ(eles[i], arr.at(i));
}
}
TEST(ArrayTest, Uncompress) {
const size_t SIZE = 10*1000;
array unsorted_arr;
std::vector<uint32_t> unsorted;
for(size_t i=0; i<SIZE; i++) {
uint32_t r = (uint32_t) rand();
unsorted.push_back(r);
unsorted_arr.append(r);
}
uint32_t *raw_unsorted_arr = unsorted_arr.uncompress();
for(size_t i=0; i<unsorted.size(); i++) {
ASSERT_EQ(raw_unsorted_arr[i], unsorted.at(i));
}
delete[] raw_unsorted_arr;
}
TEST(ArrayTest, RemoveBetweenIndices) {
array arr;
const size_t SIZE = 10*1000;
std::vector<uint32_t> unsorted;
// remove the lone element, then try removing again from the now-empty array
arr.append(100);
arr.remove_index(0, 1);
arr.contains(100);
arr.remove_index(0, 1);
for(size_t i=0; i<SIZE; i++) {
uint32_t r = (uint32_t) rand();
unsorted.push_back(r);
arr.append(r);
}
// Remove first two elements
arr.remove_index(0, 2);
unsorted.erase(unsorted.begin(), unsorted.begin()+2);
ASSERT_EQ(arr.getLength(), SIZE-2);
for(size_t i=0; i<SIZE-2; i++) {
ASSERT_EQ(arr.at(i), unsorted.at(i));
}
// Remove from the middle
arr.remove_index(1200, 2400);
unsorted.erase(unsorted.begin()+1200, unsorted.begin()+2400);
ASSERT_EQ(arr.getLength(), SIZE-2-1200);
for(size_t i=0; i<SIZE-2-1200; i++) {
ASSERT_EQ(arr.at(i), unsorted.at(i));
}
// Remove from the end
const uint32_t NEW_SIZE = arr.getLength();
arr.remove_index(NEW_SIZE - 3, NEW_SIZE);
unsorted.erase(unsorted.begin()+NEW_SIZE-3, unsorted.begin()+NEW_SIZE);
ASSERT_EQ(arr.getLength(), NEW_SIZE-3);
for(size_t i=0; i<NEW_SIZE-3; i++) {
ASSERT_EQ(arr.at(i), unsorted.at(i));
}
}
| 3,298 | C++ | .cpp | 105 | 25.619048 | 75 | 0.579581 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,703 | geo_filtering_test.cpp | typesense_typesense/test/geo_filtering_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class GeoFilteringTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_filtering";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(GeoFilteringTest, GeoPointFiltering) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Palais Garnier", "48.872576479306765, 2.332291112241466"},
{"Sacre Coeur", "48.888286721920934, 2.342340862419206"},
{"Arc de Triomphe", "48.87538726829884, 2.296113163780903"},
{"Place de la Concorde", "48.86536119187326, 2.321850747347093"},
{"Louvre Musuem", "48.86065813197502, 2.3381285349616725"},
{"Les Invalides", "48.856648379569904, 2.3118555692631357"},
{"Eiffel Tower", "48.85821022164442, 2.294239067890161"},
{"Notre-Dame de Paris", "48.852455825574495, 2.35071182406452"},
{"Musee Grevin", "48.872370541246816, 2.3431536410008906"},
{"Pantheon", "48.84620987789056, 2.345152755563131"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// pick a location close to only the Sacre Coeur
auto results = coll1->search("*",
{}, "loc: ([48.90615915923891, 2.3435897727061175], radius: 3 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
// Multiple geo filters can be combined using square brackets: [ filterA, filterB, ... ]
results = coll1->search("*", {}, "loc: [([48.90615, 2.34358], radius: 1 km), ([48.8462, 2.34515], radius: 1 km)]",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(2, results["found"].get<size_t>());
// pick a location close to none of the spots
results = coll1->search("*",
{}, "loc: [([48.910544830985785, 2.337218333651177], radius: 2 km)]",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// pick a large radius covering all points
results = coll1->search("*",
{}, "loc: ([48.910544830985785, 2.337218333651177], radius: 20 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(10, results["found"].get<size_t>());
// 1 mile radius
results = coll1->search("*",
{}, "loc: ([48.85825332869331, 2.303816427653377], radius: 1 mi)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ("6", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("5", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("3", results["hits"][2]["document"]["id"].get<std::string>());
// when the geo query contains NaN
auto gop = coll1->search("*", {}, "loc: ([NaN, nan], radius: 1 mi)",
{}, {}, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(gop.ok());
ASSERT_EQ("Value of filter field `loc`: must be in the "
"`([-44.50, 170.29], radius: 0.75 km, exact_filter_radius: 5 km)` or "
"([56.33, -65.97, 23.82, -127.82], exact_filter_radius: 7 km) format.", gop.error());
// when the geo query does not include the radius key
gop = coll1->search("*", {}, "loc: ([48.85825332869331, 2.303816427653377])",
{}, {}, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(gop.ok());
ASSERT_EQ("Value of filter field `loc`: must be in the "
"`([-44.50, 170.29], radius: 0.75 km, exact_filter_radius: 5 km)` or "
"([56.33, -65.97, 23.82, -127.82], exact_filter_radius: 7 km) format.", gop.error());
// when geo field is formatted as string, show meaningful error
nlohmann::json bad_doc;
bad_doc["id"] = "1000";
bad_doc["title"] = "Test record";
bad_doc["loc"] = {"48.91", "2.33"};
bad_doc["points"] = 1000;
auto add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a geopoint.", add_op.error());
bad_doc["loc"] = "foobar";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a 2 element array: [lat, lng].", add_op.error());
bad_doc["loc"] = "loc: (48.910544830985785, 2.337218333651177, 2k)";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a 2 element array: [lat, lng].", add_op.error());
bad_doc["loc"] = "loc: (48.910544830985785, 2.337218333651177, 2)";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a 2 element array: [lat, lng].", add_op.error());
bad_doc["loc"] = {"foo", "bar"};
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a geopoint.", add_op.error());
bad_doc["loc"] = {"2.33", "bar"};
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a geopoint.", add_op.error());
bad_doc["loc"] = {"foo", "2.33"};
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a geopoint.", add_op.error());
// under coercion mode, it should work
bad_doc["loc"] = {"48.91", "2.33"};
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_TRUE(add_op.ok());
collectionManager.drop_collection("coll1");
}
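// --- illustrative sketch (not part of the original test) ---
// The radius filters above ("radius: 3 km", "radius: 1 mi") boil down to a great-circle distance
// check between the query point and each stored geopoint. A minimal haversine sketch
// (hypothetical helper, assumes <cmath>; not how the engine actually evaluates geo filters):
static bool within_radius_km_sketch(double lat1, double lng1, double lat2, double lng2,
double radius_km) {
constexpr double kEarthRadiusKm = 6371.0;
constexpr double kPi = 3.14159265358979323846;
const double deg_to_rad = kPi / 180.0;
double dlat = (lat2 - lat1) * deg_to_rad;
double dlng = (lng2 - lng1) * deg_to_rad;
double a = std::sin(dlat / 2) * std::sin(dlat / 2) +
std::cos(lat1 * deg_to_rad) * std::cos(lat2 * deg_to_rad) *
std::sin(dlng / 2) * std::sin(dlng / 2);
double distance_km = 2.0 * kEarthRadiusKm * std::asin(std::sqrt(a));
return distance_km <= radius_km;
}
// e.g. within_radius_km_sketch(48.90615, 2.34358, 48.88828, 2.34234, 3.0) is true, which is why
// only the Sacre Coeur matches the first 3 km query above.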
TEST_F(GeoFilteringTest, GeoPointArrayFiltering) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT_ARRAY, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::vector<std::string>>> records = {
{ {"Alpha Inc", "Ennore", "13.22112, 80.30511"},
{"Alpha Inc", "Velachery", "12.98973, 80.23095"}
},
{
{"Veera Inc", "Thiruvallur", "13.12752, 79.90136"},
},
{
{"B1 Inc", "Bengaluru", "12.98246, 77.5847"},
{"B1 Inc", "Hosur", "12.74147, 77.82915"},
{"B1 Inc", "Vellore", "12.91866, 79.13075"},
},
{
{"M Inc", "Nashik", "20.11282, 73.79458"},
{"M Inc", "Pune", "18.56309, 73.855"},
}
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0][0];
doc["points"] = i;
std::vector<std::vector<double>> lat_lngs;
for(size_t k = 0; k < records[i].size(); k++) {
std::vector<std::string> lat_lng_str;
StringUtils::split(records[i][k][2], lat_lng_str, ", ");
std::vector<double> lat_lng = {
std::stod(lat_lng_str[0]),
std::stod(lat_lng_str[1])
};
lat_lngs.push_back(lat_lng);
}
doc["loc"] = lat_lngs;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
// pick a location close to Chennai
auto results = coll1->search("*",
{}, "loc: ([13.12631, 80.20252], radius: 100km, exact_filter_radius: 100km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
// The default value of exact_filter_radius is 10 km, so exact filtering is not performed here.
results = coll1->search("*",
{}, "loc: ([13.12631, 80.20252], radius: 100km,)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][2]["document"]["id"].get<std::string>());
// pick a location close to none of the spots
results = coll1->search("*",
{}, "loc: ([13.62601, 79.39559], radius: 10 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// pick a large radius covering all points
results = coll1->search("*",
{}, "loc: ([21.20714729927276, 78.99153966917213], radius: 1000 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(4, results["found"].get<size_t>());
// 1 mile radius
results = coll1->search("*",
{}, "loc: ([12.98941, 80.23073], radius: 1mi)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// when geo field is formatted badly, show meaningful error
nlohmann::json bad_doc;
bad_doc["id"] = "1000";
bad_doc["title"] = "Test record";
bad_doc["loc"] = {"48.91", "2.33"};
bad_doc["points"] = 1000;
auto add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must contain 2 element arrays: [ [lat, lng],... ].", add_op.error());
bad_doc["loc"] = "foobar";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be an array.", add_op.error());
bad_doc["loc"] = nlohmann::json::array();
nlohmann::json points = nlohmann::json::array();
points.push_back("foo");
points.push_back("bar");
bad_doc["loc"].push_back(points);
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be an array of geopoint.", add_op.error());
bad_doc["loc"][0][0] = "2.33";
bad_doc["loc"][0][1] = "bar";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be an array of geopoint.", add_op.error());
bad_doc["loc"][0][0] = "foo";
bad_doc["loc"][0][1] = "2.33";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be an array of geopoint.", add_op.error());
// under coercion mode, it should work
bad_doc["loc"][0][0] = "48.91";
bad_doc["loc"][0][1] = "2.33";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_TRUE(add_op.ok());
collectionManager.drop_collection("coll1");
}
TEST_F(GeoFilteringTest, GeoPointRemoval) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc1", field_types::GEOPOINT, false),
field("loc2", field_types::GEOPOINT_ARRAY, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Palais Garnier";
doc["loc1"] = {48.872576479306765, 2.332291112241466};
doc["loc2"] = nlohmann::json::array();
doc["loc2"][0] = {48.84620987789056, 2.345152755563131};
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*",
{}, "loc1: ([48.87491151802846, 2.343945883701618], radius: 1 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("*",
{}, "loc2: ([48.87491151802846, 2.343945883701618], radius: 10 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
// remove the document, index another document and try querying again
coll1->remove("0");
doc["id"] = "1";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
results = coll1->search("*",
{}, "loc1: ([48.87491151802846, 2.343945883701618], radius: 1 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("*",
{}, "loc2: ([48.87491151802846, 2.343945883701618], radius: 10 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
}
TEST_F(GeoFilteringTest, GeoPolygonFiltering) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Palais Garnier", "48.872576479306765, 2.332291112241466"},
{"Sacre Coeur", "48.888286721920934, 2.342340862419206"},
{"Arc de Triomphe", "48.87538726829884, 2.296113163780903"},
{"Place de la Concorde", "48.86536119187326, 2.321850747347093"},
{"Louvre Musuem", "48.86065813197502, 2.3381285349616725"},
{"Les Invalides", "48.856648379569904, 2.3118555692631357"},
{"Eiffel Tower", "48.85821022164442, 2.294239067890161"},
{"Notre-Dame de Paris", "48.852455825574495, 2.35071182406452"},
{"Musee Grevin", "48.872370541246816, 2.3431536410008906"},
{"Pantheon", "48.84620987789056, 2.345152755563131"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// search within a polygon that covers only some of the landmarks
auto results = coll1->search("*",
{}, "loc: ([48.875223042424125,2.323509661928681, "
"48.85745408145392, 2.3267084486160856, "
"48.859636574404355,2.351469427048221, "
"48.87756059389807, 2.3443610121873206])",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("8", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("4", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][2]["document"]["id"].get<std::string>());
// should work even if the polygon's vertices are listed clockwise
results = coll1->search("*",
{}, "loc: ([48.87756059389807, 2.3443610121873206, "
"48.859636574404355,2.351469427048221, "
"48.85745408145392, 2.3267084486160856, "
"48.875223042424125,2.323509661928681])",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
// when the geo query contains NaN
auto gop = coll1->search("*", {}, "loc: ([48.87756059389807, 2.3443610121873206, NaN, nan])",
{}, {}, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(gop.ok());
ASSERT_EQ("Value of filter field `loc`: must be in the "
"`([-44.50, 170.29], radius: 0.75 km, exact_filter_radius: 5 km)` or "
"([56.33, -65.97, 23.82, -127.82], exact_filter_radius: 7 km) format.", gop.error());
gop = coll1->search("*", {}, "loc: ([56.33, -65.97, 23.82, -127.82], exact_filter_radius: 7k)",
{}, {}, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(gop.ok());
ASSERT_EQ("Unit must be either `km` or `mi`.", gop.error());
auto search_op = coll1->search("*", {}, "loc: (10, 20, 11, 12, 14, 16, 10, 20, 11, 40)", {}, {}, {0}, 10, 1,
FREQUENCY);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Polygon is invalid: Edge 2 has duplicate vertex with edge 4", search_op.error());
search_op = coll1->search("*", {}, "loc: (10, 20, 11, 12, 14, 16, 10, 20)", {}, {}, {0}, 10, 1,
FREQUENCY);
ASSERT_TRUE(search_op.ok());
ASSERT_EQ(0, search_op.get()["found"].get<size_t>());
search_op = coll1->search("*", {}, "loc: [([10, 20, 30, 40, 50, 30]), ([10, 20, 11, 12, 14, 16, 10, 20])]", {}, {},
{0}, 10, 1, FREQUENCY);
ASSERT_TRUE(search_op.ok());
ASSERT_EQ(0, search_op.get()["found"].get<size_t>());
collectionManager.drop_collection("coll1");
}
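// A small illustrative helper (an assumption, not part of the collection API) that builds the
// polygon filter string used above from lat/lng vertex pairs. The vertices are simply flattened
// into `field: ([lat1, lng1, lat2, lng2, ...])`; as the test shows, both clockwise and
// counter-clockwise vertex orders are accepted.
static std::string build_polygon_filter_sketch(const std::string& field_name,
                                               const std::vector<std::pair<double, double>>& vertices) {
    std::string filter_str = field_name + ": ([";
    for(size_t i = 0; i < vertices.size(); i++) {
        if(i != 0) {
            filter_str += ", ";
        }
        filter_str += std::to_string(vertices[i].first) + ", " + std::to_string(vertices[i].second);
    }
    filter_str += "])";
    return filter_str;
}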
TEST_F(GeoFilteringTest, GeoPolygonFilteringSouthAmerica) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"North of Equator", "4.48615, -71.38049"},
{"South of Equator", "-8.48587, -71.02892"},
{"North of Equator, outside polygon", "4.13377, -56.00459"},
{"South of Equator, outside polygon", "-4.5041, -57.34523"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// polygon only covers 2 points but all points are returned since exact filtering is not performed.
auto results = coll1->search("*",
{}, "loc: ([13.3163, -82.3585, "
"-29.134, -82.3585, "
"-29.134, -59.8528, "
"13.3163, -59.8528])",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_EQ(4, results["hits"].size());
results = coll1->search("*",
{}, "loc: ([13.3163, -82.3585, "
"-29.134, -82.3585, "
"-29.134, -59.8528, "
"13.3163, -59.8528], exact_filter_radius: 2703km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(GeoFilteringTest, GeoPointFilteringWithNonSortableLocationField) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string", "sort": false},
{"name": "loc", "type": "geopoint", "sort": true},
{"name": "points", "type": "int32", "sort": false}
]
})"_json;
auto coll_op = collectionManager.create_collection(schema);
ASSERT_TRUE(coll_op.ok());
Collection* coll1 = coll_op.get();
std::vector<std::vector<std::string>> records = {
{"Palais Garnier", "48.872576479306765, 2.332291112241466"},
{"Sacre Coeur", "48.888286721920934, 2.342340862419206"},
{"Arc de Triomphe", "48.87538726829884, 2.296113163780903"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// pick a location close to only the Sacre Coeur
auto results = coll1->search("*",
{}, "loc: ([48.90615915923891, 2.3435897727061175], radius:3 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
}
| 24,420 | C++ | .cpp | 466 | 41.56867 | 119 | 0.537483 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,704 | match_score_test.cpp | typesense_typesense/test/match_score_test.cpp |
#include <chrono>
#include <gtest/gtest.h>
#include <match_score.h>
#include "posting_list.h"
#include <fstream>
#define token_offsets_file_path (std::string(ROOT_DIR) + std::string("external/token_offsets/file/token_offsets.txt")).c_str()
TEST(MatchTest, TokenOffsetsExceedWindowSize) {
std::vector<token_positions_t> token_positions = {
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})},
token_positions_t{false, std::vector<uint16_t>({1})}
};
const Match & this_match = Match(100, token_positions);
ASSERT_EQ(WINDOW_SIZE, (size_t)this_match.words_present);
}
TEST(MatchTest, MatchScoreV2) {
std::vector<token_positions_t> token_offsets;
token_offsets.push_back(token_positions_t{false, {25}});
token_offsets.push_back(token_positions_t{false, {26}});
token_offsets.push_back(token_positions_t{false, {11, 18, 24, 60}});
token_offsets.push_back(token_positions_t{false, {14, 27, 63}});
auto match = Match(100, token_offsets, true);
ASSERT_EQ(4, match.words_present);
ASSERT_EQ(3, match.distance);
ASSERT_FALSE(posting_list_t::has_phrase_match(token_offsets));
std::vector<uint16_t> expected_offsets = {25, 26, 24, 27};
for(size_t i=0; i<token_offsets.size(); i++) {
ASSERT_EQ(expected_offsets[i], match.offsets[i].offset);
}
// without populate window
match = Match(100, token_offsets, false);
ASSERT_EQ(4, match.words_present);
ASSERT_EQ(3, match.distance);
ASSERT_EQ(0, match.offsets.size());
token_offsets.clear();
token_offsets.push_back(token_positions_t{false, {38, 50, 170, 187, 195, 222}});
token_offsets.push_back(token_positions_t{true, {39, 140, 171, 189, 223}});
token_offsets.push_back(token_positions_t{false, {169, 180}});
match = Match(100, token_offsets, true, true);
ASSERT_EQ(3, match.words_present);
ASSERT_EQ(2, match.distance);
ASSERT_EQ(0, match.exact_match);
ASSERT_FALSE(posting_list_t::has_phrase_match(token_offsets));
expected_offsets = {170, 171, 169};
for(size_t i=0; i<token_offsets.size(); i++) {
ASSERT_EQ(expected_offsets[i], match.offsets[i].offset);
}
token_offsets.clear();
token_offsets.push_back(token_positions_t{false, {38, 50, 187, 195, 201}});
token_offsets.push_back(token_positions_t{false, {120, 167, 171, 223}});
token_offsets.push_back(token_positions_t{true, {240, 250}});
match = Match(100, token_offsets, true);
ASSERT_EQ(1, match.words_present);
ASSERT_EQ(0, match.distance);
ASSERT_EQ(0, match.exact_match);
ASSERT_FALSE(posting_list_t::has_phrase_match(token_offsets));
expected_offsets = {38, MAX_DISPLACEMENT, MAX_DISPLACEMENT};
for(size_t i=0; i<token_offsets.size(); i++) {
ASSERT_EQ(expected_offsets[i], match.offsets[i].offset);
}
// without populate window
match = Match(100, token_offsets, false);
ASSERT_EQ(1, match.words_present);
ASSERT_EQ(0, match.distance);
ASSERT_EQ(0, match.offsets.size());
ASSERT_EQ(0, match.exact_match);
// exact match
token_offsets.clear();
token_offsets.push_back(token_positions_t{false, {0}});
token_offsets.push_back(token_positions_t{true, {2}});
token_offsets.push_back(token_positions_t{false, {1}});
match = Match(100, token_offsets, true, true);
ASSERT_EQ(3, match.words_present);
ASSERT_EQ(2, match.distance);
ASSERT_EQ(1, match.exact_match);
ASSERT_FALSE(posting_list_t::has_phrase_match(token_offsets));
match = Match(100, token_offsets, true, false);
ASSERT_EQ(3, match.words_present);
ASSERT_EQ(2, match.distance);
ASSERT_EQ(0, match.exact_match);
ASSERT_FALSE(posting_list_t::has_phrase_match(token_offsets));
token_offsets.clear();
token_offsets.push_back(token_positions_t{false, {1}});
token_offsets.push_back(token_positions_t{false, {2}});
token_offsets.push_back(token_positions_t{true, {3}});
match = Match(100, token_offsets, true, true);
ASSERT_EQ(0, match.exact_match);
ASSERT_TRUE(posting_list_t::has_phrase_match(token_offsets));
token_offsets.clear();
token_offsets.push_back(token_positions_t{false, {0}});
token_offsets.push_back(token_positions_t{false, {1}});
token_offsets.push_back(token_positions_t{false, {2}});
match = Match(100, token_offsets, true, true);
ASSERT_EQ(0, match.exact_match);
ASSERT_TRUE(posting_list_t::has_phrase_match(token_offsets));
token_offsets.clear();
token_offsets.push_back(token_positions_t{false, {74}});
token_offsets.push_back(token_positions_t{false, {75}});
token_offsets.push_back(token_positions_t{false, {3, 42}});
expected_offsets = {74, 75, MAX_DISPLACEMENT};
match = Match(100, token_offsets, true, true);
ASSERT_EQ(3, match.offsets.size());
for(size_t i = 0; i < match.offsets.size(); i++) {
ASSERT_EQ(expected_offsets[i], match.offsets[i].offset);
}
// check phrase match
token_offsets.clear();
token_offsets.push_back(token_positions_t{false, {38, 50, 187, 195, 201}});
token_offsets.push_back(token_positions_t{false, {120, 167, 171, 196}});
token_offsets.push_back(token_positions_t{true, {197, 250}});
match = Match(100, token_offsets);
ASSERT_TRUE(posting_list_t::has_phrase_match(token_offsets));
token_offsets.clear();
token_offsets.push_back(token_positions_t{false, {120, 167, 171, 196}});
token_offsets.push_back(token_positions_t{false, {38, 50, 187, 195, 201}});
token_offsets.push_back(token_positions_t{true, {197, 250}});
match = Match(100, token_offsets);
ASSERT_FALSE(posting_list_t::has_phrase_match(token_offsets));
/*size_t total_distance = 0, words_present = 0, offset_sum = 0;
auto begin = std::chrono::high_resolution_clock::now();
for(size_t i = 0; i < 1; i++) {
auto match = Match(100, token_offsets, true);
total_distance += match.distance;
words_present += match.words_present;
offset_sum += match.offsets.size();
}
uint64_t timeNanos = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - begin).count();
LOG(INFO) << "Time taken: " << timeNanos;
LOG(INFO) << total_distance << ", " << words_present << ", " << offset_sum;*/
}
TEST(MatchTest, MatchScoreWithOffsetWrapAround) {
std::vector<token_positions_t> token_offsets;
std::ifstream infile(token_offsets_file_path);
std::string line;
while (std::getline(infile, line)) {
if(line == "last_token:") {
std::vector<uint16_t> positions;
token_offsets.push_back(token_positions_t{false, positions});
} else {
token_offsets.back().positions.push_back(std::stoi(line));
}
}
infile.close();
ASSERT_FALSE(posting_list_t::has_phrase_match(token_offsets));
auto match = Match(100, token_offsets, true, true);
ASSERT_EQ(2, match.words_present);
ASSERT_EQ(2, match.distance);
ASSERT_EQ(2, match.offsets.size());
ASSERT_EQ(4062, match.offsets[0].offset);
ASSERT_EQ(4060, match.offsets[1].offset);
}
| 7,783 | C++ | .cpp | 161 | 42.63354 | 126 | 0.664293 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,705 | core_api_utils_test.cpp | typesense_typesense/test/core_api_utils_test.cpp |
#include <gtest/gtest.h>
#include "collection.h"
#include <vector>
#include <collection_manager.h>
#include <core_api.h>
#include <analytics_manager.h>
#include "core_api_utils.h"
#include "raft_server.h"
#include "conversation_model_manager.h"
#include "conversation_manager.h"
class CoreAPIUtilsTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/core_api_utils";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
ConversationModelManager::init(store);
nlohmann::json schema_json = R"({
"name": "conversation_store",
"fields": [
{
"name": "conversation_id",
"type": "string"
},
{
"name": "role",
"type": "string",
"index": false
},
{
"name": "message",
"type": "string",
"index": false
},
{
"name": "timestamp",
"type": "int32",
"sort": true
}
]
})"_json;
collectionManager.create_collection(schema_json);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CoreAPIUtilsTest, StatefulRemoveDocs) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 2, fields, "points").get();
}
for(size_t i=0; i<100; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = "Title " + std::to_string(i);
doc["points"] = i;
coll1->add(doc.dump());
}
bool done;
deletion_state_t deletion_state;
deletion_state.collection = coll1;
deletion_state.num_removed = 0;
// single document match
filter_result_t filter_results;
coll1->get_filter_ids("points: 99", filter_results);
deletion_state.index_ids.emplace_back(filter_results.count, filter_results.docs);
filter_results.docs = nullptr;
for(size_t i=0; i<deletion_state.index_ids.size(); i++) {
deletion_state.offsets.push_back(0);
}
stateful_remove_docs(&deletion_state, 5, done);
ASSERT_EQ(1, deletion_state.num_removed);
ASSERT_TRUE(done);
// match 12 documents (multiple batches)
for(auto& kv: deletion_state.index_ids) {
delete [] kv.second;
}
deletion_state.index_ids.clear();
deletion_state.offsets.clear();
deletion_state.num_removed = 0;
coll1->get_filter_ids("points:< 11", filter_results);
deletion_state.index_ids.emplace_back(filter_results.count, filter_results.docs);
filter_results.docs = nullptr;
for(size_t i=0; i<deletion_state.index_ids.size(); i++) {
deletion_state.offsets.push_back(0);
}
stateful_remove_docs(&deletion_state, 4, done);
ASSERT_EQ(4, deletion_state.num_removed);
ASSERT_FALSE(done);
stateful_remove_docs(&deletion_state, 4, done);
ASSERT_EQ(8, deletion_state.num_removed);
ASSERT_FALSE(done);
stateful_remove_docs(&deletion_state, 4, done);
ASSERT_EQ(11, deletion_state.num_removed);
ASSERT_TRUE(done);
// match 9 documents (multiple batches)
for(auto& kv: deletion_state.index_ids) {
delete [] kv.second;
}
deletion_state.index_ids.clear();
deletion_state.offsets.clear();
deletion_state.num_removed = 0;
coll1->get_filter_ids("points:< 20", filter_results);
deletion_state.index_ids.emplace_back(filter_results.count, filter_results.docs);
filter_results.docs = nullptr;
for(size_t i=0; i<deletion_state.index_ids.size(); i++) {
deletion_state.offsets.push_back(0);
}
stateful_remove_docs(&deletion_state, 7, done);
ASSERT_EQ(7, deletion_state.num_removed);
ASSERT_FALSE(done);
stateful_remove_docs(&deletion_state, 7, done);
ASSERT_EQ(9, deletion_state.num_removed);
ASSERT_TRUE(done);
// fetch raw document IDs
for(size_t i=0; i<100; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = "Title " + std::to_string(i);
doc["points"] = i;
coll1->add(doc.dump());
}
for(auto& kv: deletion_state.index_ids) {
delete [] kv.second;
}
deletion_state.index_ids.clear();
deletion_state.offsets.clear();
deletion_state.num_removed = 0;
coll1->get_filter_ids("id:[0, 1, 2]", filter_results);
deletion_state.index_ids.emplace_back(filter_results.count, filter_results.docs);
filter_results.docs = nullptr;
for(size_t i=0; i<deletion_state.index_ids.size(); i++) {
deletion_state.offsets.push_back(0);
}
stateful_remove_docs(&deletion_state, 5, done);
ASSERT_EQ(3, deletion_state.num_removed);
ASSERT_TRUE(done);
// delete single doc
for(auto& kv: deletion_state.index_ids) {
delete [] kv.second;
}
deletion_state.index_ids.clear();
deletion_state.offsets.clear();
deletion_state.num_removed = 0;
coll1->get_filter_ids("id :10", filter_results);
deletion_state.index_ids.emplace_back(filter_results.count, filter_results.docs);
filter_results.docs = nullptr;
for(size_t i=0; i<deletion_state.index_ids.size(); i++) {
deletion_state.offsets.push_back(0);
}
stateful_remove_docs(&deletion_state, 5, done);
ASSERT_EQ(1, deletion_state.num_removed);
ASSERT_TRUE(done);
for(auto& kv: deletion_state.index_ids) {
delete [] kv.second;
}
deletion_state.index_ids.clear();
deletion_state.offsets.clear();
deletion_state.num_removed = 0;
// bad filter query
auto op = coll1->get_filter_ids("bad filter", filter_results);
ASSERT_FALSE(op.ok());
ASSERT_STREQ("Could not parse the filter query.", op.error().c_str());
collectionManager.drop_collection("coll1");
}
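// A minimal sketch (not part of the API) of how a caller could drive stateful_remove_docs to
// completion once a deletion_state_t has been prepared as in the test above: each call removes
// at most `batch_size` matching documents and sets `done` once the filtered ids are exhausted.
// The helper name and the batch-size choice are illustrative assumptions.
static size_t drain_stateful_remove_docs_sketch(deletion_state_t* deletion_state, size_t batch_size) {
    bool done = false;
    while(!done) {
        stateful_remove_docs(deletion_state, batch_size, done);
    }
    return deletion_state->num_removed;
}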
TEST_F(CoreAPIUtilsTest, MultiSearchEmbeddedKeys) {
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["filter_by"] = "user_id: 100";
nlohmann::json body;
body["searches"] = nlohmann::json::array();
nlohmann::json search;
search["collection"] = "users";
search["filter_by"] = "age: > 100";
body["searches"].push_back(search);
req->body = body.dump();
nlohmann::json embedded_params;
embedded_params["filter_by"] = "foo: bar";
req->embedded_params_vec.push_back(embedded_params);
post_multi_search(req, res);
// ensure that req params are appended to (embedded params are also rolled into req params); see the merge sketch after this test
ASSERT_EQ("((user_id: 100) && (age: > 100)) && (foo: bar)", req->params["filter_by"]);
// when empty filter_by is present in req params, don't add ()
req->params["filter_by"] = "";
post_multi_search(req, res);
ASSERT_EQ("((age: > 100)) && (foo: bar)", req->params["filter_by"]);
// when empty filter_by in collection search params, don't add ()
req->params["filter_by"] = "user_id: 100";
search["filter_by"] = "";
body["searches"].clear();
body["searches"].push_back(search);
req->body = body.dump();
post_multi_search(req, res);
ASSERT_EQ("((user_id: 100)) && (foo: bar)", req->params["filter_by"]);
// when both are empty, don't add ()
req->params["filter_by"] = "";
search["filter_by"] = "";
body["searches"].clear();
body["searches"].push_back(search);
req->body = body.dump();
post_multi_search(req, res);
ASSERT_EQ("(foo: bar)", req->params["filter_by"]);
// try setting max search limit
req->embedded_params_vec[0]["limit_multi_searches"] = 0;
ASSERT_FALSE(post_multi_search(req, res));
ASSERT_EQ("{\"message\": \"Number of multi searches exceeds `limit_multi_searches` parameter.\"}", res->body);
req->embedded_params_vec[0]["limit_multi_searches"] = 1;
ASSERT_TRUE(post_multi_search(req, res));
// req params must be overridden by embedded param
req->embedded_params_vec[0]["limit_multi_searches"] = 0;
req->params["limit_multi_searches"] = "100";
ASSERT_FALSE(post_multi_search(req, res));
ASSERT_EQ("{\"message\": \"Number of multi searches exceeds `limit_multi_searches` parameter.\"}", res->body);
// use req params if embedded param not present
req->embedded_params_vec[0].erase("limit_multi_searches");
ASSERT_TRUE(post_multi_search(req, res));
}
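// A minimal sketch (not the actual server code) of the filter_by merging behaviour the assertions
// above demonstrate: the request-level filter and the per-search filter are each wrapped in
// parentheses and joined with &&, that result is parenthesised again, the embedded key's filter is
// appended last, and empty components are skipped so no stray "()" is produced. The helper name is
// hypothetical and used only for illustration.
static std::string merge_filters_sketch(const std::string& req_filter,
                                        const std::string& search_filter,
                                        const std::string& embedded_filter) {
    std::string merged;
    if(!req_filter.empty()) {
        merged = "(" + req_filter + ")";
    }
    if(!search_filter.empty()) {
        merged = merged.empty() ? "(" + search_filter + ")" : merged + " && (" + search_filter + ")";
    }
    if(!merged.empty()) {
        merged = "(" + merged + ")";
    }
    if(!embedded_filter.empty()) {
        merged = merged.empty() ? "(" + embedded_filter + ")" : merged + " && (" + embedded_filter + ")";
    }
    return merged;
}
// e.g. merge_filters_sketch("user_id: 100", "age: > 100", "foo: bar")
//      == "((user_id: 100) && (age: > 100)) && (foo: bar)", matching the first assertion above.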
TEST_F(CoreAPIUtilsTest, SearchEmbeddedPresetKey) {
nlohmann::json preset_value = R"(
{"per_page": 100}
)"_json;
Option<bool> success_op = collectionManager.upsert_preset("apple", preset_value);
ASSERT_TRUE(success_op.ok());
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
nlohmann::json embedded_params;
embedded_params["preset"] = "apple";
req->embedded_params_vec.push_back(embedded_params);
req->params["collection"] = "foo";
get_search(req, res);
ASSERT_EQ("100", req->params["per_page"]);
// with multi search
req->params.clear();
nlohmann::json body;
body["searches"] = nlohmann::json::array();
nlohmann::json search;
search["collection"] = "users";
search["filter_by"] = "age: > 100";
body["searches"].push_back(search);
req->body = body.dump();
post_multi_search(req, res);
ASSERT_EQ("100", req->params["per_page"]);
}
TEST_F(CoreAPIUtilsTest, ExtractCollectionsFromRequestBody) {
std::map<std::string, std::string> req_params;
std::string body = R"(
{
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "points", "type": "int32" }
],
"default_sorting_field": "points"
}
)";
route_path rpath("POST", {"collections"}, post_create_collection, false, false);
std::vector<collection_key_t> collections;
std::vector<nlohmann::json> embedded_params_vec;
get_collections_for_auth(req_params, body, rpath, "foo", collections, embedded_params_vec);
ASSERT_EQ(1, collections.size());
ASSERT_EQ("coll1", collections[0].collection);
ASSERT_EQ("foo", collections[0].api_key);
// badly constructed collection schema body
collections.clear();
embedded_params_vec.clear();
body = R"(
{
"name": "coll1
"fields": [
{"name": "title", "type": "string" },
{"name": "points", "type": "int32" }
],
"default_sorting_field": "points"
}
)";
get_collections_for_auth(req_params, body, rpath, "foo", collections, embedded_params_vec);
ASSERT_EQ(1, collections.size());
ASSERT_EQ("", collections[0].collection);
ASSERT_EQ("foo", collections[0].api_key);
ASSERT_EQ(1, embedded_params_vec.size());
collections.clear();
embedded_params_vec.clear();
// missing collection name
body = R"(
{
"fields": [
{"name": "title", "type": "string" },
{"name": "points", "type": "int32" }
],
"default_sorting_field": "points"
}
)";
get_collections_for_auth(req_params, body, rpath, "foo", collections, embedded_params_vec);
ASSERT_EQ(1, collections.size());
ASSERT_EQ("", collections[0].collection);
ASSERT_EQ("foo", collections[0].api_key);
// check for multi_search
collections.clear();
embedded_params_vec.clear();
rpath = route_path("POST", {"collections"}, post_multi_search, false, false);
body = R"(
{"searches":[
{
"query_by": "concat",
"collection": "products",
"q": "battery",
"x-typesense-api-key": "bar"
}
]
}
)";
get_collections_for_auth(req_params, body, rpath, "foo", collections, embedded_params_vec);
ASSERT_EQ(1, collections.size());
ASSERT_EQ("products", collections[0].collection);
ASSERT_EQ("bar", collections[0].api_key);
// when api key type is bad
collections.clear();
embedded_params_vec.clear();
rpath = route_path("POST", {"collections"}, post_multi_search, false, false);
body = R"(
{"searches":[
{
"query_by": "concat",
"collection": "products",
"q": "battery",
"x-typesense-api-key": 123
}
]
}
)";
get_collections_for_auth(req_params, body, rpath, "foo", collections, embedded_params_vec);
ASSERT_EQ("foo", collections[0].api_key);
// when collection name is bad
collections.clear();
embedded_params_vec.clear();
rpath = route_path("POST", {"collections"}, post_multi_search, false, false);
body = R"(
{"searches":[
{
"query_by": "concat",
"collection": 123,
"q": "battery"
}
]
}
)";
get_collections_for_auth(req_params, body, rpath, "foo", collections, embedded_params_vec);
ASSERT_EQ("", collections[0].collection);
// when the multi_search body is malformed JSON, fall back to an empty collection and the default api key
collections.clear();
embedded_params_vec.clear();
body = R"(
{"searches":
{
"query_by": "concat",
"collection": "products",
"q": "battery"
}
]
}
)";
get_collections_for_auth(req_params, body, rpath, "foo", collections, embedded_params_vec);
ASSERT_EQ(1, collections.size());
ASSERT_EQ("", collections[0].collection);
ASSERT_EQ("foo", collections[0].api_key);
collections.clear();
embedded_params_vec.clear();
body = R"(
{"searches":[
{
"query_by": "concat",
"q": "battery",
"x-typesense-api-key": "bar"
}
]
}
)";
get_collections_for_auth(req_params, body, rpath, "foo", collections, embedded_params_vec);
ASSERT_EQ(1, collections.size());
ASSERT_EQ("", collections[0].collection);
ASSERT_EQ("bar", collections[0].api_key);
}
TEST_F(CoreAPIUtilsTest, ExtractCollectionsFromRequestBodyExtended) {
route_path rpath_multi_search = route_path("POST", {"multi_search"}, post_multi_search, false, false);
std::map<std::string, std::string> req_params;
std::vector<collection_key_t> collections;
std::vector<nlohmann::json> embedded_params_vec;
get_collections_for_auth(req_params, "{]", rpath_multi_search, "", collections, embedded_params_vec);
ASSERT_EQ(1, collections.size());
ASSERT_EQ("", collections[0].collection);
ASSERT_EQ(1, embedded_params_vec.size());
nlohmann::json sample_search_body;
sample_search_body["searches"] = nlohmann::json::array();
nlohmann::json search_query;
search_query["q"] = "aaa";
search_query["collection"] = "company1";
sample_search_body["searches"].push_back(search_query);
search_query["collection"] = "company2";
sample_search_body["searches"].push_back(search_query);
collections.clear();
embedded_params_vec.clear();
get_collections_for_auth(req_params, sample_search_body.dump(), rpath_multi_search, "", collections, embedded_params_vec);
ASSERT_EQ(2, collections.size());
ASSERT_EQ("company1", collections[0].collection);
ASSERT_EQ("company2", collections[1].collection);
collections.clear();
req_params["collection"] = "foo";
get_collections_for_auth(req_params, sample_search_body.dump(), rpath_multi_search, "", collections, embedded_params_vec);
ASSERT_EQ(2, collections.size());
ASSERT_EQ("company1", collections[0].collection);
ASSERT_EQ("company2", collections[1].collection);
collections.clear();
embedded_params_vec.clear();
// when one of the searches in the array doesn't have an explicit collection, use the collection name from req param
sample_search_body["searches"][1].erase("collection");
get_collections_for_auth(req_params, sample_search_body.dump(), rpath_multi_search, "", collections, embedded_params_vec);
ASSERT_EQ(2, collections.size());
ASSERT_EQ("company1", collections[0].collection);
ASSERT_EQ("foo", collections[1].collection);
collections.clear();
embedded_params_vec.clear();
req_params.clear();
route_path rpath_search = route_path("GET", {"collections", ":collection", "documents", "search"}, get_search, false, false);
get_collections_for_auth(req_params, sample_search_body.dump(), rpath_search, "", collections, embedded_params_vec);
ASSERT_EQ(1, collections.size());
ASSERT_EQ("", collections[0].collection);
ASSERT_EQ(1, embedded_params_vec.size());
collections.clear();
embedded_params_vec.clear();
req_params.clear();
req_params["collection"] = "foo";
get_collections_for_auth(req_params, sample_search_body.dump(), rpath_search, "", collections, embedded_params_vec);
ASSERT_EQ(1, collections.size());
ASSERT_EQ("foo", collections[0].collection);
ASSERT_EQ(1, embedded_params_vec.size());
}
TEST_F(CoreAPIUtilsTest, MultiSearchWithPresetShouldUsePresetForAuth) {
nlohmann::json preset_value = R"(
{"searches":[
{"collection":"foo","q":"apple", "query_by": "title"},
{"collection":"bar","q":"apple", "query_by": "title"}
]}
)"_json;
Option<bool> success_op = collectionManager.upsert_preset("apple", preset_value);
route_path rpath_multi_search = route_path("POST", {"multi_search"}, post_multi_search, false, false);
std::map<std::string, std::string> req_params;
std::vector<collection_key_t> collections;
std::vector<nlohmann::json> embedded_params_vec;
std::string search_body = R"(
{"searches":[
{"collection":"foo1","q":"apple", "query_by": "title"},
{"collection":"bar1","q":"apple", "query_by": "title"}
]}
)";
// without preset parameter, use collections from request body
get_collections_for_auth(req_params, search_body, rpath_multi_search, "", collections, embedded_params_vec);
ASSERT_EQ(2, collections.size());
ASSERT_EQ("foo1", collections[0].collection);
ASSERT_EQ("bar1", collections[1].collection);
ASSERT_EQ(2, embedded_params_vec.size());
// with preset parameter, use collections from preset configuration
collections.clear();
embedded_params_vec.clear();
req_params["preset"] = "apple";
get_collections_for_auth(req_params, search_body, rpath_multi_search, "", collections, embedded_params_vec);
ASSERT_EQ(2, collections.size());
ASSERT_EQ("foo", collections[0].collection);
ASSERT_EQ("bar", collections[1].collection);
ASSERT_EQ(2, embedded_params_vec.size());
// try using multi_search preset within individual search param
preset_value = R"(
{"collection":"preset_coll"}
)"_json;
collectionManager.upsert_preset("single_preset", preset_value);
req_params.clear();
collections.clear();
embedded_params_vec.clear();
search_body = R"(
{"searches":[
{"collection":"foo1","q":"apple", "query_by": "title", "preset": "single_preset"},
{"collection":"bar1","q":"apple", "query_by": "title", "preset": "single_preset"}
]}
)";
get_collections_for_auth(req_params, search_body, rpath_multi_search, "", collections, embedded_params_vec);
ASSERT_EQ(2, collections.size());
ASSERT_EQ("foo1", collections[0].collection);
ASSERT_EQ("bar1", collections[1].collection);
ASSERT_EQ(2, embedded_params_vec.size());
// without collection in search array
req_params.clear();
collections.clear();
embedded_params_vec.clear();
search_body = R"(
{"searches":[
{"q":"apple", "query_by": "title", "preset": "single_preset"},
{"q":"apple", "query_by": "title", "preset": "single_preset"}
]}
)";
get_collections_for_auth(req_params, search_body, rpath_multi_search, "", collections, embedded_params_vec);
ASSERT_EQ(2, collections.size());
ASSERT_EQ("preset_coll", collections[0].collection);
ASSERT_EQ("preset_coll", collections[1].collection);
ASSERT_EQ(2, embedded_params_vec.size());
}
TEST_F(CoreAPIUtilsTest, PresetMultiSearch) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "name", "type": "string" },
{"name": "points", "type": "int32" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto preset_value = R"(
{"collection":"preset_coll", "per_page": "12"}
)"_json;
collectionManager.upsert_preset("single_preset", preset_value);
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "coll1";
auto search_body = R"(
{"searches":[
{"collection":"coll1","q":"apple", "query_by": "name", "preset": "single_preset"}
]}
)";
req->body = search_body;
nlohmann::json embedded_params;
req->embedded_params_vec.push_back(embedded_params);
post_multi_search(req, res);
auto res_json = nlohmann::json::parse(res->body);
ASSERT_EQ(1, res_json["results"].size());
ASSERT_EQ(0, res_json["results"][0]["found"].get<size_t>());
// with multiple "searches" preset configuration
preset_value = R"(
{"searches":[
{"collection":"coll1", "q": "*", "per_page": "8"},
{"collection":"coll1", "q": "*", "per_page": "11"}
]}
)"_json;
collectionManager.upsert_preset("multi_preset", preset_value);
embedded_params.clear();
req->params.clear();
req->params["preset"] = "multi_preset";
req->embedded_params_vec.clear();
req->embedded_params_vec.push_back(embedded_params);
req->embedded_params_vec.push_back(embedded_params);
// "preset": "multi_preset"
search_body = R"(
{"searches":[
{"collection":"coll1","q":"apple", "query_by": "title"}
]}
)";
req->body = search_body;
post_multi_search(req, res);
res_json = nlohmann::json::parse(res->body);
ASSERT_EQ(2, res_json["results"].size());
ASSERT_EQ(0, res_json["results"][0]["found"].get<size_t>());
ASSERT_EQ(0, res_json["results"][1]["found"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CoreAPIUtilsTest, SearchPagination) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "name", "type": "string" },
{"name": "points", "type": "int32" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
for(size_t i = 0; i < 20; i++) {
nlohmann::json doc;
doc["name"] = "Title " + std::to_string(i);
doc["points"] = i;
coll1->add(doc.dump(), CREATE);
}
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
nlohmann::json body;
// without any pagination params, default is top 10 records by sort order
body["searches"] = nlohmann::json::array();
nlohmann::json search;
search["collection"] = "coll1";
search["q"] = "title";
search["query_by"] = "name";
search["sort_by"] = "points:desc";
body["searches"].push_back(search);
req->body = body.dump();
nlohmann::json embedded_params;
req->embedded_params_vec.push_back(embedded_params);
post_multi_search(req, res);
nlohmann::json results = nlohmann::json::parse(res->body)["results"][0];
ASSERT_EQ(10, results["hits"].size());
ASSERT_EQ(19, results["hits"][0]["document"]["points"].get<size_t>());
ASSERT_EQ(1, results["page"].get<size_t>());
// when offset is used we should expect the same but "offset" should be returned in response
search.clear();
req->params.clear();
body["searches"] = nlohmann::json::array();
search["collection"] = "coll1";
search["q"] = "title";
search["offset"] = "1";
search["query_by"] = "name";
search["sort_by"] = "points:desc";
body["searches"].push_back(search);
req->body = body.dump();
post_multi_search(req, res);
results = nlohmann::json::parse(res->body)["results"][0];
ASSERT_EQ(10, results["hits"].size());
ASSERT_EQ(18, results["hits"][0]["document"]["points"].get<size_t>());
ASSERT_EQ(1, results["offset"].get<size_t>());
// use limit to restrict page size
search.clear();
req->params.clear();
body["searches"] = nlohmann::json::array();
search["collection"] = "coll1";
search["q"] = "title";
search["offset"] = "1";
search["limit"] = "5";
search["query_by"] = "name";
search["sort_by"] = "points:desc";
body["searches"].push_back(search);
req->body = body.dump();
post_multi_search(req, res);
results = nlohmann::json::parse(res->body)["results"][0];
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(18, results["hits"][0]["document"]["points"].get<size_t>());
ASSERT_EQ(1, results["offset"].get<size_t>());
// when page is -1
search.clear();
req->params.clear();
body["searches"] = nlohmann::json::array();
search["collection"] = "coll1";
search["q"] = "title";
search["page"] = "-1";
search["limit"] = "5";
search["query_by"] = "name";
search["sort_by"] = "points:desc";
body["searches"].push_back(search);
req->body = body.dump();
post_multi_search(req, res);
results = nlohmann::json::parse(res->body)["results"][0];
ASSERT_EQ(400, results["code"].get<size_t>());
ASSERT_EQ("Parameter `page` must be an unsigned integer.", results["error"].get<std::string>());
// when offset is -1
search.clear();
req->params.clear();
body["searches"] = nlohmann::json::array();
search["collection"] = "coll1";
search["q"] = "title";
search["offset"] = "-1";
search["query_by"] = "name";
search["sort_by"] = "points:desc";
body["searches"].push_back(search);
req->body = body.dump();
post_multi_search(req, res);
results = nlohmann::json::parse(res->body)["results"][0];
ASSERT_EQ(400, results["code"].get<size_t>());
ASSERT_EQ("Parameter `offset` must be an unsigned integer.", results["error"].get<std::string>());
// when page is 0 and offset is NOT sent, we will treat it as page=1
search.clear();
req->params.clear();
body["searches"] = nlohmann::json::array();
search["collection"] = "coll1";
search["q"] = "title";
search["page"] = "0";
search["query_by"] = "name";
search["sort_by"] = "points:desc";
body["searches"].push_back(search);
req->body = body.dump();
post_multi_search(req, res);
results = nlohmann::json::parse(res->body)["results"][0];
ASSERT_EQ(10, results["hits"].size());
ASSERT_EQ(1, results["page"].get<size_t>());
ASSERT_EQ(0, results.count("offset"));
// when both page and offset are sent, use page
search.clear();
req->params.clear();
body["searches"] = nlohmann::json::array();
search["collection"] = "coll1";
search["q"] = "title";
search["page"] = "2";
search["offset"] = "30";
search["query_by"] = "name";
search["sort_by"] = "points:desc";
body["searches"].push_back(search);
req->body = body.dump();
post_multi_search(req, res);
results = nlohmann::json::parse(res->body)["results"][0];
ASSERT_EQ(10, results["hits"].size());
ASSERT_EQ(2, results["page"].get<size_t>());
ASSERT_EQ(0, results.count("offset"));
}
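// A small sketch (an assumption, not the server implementation) of the pagination semantics
// demonstrated above: the page size defaults to 10 and is capped by `limit`, `page` is 1-based and
// takes precedence over `offset` when both are sent, page=0 is treated as page=1, and negative
// values are rejected earlier with a 400 error.
static size_t first_record_index_sketch(size_t page, bool page_sent, size_t offset, size_t per_page = 10) {
    if(page_sent) {
        const size_t effective_page = (page == 0) ? 1 : page;   // page=0 behaves like page=1
        return (effective_page - 1) * per_page;                 // offset is ignored when page is sent
    }
    return offset;                                               // offset directly skips that many records
}
// e.g. first_record_index_sketch(2, true, 30) == 10, matching the "use page" case above.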
TEST_F(CoreAPIUtilsTest, ExportWithFilter) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 2, fields, "points").get();
}
for(size_t i=0; i<4; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = "Title " + std::to_string(i);
doc["points"] = i;
coll1->add(doc.dump());
}
bool done;
std::string res_body;
export_state_t export_state;
filter_result_t filter_result;
coll1->get_filter_ids("points:>=0", export_state.filter_result);
export_state.collection = coll1;
export_state.res_body = &res_body;
stateful_export_docs(&export_state, 2, done);
ASSERT_FALSE(done);
ASSERT_EQ('\n', export_state.res_body->back());
// should not have trailing newline character for the last line
stateful_export_docs(&export_state, 2, done);
ASSERT_TRUE(done);
ASSERT_EQ('}', export_state.res_body->back());
}
TEST_F(CoreAPIUtilsTest, ExportWithJoin) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string"},
{"name": "product_description", "type": "string"},
{"name": "rating", "type": "int32"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair.",
"rating": "2"
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients.",
"rating": "4"
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "product_id", "type": "string", "reference": "Products.product_id"}
]
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 73.5,
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 75,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_id": "product_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Dummy",
"fields": [
{"name": "dummy_id", "type": "string"}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
bool done;
std::string res_body;
export_state_t export_state;
auto coll1 = collectionManager.get_collection_unsafe("Products");
coll1->get_filter_ids("$Customers(customer_id:customer_a)", export_state.filter_result);
export_state.collection = coll1;
export_state.res_body = &res_body;
export_state.include_fields.insert("product_name");
export_state.ref_include_exclude_fields_vec.emplace_back(ref_include_exclude_fields{"Customers", {"product_price"}, "",
"", ref_include::nest});
stateful_export_docs(&export_state, 1, done);
ASSERT_FALSE(done);
ASSERT_EQ('\n', export_state.res_body->back());
auto doc = nlohmann::json::parse(export_state.res_body->c_str());
ASSERT_EQ("shampoo", doc["product_name"]);
ASSERT_EQ(143, doc["Customers"]["product_price"]);
// should not have trailing newline character for the last line
stateful_export_docs(&export_state, 1, done);
ASSERT_TRUE(done);
ASSERT_EQ('}', export_state.res_body->back());
doc = nlohmann::json::parse(export_state.res_body->c_str());
ASSERT_EQ("soap", doc["product_name"]);
ASSERT_EQ(73.5, doc["Customers"]["product_price"]);
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "Products";
req->params["q"] = "*";
req->params["filter_by"] = "$Customers(customer_id: customer_a)";
get_export_documents(req, res);
std::vector<std::string> res_strs;
StringUtils::split(res->body, res_strs, "\n");
doc = nlohmann::json::parse(res_strs[0]);
ASSERT_EQ(6, doc.size());
ASSERT_EQ(1, doc.count("product_name"));
ASSERT_EQ("shampoo", doc["product_name"]);
ASSERT_EQ(1, doc.count("Customers"));
ASSERT_EQ(5, doc["Customers"].size());
ASSERT_EQ(1, doc["Customers"].count("product_price"));
ASSERT_EQ(143, doc["Customers"]["product_price"]);
doc = nlohmann::json::parse(res_strs[1]);
ASSERT_EQ(6, doc.size());
ASSERT_EQ(1, doc.count("product_name"));
ASSERT_EQ("soap", doc["product_name"]);
ASSERT_EQ(1, doc.count("Customers"));
ASSERT_EQ(5, doc["Customers"].size());
ASSERT_EQ(1, doc["Customers"].count("product_price"));
ASSERT_EQ(73.5, doc["Customers"]["product_price"]);
delete dynamic_cast<export_state_t*>(req->data);
req->data = nullptr;
res->body.clear();
req->params["filter_by"] = "rating: >2 && (id:* || $Customers(id:*))";
req->params["include_fields"] = "$Customers(*,strategy:nest_array) as Customers";
get_export_documents(req, res);
res_strs.clear();
StringUtils::split(res->body, res_strs, "\n");
doc = nlohmann::json::parse(res_strs[0]);
ASSERT_EQ(6, doc.size());
ASSERT_EQ(1, doc.count("product_name"));
ASSERT_EQ("soap", doc["product_name"]);
ASSERT_EQ(1, doc.count("Customers"));
ASSERT_EQ(2, doc["Customers"].size());
ASSERT_EQ(5, doc["Customers"][0].size());
ASSERT_EQ("customer_a", doc["Customers"][0]["customer_id"]);
ASSERT_EQ(73.5, doc["Customers"][0]["product_price"]);
ASSERT_EQ(5, doc["Customers"][1].size());
ASSERT_EQ("customer_b", doc["Customers"][1]["customer_id"]);
ASSERT_EQ(140, doc["Customers"][1]["product_price"]);
}
TEST_F(CoreAPIUtilsTest, TestParseAPIKeyIPFromMetadata) {
// format: <length of api key>:<api key><ip address> (see the illustrative parser sketch after this test)
std::string valid_metadata = "4:abcd127.0.0.1";
std::string invalid_ip = "4:abcd127.0.0.1:1234";
std::string invalid_api_key = "3:abcd127.0.0.1";
std::string no_length = "abcd127.0.0.1";
std::string no_colon = "4abcd127.0.0.1";
std::string no_ip = "4:abcd";
std::string only_length = "4:";
std::string only_colon = ":";
std::string only_ip = "127.0.0.1";
Option<std::pair<std::string, std::string>> res = get_api_key_and_ip(valid_metadata);
EXPECT_TRUE(res.ok());
EXPECT_EQ("abcd", res.get().first);
EXPECT_EQ("127.0.0.1", res.get().second);
res = get_api_key_and_ip(invalid_ip);
EXPECT_FALSE(res.ok());
res = get_api_key_and_ip(invalid_api_key);
EXPECT_FALSE(res.ok());
res = get_api_key_and_ip(no_length);
EXPECT_FALSE(res.ok());
res = get_api_key_and_ip(no_colon);
EXPECT_FALSE(res.ok());
res = get_api_key_and_ip(no_ip);
EXPECT_FALSE(res.ok());
res = get_api_key_and_ip(only_length);
EXPECT_FALSE(res.ok());
res = get_api_key_and_ip(only_colon);
EXPECT_FALSE(res.ok());
res = get_api_key_and_ip(only_ip);
EXPECT_FALSE(res.ok());
}
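// A minimal illustrative parser (an assumption; not the real get_api_key_and_ip) for the
// "<length of api key>:<api key><ip address>" metadata format exercised above. It only demonstrates
// the length-prefix splitting; the real helper additionally rejects inputs whose trailing part is
// not a well-formed IP address, which is why cases like "4:abcd127.0.0.1:1234" and
// "3:abcd127.0.0.1" fail above.
static bool parse_api_key_and_ip_sketch(const std::string& metadata, std::string& api_key, std::string& ip) {
    const auto colon_pos = metadata.find(':');
    if(colon_pos == std::string::npos || colon_pos == 0) {
        return false;  // covers the "no length", "no colon" and ":" cases
    }
    const std::string length_str = metadata.substr(0, colon_pos);
    for(const char c: length_str) {
        if(c < '0' || c > '9') {
            return false;  // the length prefix must be numeric
        }
    }
    const size_t key_length = std::stoul(length_str);
    if(metadata.size() <= colon_pos + 1 + key_length) {
        return false;  // covers "4:abcd" and "4:" — nothing left over for the IP address
    }
    api_key = metadata.substr(colon_pos + 1, key_length);
    ip = metadata.substr(colon_pos + 1 + key_length);
    return true;
}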
TEST_F(CoreAPIUtilsTest, ExportIncludeExcludeFields) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "name", "type": "object" },
{"name": "points", "type": "int32" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"name": {"first": "John", "last": "Smith"},
"points": 100,
"description": "description"
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "coll1";
// include fields
req->params["include_fields"] = "name.last";
get_export_documents(req, res);
std::vector<std::string> res_strs;
StringUtils::split(res->body, res_strs, "\n");
nlohmann::json doc = nlohmann::json::parse(res_strs[0]);
ASSERT_EQ(1, doc.size());
ASSERT_EQ(1, doc.count("name"));
ASSERT_EQ(1, doc["name"].count("last"));
// exclude fields
delete dynamic_cast<export_state_t*>(req->data);
req->data = nullptr;
res->body.clear();
req->params.erase("include_fields");
req->params["exclude_fields"] = "name.last";
get_export_documents(req, res);
res_strs.clear();
StringUtils::split(res->body, res_strs, "\n");
doc = nlohmann::json::parse(res_strs[0]);
ASSERT_EQ(4, doc.size());
ASSERT_EQ(1, doc.count("id"));
ASSERT_EQ(1, doc.count("points"));
ASSERT_EQ(1, doc.count("name"));
ASSERT_EQ(1, doc["name"].count("first"));
ASSERT_EQ(1, doc.count("description")); // field not in schema is exported
// no include or exclude fields
delete dynamic_cast<export_state_t*>(req->data);
req->data = nullptr;
res->body.clear();
req->params.erase("include_fields");
req->params.erase("exclude_fields");
get_export_documents(req, res);
res_strs.clear();
StringUtils::split(res->body, res_strs, "\n");
doc = nlohmann::json::parse(res_strs[0]);
ASSERT_EQ(4, doc.size());
ASSERT_EQ(1, doc.count("id"));
ASSERT_EQ(1, doc.count("points"));
ASSERT_EQ(1, doc.count("name"));
ASSERT_EQ(1, doc["name"].count("first"));
ASSERT_EQ(1, doc["name"].count("last"));
ASSERT_EQ(1, doc.count("description")); // field not in schema is exported
collectionManager.drop_collection("coll1");
}
TEST_F(CoreAPIUtilsTest, ExportIncludeExcludeFieldsWithFilter) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "name", "type": "object" },
{"name": "points", "type": "int32" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"name": {"first": "John", "last": "Smith"},
"points": 100
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "coll1";
// include fields
req->params["include_fields"] = "name.last";
req->params["filter_by"] = "points:>=0";
get_export_documents(req, res);
std::vector<std::string> res_strs;
StringUtils::split(res->body, res_strs, "\n");
nlohmann::json doc = nlohmann::json::parse(res_strs[0]);
ASSERT_EQ(1, doc.size());
ASSERT_EQ(1, doc.count("name"));
ASSERT_EQ(1, doc["name"].count("last"));
// exclude fields
delete dynamic_cast<export_state_t*>(req->data);
req->data = nullptr;
res->body.clear();
req->params.erase("include_fields");
req->params["exclude_fields"] = "name.last";
get_export_documents(req, res);
res_strs.clear();
StringUtils::split(res->body, res_strs, "\n");
doc = nlohmann::json::parse(res_strs[0]);
ASSERT_EQ(3, doc.size());
ASSERT_EQ(1, doc.count("id"));
ASSERT_EQ(1, doc.count("points"));
ASSERT_EQ(1, doc.count("name"));
ASSERT_EQ(1, doc["name"].count("first"));
collectionManager.drop_collection("coll1");
}
TEST_F(CoreAPIUtilsTest, TestProxy) {
std::string res;
std::unordered_map<std::string, std::string> headers;
std::map<std::string, std::string> res_headers;
std::string url = "https://typesense.org";
long expected_status_code = HttpClient::get_instance().get_response(url, res, res_headers, headers);
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
nlohmann::json body;
body["url"] = url;
body["method"] = "GET";
body["headers"] = headers;
req->body = body.dump();
post_proxy(req, resp);
ASSERT_EQ(expected_status_code, resp->status_code);
ASSERT_EQ(res, resp->body);
}
TEST_F(CoreAPIUtilsTest, TestProxyInvalid) {
nlohmann::json body;
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
// test with url as empty string
body["url"] = "";
body["method"] = "GET";
body["headers"] = nlohmann::json::object();
req->body = body.dump();
post_proxy(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("URL and method must be non-empty strings.", nlohmann::json::parse(resp->body)["message"]);
// test with url as integer
body["url"] = 123;
body["method"] = "GET";
req->body = body.dump();
post_proxy(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("URL and method must be non-empty strings.", nlohmann::json::parse(resp->body)["message"]);
// test with no url parameter
body.erase("url");
body["method"] = "GET";
req->body = body.dump();
post_proxy(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Missing required fields.", nlohmann::json::parse(resp->body)["message"]);
// test with invalid method
body["url"] = "https://typesense.org";
body["method"] = "INVALID";
req->body = body.dump();
post_proxy(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Parameter `method` must be one of GET, POST, POST_STREAM, PUT, DELETE.", nlohmann::json::parse(resp->body)["message"]);
// test with method as integer
body["method"] = 123;
req->body = body.dump();
post_proxy(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("URL and method must be non-empty strings.", nlohmann::json::parse(resp->body)["message"]);
// test with no method parameter
body.erase("method");
req->body = body.dump();
post_proxy(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Missing required fields.", nlohmann::json::parse(resp->body)["message"]);
// test with body as integer
body["method"] = "POST";
body["body"] = 123;
req->body = body.dump();
post_proxy(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Body must be a string.", nlohmann::json::parse(resp->body)["message"]);
// test with headers as integer
body["body"] = "";
body["headers"] = 123;
req->body = body.dump();
post_proxy(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Headers must be a JSON object.", nlohmann::json::parse(resp->body)["message"]);
}
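// A compact restatement (as a sketch, not the server code) of the request-body rules the assertions
// above exercise: `url` and `method` are required and must be non-empty strings, `method` must be
// one of the allowed verbs, an optional `body` must be a string, and optional `headers` must be a
// JSON object. Returns the error message, or an empty string when the body is acceptable.
static std::string validate_proxy_body_sketch(const nlohmann::json& body) {
    if(!body.contains("url") || !body.contains("method")) {
        return "Missing required fields.";
    }
    if(!body["url"].is_string() || body["url"].get<std::string>().empty() ||
       !body["method"].is_string() || body["method"].get<std::string>().empty()) {
        return "URL and method must be non-empty strings.";
    }
    const std::string method = body["method"].get<std::string>();
    if(method != "GET" && method != "POST" && method != "POST_STREAM" && method != "PUT" && method != "DELETE") {
        return "Parameter `method` must be one of GET, POST, POST_STREAM, PUT, DELETE.";
    }
    if(body.contains("body") && !body["body"].is_string()) {
        return "Body must be a string.";
    }
    if(body.contains("headers") && !body["headers"].is_object()) {
        return "Headers must be a JSON object.";
    }
    return "";
}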
TEST_F(CoreAPIUtilsTest, TestProxyTimeout) {
nlohmann::json body;
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
// use a tiny timeout and a single retry so the remote request times out
body["url"] = "https://typesense.org/docs/";
body["method"] = "GET";
body["headers"] = nlohmann::json::object();
body["headers"]["timeout_ms"] = "1";
body["headers"]["num_retry"] = "1";
req->body = body.dump();
post_proxy(req, resp);
ASSERT_EQ(408, resp->status_code);
ASSERT_EQ("Server error on remote server. Please try again later.", nlohmann::json::parse(resp->body)["message"]);
}
TEST_F(CoreAPIUtilsTest, TestGetConversations) {
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_name", "type": "string", "infix": true},
{"name": "category", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_name", "category"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
auto api_key = std::string(std::getenv("api_key"));
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"product_name": "moisturizer",
"category": "beauty"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"product_name": "shampoo",
"category": "beauty"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"product_name": "shirt",
"category": "clothing"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"product_name": "pants",
"category": "clothing"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
nlohmann::json model_config = R"({
"model_name": "openai/gpt-3.5-turbo"
})"_json;
model_config["api_key"] = api_key;
auto add_model_op = ConversationModelManager::add_model(model_config, "", true);
ASSERT_TRUE(add_model_op.ok());
LOG(INFO) << "Model id: " << model_config["id"];
auto model_id = model_config["id"].get<std::string>();
auto results_op = coll->search("how many products are there for clothing category?", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(),
10, "", 30, 4, "", 1, "", "", {}, 3, "<mark>", "</mark>", {}, 4294967295UL, true, false,
true, "", false, 6000000UL, 4, 7, fallback, 4, {off}, 32767UL, 32767UL, 2, 2, false, "",
true, 0, max_score, 100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", true, true, true, model_id);
ASSERT_TRUE(results_op.ok());
auto id = results_op.get()["conversation"]["id"].get<std::string>();
auto history_collection = ConversationManager::get_instance()
.get_history_collection(model_config["history_collection"].get<std::string>()).get();
auto history_search_res = history_collection->search(id, {"conversation_id"}, "", {}, {}, {0}).get();
ASSERT_EQ(2, history_search_res["hits"].size());
auto del_res = ConversationModelManager::delete_model(model_id);
}
TEST_F(CoreAPIUtilsTest, SampleGzipIndexTest) {
Collection *coll_hnstories;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll_hnstories = collectionManager.get_collection("coll_hnstories").get();
if(coll_hnstories == nullptr) {
coll_hnstories = collectionManager.create_collection("coll_hnstories", 4, fields, "title").get();
}
auto req = std::make_shared<http_req>();
std::ifstream infile(std::string(ROOT_DIR)+"test/resources/hnstories.jsonl.gz");
std::stringstream outbuffer;
infile.seekg (0, infile.end);
int length = infile.tellg();
infile.seekg (0, infile.beg);
req->body.resize(length);
infile.read(&req->body[0], length);
auto res = ReplicationState::handle_gzip(req);
if (!res.error().empty()) {
LOG(ERROR) << res.error();
FAIL();
} else {
outbuffer << req->body;
}
std::vector<std::string> doc_lines;
std::string line;
while(std::getline(outbuffer, line)) {
doc_lines.push_back(line);
}
ASSERT_EQ(14, doc_lines.size());
ASSERT_EQ("{\"points\":1,\"title\":\"DuckDuckGo Settings\"}", doc_lines[0]);
ASSERT_EQ("{\"points\":1,\"title\":\"Making Twitter Easier to Use\"}", doc_lines[1]);
ASSERT_EQ("{\"points\":2,\"title\":\"London refers Uber app row to High Court\"}", doc_lines[2]);
ASSERT_EQ("{\"points\":1,\"title\":\"Young Global Leaders, who should be nominated? (World Economic Forum)\"}", doc_lines[3]);
ASSERT_EQ("{\"points\":1,\"title\":\"Blooki.st goes BETA in a few hours\"}", doc_lines[4]);
ASSERT_EQ("{\"points\":1,\"title\":\"Unicode Security Data: Beta Review\"}", doc_lines[5]);
ASSERT_EQ("{\"points\":2,\"title\":\"FileMap: MapReduce on the CLI\"}", doc_lines[6]);
ASSERT_EQ("{\"points\":1,\"title\":\"[Full Video] NBC News Interview with Edward Snowden\"}", doc_lines[7]);
ASSERT_EQ("{\"points\":1,\"title\":\"Hybrid App Monetization Example with Mobile Ads and In-App Purchases\"}", doc_lines[8]);
ASSERT_EQ("{\"points\":1,\"title\":\"We need oppinion from Android Developers\"}", doc_lines[9]);
ASSERT_EQ("{\"points\":1,\"title\":\"\\\\t Why Mobile Developers Should Care About Deep Linking\"}", doc_lines[10]);
ASSERT_EQ("{\"points\":2,\"title\":\"Are we getting too Sassy? Weighing up micro-optimisation vs. maintainability\"}", doc_lines[11]);
ASSERT_EQ("{\"points\":2,\"title\":\"Google's XSS game\"}", doc_lines[12]);
ASSERT_EQ("{\"points\":1,\"title\":\"Telemba Turns Your Old Roomba and Tablet Into a Telepresence Robot\"}", doc_lines[13]);
infile.close();
}
TEST_F(CoreAPIUtilsTest, TestConversationModels) {
nlohmann::json model_config = R"({
"model_name": "openai/gpt-3.5-turbo",
"max_bytes": 10000,
"history_collection": "conversation_store"
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
model_config["api_key"] = std::string(std::getenv("api_key"));
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
req->body = model_config.dump();
post_conversation_model(req, resp);
ASSERT_EQ(200, resp->status_code);
auto id = nlohmann::json::parse(resp->body)["id"].get<std::string>();
req->params["id"] = id;
get_conversation_model(req, resp);
ASSERT_EQ(200, resp->status_code);
ASSERT_EQ(id, nlohmann::json::parse(resp->body)["id"].get<std::string>());
get_conversation_models(req, resp);
ASSERT_EQ(200, resp->status_code);
ASSERT_EQ(1, nlohmann::json::parse(resp->body).size());
del_conversation_model(req, resp);
ASSERT_EQ(200, resp->status_code);
get_conversation_models(req, resp);
ASSERT_EQ(200, resp->status_code);
ASSERT_EQ(0, nlohmann::json::parse(resp->body).size());
}
TEST_F(CoreAPIUtilsTest, TestInvalidConversationModels) {
// test with no model_name
nlohmann::json model_config = R"({
"history_collection": "conversation_store"
})"_json;
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
model_config["api_key"] = std::string(std::getenv("api_key"));
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
req->body = model_config.dump();
post_conversation_model(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Property `model_name` is not provided or not a string.", nlohmann::json::parse(resp->body)["message"]);
// test with invalid model_name
model_config["model_name"] = "invalid_model_name";
req->body = model_config.dump();
post_conversation_model(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Model namespace `` is not supported.", nlohmann::json::parse(resp->body)["message"]);
// test with no api_key
model_config["model_name"] = "openai/gpt-3.5-turbo";
model_config.erase("api_key");
req->body = model_config.dump();
post_conversation_model(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("API key is not provided", nlohmann::json::parse(resp->body)["message"]);
// test with api_key as integer
model_config["api_key"] = 123;
req->body = model_config.dump();
post_conversation_model(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("API key is not a string", nlohmann::json::parse(resp->body)["message"]);
// test with model_name as integer
model_config["api_key"] = std::string(std::getenv("api_key"));
model_config["model_name"] = 123;
req->body = model_config.dump();
post_conversation_model(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Property `model_name` is not provided or not a string.", nlohmann::json::parse(resp->body)["message"]);
model_config["model_name"] = "openai/gpt-3.5-turbo";
// test without max_bytes
post_conversation_model(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Property `max_bytes` is not provided or not a number.", nlohmann::json::parse(resp->body)["message"]);
// test with max_bytes as string
model_config["max_bytes"] = "10000";
req->body = model_config.dump();
post_conversation_model(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Property `max_bytes` is not provided or not a number.", nlohmann::json::parse(resp->body)["message"]);
// test with max_bytes as negative number
model_config["max_bytes"] = -10000;
req->body = model_config.dump();
post_conversation_model(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Property `max_bytes` must be a positive number.", nlohmann::json::parse(resp->body)["message"]);
model_config["max_bytes"] = 10000;
model_config["history_collection"] = 123;
// test with history_collection as integer
req->body = model_config.dump();
post_conversation_model(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Property `history_collection` is not provided or not a string.", nlohmann::json::parse(resp->body)["message"]);
// test with history_collection as empty string
model_config["history_collection"] = "";
req->body = model_config.dump();
post_conversation_model(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("Collection not found", nlohmann::json::parse(resp->body)["message"]);
}
TEST_F(CoreAPIUtilsTest, DeleteNonExistingDoc) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 2, fields, "points").get();
}
for(size_t i=0; i<10; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = "Title " + std::to_string(i);
doc["points"] = i;
coll1->add(doc.dump());
}
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "coll1";
req->params["id"] = "9";
del_remove_document(req, res);
ASSERT_EQ(200, res->status_code);
req->params["id"] = "10";
del_remove_document(req, res);
ASSERT_EQ(404, res->status_code);
req->params["ignore_not_found"] = "true";
del_remove_document(req, res);
ASSERT_EQ(200, res->status_code);
}
TEST_F(CoreAPIUtilsTest, CollectionsPagination) {
//remove all collections first
auto collections = collectionManager.get_collections().get();
for(auto collection : collections) {
collectionManager.drop_collection(collection->get_name());
}
//create few collections
for(size_t i = 0; i < 5; i++) {
nlohmann::json coll_json = R"({
"name": "cp",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
coll_json["name"] = coll_json["name"].get<std::string>() + std::to_string(i + 1);
auto coll_op = collectionManager.create_collection(coll_json);
ASSERT_TRUE(coll_op.ok());
}
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
req->params["offset"] = "0";
req->params["limit"] = "1";
nlohmann::json expected_meta_json = R"(
{
"created_at":1663234047,
"default_sorting_field":"",
"enable_nested_fields":false,
"fields":[
{
"facet":false,
"index":true,
"infix":false,
"locale":"",
"name":"title",
"optional":false,
"sort":false,
"stem":false,
"store": true,
"type":"string"
}
],
"name":"cp2",
"num_documents":0,
"symbols_to_index":[],
"token_separators":[]
}
)"_json;
get_collections(req, resp);
auto actual_json = nlohmann::json::parse(resp->body);
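    // created_at is assigned at runtime, so copy it from the actual response before comparing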
expected_meta_json["created_at"] = actual_json[0]["created_at"];
ASSERT_EQ(expected_meta_json.dump(), actual_json[0].dump());
//invalid offset string
req->params["offset"] = "0a";
get_collections(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("{\"message\": \"Offset param should be unsigned integer.\"}", resp->body);
//invalid limit string
req->params["offset"] = "0";
req->params["limit"] = "-1";
get_collections(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("{\"message\": \"Limit param should be unsigned integer.\"}", resp->body);
}
TEST_F(CoreAPIUtilsTest, OverridesPagination) {
Collection *coll2;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll2 = collectionManager.get_collection("coll2").get();
if(coll2 == nullptr) {
coll2 = collectionManager.create_collection("coll2", 1, fields, "points").get();
}
for(int i = 0; i < 5; ++i) {
nlohmann::json override_json = {
{"id", "override"},
{
"rule", {
{"query", "not-found"},
{"match", override_t::MATCH_EXACT}
}
},
{"metadata", { {"foo", "bar"}}},
};
override_json["id"] = override_json["id"].get<std::string>() + std::to_string(i + 1);
override_t override;
override_t::parse(override_json, "", override);
coll2->add_override(override);
}
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
req->params["collection"] = "coll2";
req->params["offset"] = "0";
req->params["limit"] = "1";
get_overrides(req, resp);
nlohmann::json expected_json = R"({
"overrides":[
{
"excludes":[],
"filter_curated_hits":false,
"id":"override1",
"includes":[],
"metadata":{"foo":"bar"},
"remove_matched_tokens":false,
"rule":{
"match":"exact",
"query":"not-found"
},
"stop_processing":true
}]
})"_json;
ASSERT_EQ(expected_json.dump(), resp->body);
//invalid offset string
req->params["offset"] = "0a";
get_collections(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("{\"message\": \"Offset param should be unsigned integer.\"}", resp->body);
//invalid limit string
req->params["offset"] = "0";
req->params["limit"] = "-1";
get_collections(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("{\"message\": \"Limit param should be unsigned integer.\"}", resp->body);
}
TEST_F(CoreAPIUtilsTest, SynonymsPagination) {
Collection *coll3;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll3 = collectionManager.get_collection("coll3").get();
if (coll3 == nullptr) {
coll3 = collectionManager.create_collection("coll3", 1, fields, "points").get();
}
for (int i = 0; i < 5; ++i) {
nlohmann::json synonym_json = R"(
{
"id": "foobar",
"synonyms": ["blazer", "suit"]
})"_json;
synonym_json["id"] = synonym_json["id"].get<std::string>() + std::to_string(i + 1);
coll3->add_synonym(synonym_json);
}
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
req->params["collection"] = "coll3";
req->params["offset"] = "0";
req->params["limit"] = "1";
get_synonyms(req, resp);
nlohmann::json expected_json = R"({
"synonyms":[
{
"id":"foobar1",
"root":"",
"synonyms":["blazer","suit"]
}]
})"_json;
ASSERT_EQ(expected_json.dump(), resp->body);
//invalid offset string
req->params["offset"] = "0a";
get_collections(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("{\"message\": \"Offset param should be unsigned integer.\"}", resp->body);
//invalid limit string
req->params["offset"] = "0";
req->params["limit"] = "-1";
get_collections(req, resp);
ASSERT_EQ(400, resp->status_code);
ASSERT_EQ("{\"message\": \"Limit param should be unsigned integer.\"}", resp->body);
}
TEST_F(CoreAPIUtilsTest, CollectionMetadataUpdate) {
CollectionManager & collectionManager3 = CollectionManager::get_instance();
nlohmann::json schema = R"({
"name": "collection_meta",
"enable_nested_fields": true,
"fields": [
{"name": "value.color", "type": "string", "optional": false, "facet": true },
{"name": "value.r", "type": "int32", "optional": false, "facet": true },
{"name": "value.g", "type": "int32", "optional": false, "facet": true },
{"name": "value.b", "type": "int32", "optional": false, "facet": true }
],
"metadata": {
"batch_job":"",
"indexed_from":"2023-04-20T00:00:00.000Z",
"total_docs": 0
}
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
std::string collection_meta_json;
nlohmann::json collection_meta;
std::string next_seq_id;
std::string next_collection_id;
store->get(Collection::get_meta_key("collection_meta"), collection_meta_json);
nlohmann::json expected_meta_json = R"(
{
"created_at":1705482381,
"default_sorting_field":"",
"enable_nested_fields":true,
"fallback_field_type":"",
"fields":[
{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.color",
"nested":true,
"nested_array":2,
"optional":false,
"sort":false,
"store":true,
"type":"string",
"range_index":false,
"stem":false
},
{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.r",
"nested":true,
"nested_array":2,
"optional":false,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
},{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.g",
"nested":true,
"nested_array":2,
"optional":false,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
},{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.b",
"nested":true,
"nested_array":2,
"optional":false,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
}
],
"id":1,
"metadata":{
"batch_job":"",
"indexed_from":"2023-04-20T00:00:00.000Z",
"total_docs":0
},
"name":"collection_meta",
"num_memory_shards":4,
"symbols_to_index":[],
"token_separators":[]
})"_json;
auto actual_json = nlohmann::json::parse(collection_meta_json);
expected_meta_json["created_at"] = actual_json["created_at"];
ASSERT_EQ(expected_meta_json.dump(), actual_json.dump());
//try setting empty metadata
auto metadata = R"({
"metadata": {}
})"_json;
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "collection_meta";
req->body = metadata.dump();
patch_update_collection(req, res);
expected_meta_json = R"(
{
"created_at":1705482381,
"default_sorting_field":"",
"enable_nested_fields":true,
"fallback_field_type":"",
"fields":[
{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.color",
"nested":true,
"nested_array":2,
"optional":false,
"sort":false,
"store":true,
"type":"string",
"range_index":false,
"stem":false
},
{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.r",
"nested":true,
"nested_array":2,
"optional":false,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
},{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.g",
"nested":true,
"nested_array":2,
"optional":false,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
},{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.b",
"nested":true,
"nested_array":2,
"optional":false,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
}
],
"id":1,
"metadata":{
},
"name":"collection_meta",
"num_memory_shards":4,
"symbols_to_index":[],
"token_separators":[]
})"_json;
store->get(Collection::get_meta_key("collection_meta"), collection_meta_json);
actual_json = nlohmann::json::parse(collection_meta_json);
expected_meta_json["created_at"] = actual_json["created_at"];
ASSERT_EQ(expected_meta_json.dump(), actual_json.dump());
}
TEST_F(CoreAPIUtilsTest, CollectionUpdateValidation) {
CollectionManager & collectionManager3 = CollectionManager::get_instance();
nlohmann::json schema = R"({
"name": "collection_meta",
"enable_nested_fields": true,
"fields": [
{"name": "value.color", "type": "string", "optional": false, "facet": true },
{"name": "value.r", "type": "int32", "optional": false, "facet": true },
{"name": "value.g", "type": "int32", "optional": false, "facet": true },
{"name": "value.b", "type": "int32", "optional": false, "facet": true }
],
"metadata": {
"batch_job":"",
"indexed_from":"2023-04-20T00:00:00.000Z",
"total_docs": 0
}
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto alter_schema = R"({
"metadata": {},
"fields":[
{"name": "value.color", "drop": true },
{"name": "value.color", "type": "string", "facet": true }
]
})"_json;
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "collection_meta";
req->body = alter_schema.dump();
ASSERT_TRUE(patch_update_collection(req, res));
alter_schema = R"({
"metadata": {},
"symbols_to_index":[]
})"_json;
req->body = alter_schema.dump();
ASSERT_FALSE(patch_update_collection(req, res));
ASSERT_EQ("{\"message\": \"Only `fields` and `metadata` can be updated at the moment.\"}", res->body);
alter_schema = R"({
"symbols_to_index":[]
})"_json;
req->body = alter_schema.dump();
ASSERT_FALSE(patch_update_collection(req, res));
ASSERT_EQ("{\"message\": \"Only `fields` and `metadata` can be updated at the moment.\"}", res->body);
alter_schema = R"({
"name": "collection_meta2",
"metadata": {},
"fields":[
{"name": "value.hue", "type": "int32", "optional": false, "facet": true }
]
})"_json;
req->body = alter_schema.dump();
ASSERT_FALSE(patch_update_collection(req, res));
ASSERT_EQ("{\"message\": \"Only `fields` and `metadata` can be updated at the moment.\"}", res->body);
alter_schema = R"({
})"_json;
req->body = alter_schema.dump();
ASSERT_FALSE(patch_update_collection(req, res));
ASSERT_EQ("{\"message\": \"Alter payload is empty.\"}", res->body);
}
TEST_F(CoreAPIUtilsTest, DocumentGetIncludeExcludeFields) {
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("brand", field_types::STRING, true, true),
field("size", field_types::INT32, true, false),
field("colors", field_types::STRING_ARRAY, true, false),
field("rating", field_types::FLOAT, true, false)
};
auto coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "rating").get();
}
nlohmann::json doc;
doc["id"] = "1";
doc["title"] = "Denim jeans";
doc["brand"] = "Spykar";
doc["size"] = 40;
doc["colors"] = {"blue", "black", "grey"};
doc["rating"] = 4.5;
coll1->add(doc.dump());
doc["id"] = "2";
doc["title"] = "Denim jeans";
doc["brand"] = "Levis";
doc["size"] = 42;
doc["colors"] = {"blue", "black"};
doc["rating"] = 4.4;
coll1->add(doc.dump());
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "coll1";
req->params["id"] = "1";
//normal doc fetch
ASSERT_TRUE(get_fetch_document(req, res));
auto resp = nlohmann::json::parse(res->body);
ASSERT_EQ(6, resp.size());
ASSERT_TRUE(resp.contains("brand"));
ASSERT_TRUE(resp.contains("size"));
ASSERT_TRUE(resp.contains("colors"));
ASSERT_TRUE(resp.contains("rating"));
ASSERT_TRUE(resp.contains("id"));
ASSERT_TRUE(resp.contains("title"));
//include fields
req->params["include_fields"] = "brand,size,colors";
ASSERT_TRUE(get_fetch_document(req, res));
resp = nlohmann::json::parse(res->body);
ASSERT_EQ(3, resp.size());
ASSERT_TRUE(resp.contains("brand"));
ASSERT_TRUE(resp.contains("size"));
ASSERT_TRUE(resp.contains("colors"));
ASSERT_FALSE(resp.contains("rating"));
//exclude fields
req->params.erase("include_fields");
req->params["exclude_fields"] = "brand,size,colors";
ASSERT_TRUE(get_fetch_document(req, res));
resp = nlohmann::json::parse(res->body);
ASSERT_EQ(3, resp.size());
ASSERT_TRUE(resp.contains("id"));
ASSERT_TRUE(resp.contains("title"));
ASSERT_TRUE(resp.contains("rating"));
ASSERT_FALSE(resp.contains("brand"));
//both include and exclude fields
req->params["include_fields"] = "title,rating";
req->params["exclude_fields"] = "brand,size,colors";
ASSERT_TRUE(get_fetch_document(req, res));
resp = nlohmann::json::parse(res->body);
ASSERT_EQ(2, resp.size());
ASSERT_TRUE(resp.contains("title"));
ASSERT_TRUE(resp.contains("rating"));
ASSERT_FALSE(resp.contains("id"));
}
TEST_F(CoreAPIUtilsTest, CollectionSchemaResponseWithStoreValue) {
auto schema = R"({
"name": "collection3",
"enable_nested_fields": true,
"fields": [
{"name": "title", "type": "string", "locale": "en", "store":false},
{"name": "points", "type": "int32"}
],
"default_sorting_field": "points"
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "collection3";
ASSERT_TRUE(get_collection_summary(req, res));
auto res_json = nlohmann::json::parse(res->body);
auto expected_json = R"({
"default_sorting_field":"points",
"enable_nested_fields":true,
"fields":[
{
"facet":false,
"index":true,
"infix":false,
"locale":"en",
"name":"title",
"optional":false,
"sort":false,
"stem":false,
"store":false,
"type":"string"
},
{
"facet":false,
"index":true,
"infix":false,
"locale":"",
"name":"points",
"optional":false,
"sort":true,
"stem":false,
"store":true,
"type":"int32"
}],
"name":"collection3",
"num_documents":0,
"symbols_to_index":[],
"token_separators":[]
})"_json;
expected_json["created_at"] = res_json["created_at"];
ASSERT_EQ(expected_json, res_json);
}
| 78,863 | C++ | .cpp | 1,920 | 32.699479 | 146 | 0.575781 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,706 | archive_utils_test.cpp | typesense_typesense/test/archive_utils_test.cpp |
#include <gtest/gtest.h>
#include "archive_utils.h"
#include "tsconfig.h"
#include <fstream>
#include <cstdio>
#include <filesystem>
class ArchiveUtilsTest : public ::testing::Test {
protected:
void SetUp() override {
temp_dir = std::filesystem::temp_directory_path() / "archive_utils_test";
std::filesystem::create_directory(temp_dir);
Config::get_instance().set_data_dir(temp_dir.string());
}
void TearDown() override {
std::filesystem::remove_all(temp_dir);
}
std::filesystem::path temp_dir;
// Helper function to create a simple .tar.gz file
std::string create_test_tar_gz() {
std::string content = "This is a test file content.";
std::string filename = (temp_dir / "test.txt").string();
std::ofstream file(filename);
file << content;
file.close();
std::string archive_name = (temp_dir / "test.tar.gz").string();
std::string command = "tar -czf " + archive_name + " -C " + temp_dir.string() + " test.txt";
system(command.c_str());
return archive_name;
}
};
TEST_F(ArchiveUtilsTest, ExtractTarGzFromFile) {
std::string archive_path = create_test_tar_gz();
std::string extract_path = (temp_dir / "extract").string();
std::filesystem::create_directory(extract_path);
ASSERT_TRUE(ArchiveUtils::extract_tar_gz_from_file(archive_path, extract_path));
// Check if the extracted file exists and has the correct content
std::string extracted_file = (std::filesystem::path(extract_path) / "test.txt").string();
ASSERT_TRUE(std::filesystem::exists(extracted_file));
std::ifstream file(extracted_file);
std::string content((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
EXPECT_EQ(content, "This is a test file content.");
}
TEST_F(ArchiveUtilsTest, ExtractTarGzFromMemory) {
std::string archive_path = create_test_tar_gz();
std::string extract_path = (temp_dir / "extract_memory").string();
std::filesystem::create_directory(extract_path);
// Read the archive content into memory
std::ifstream archive_file(archive_path, std::ios::binary);
std::string archive_content((std::istreambuf_iterator<char>(archive_file)), std::istreambuf_iterator<char>());
ASSERT_TRUE(ArchiveUtils::extract_tar_gz_from_memory(archive_content, extract_path));
// Check if the extracted file exists and has the correct content
std::string extracted_file = (std::filesystem::path(extract_path) / "test.txt").string();
ASSERT_TRUE(std::filesystem::exists(extracted_file));
std::ifstream file(extracted_file);
std::string content((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
EXPECT_EQ(content, "This is a test file content.");
}
TEST_F(ArchiveUtilsTest, ExtractTarGzFromFileInvalidPath) {
std::string invalid_path = (temp_dir / "nonexistent.tar.gz").string();
std::string extract_path = (temp_dir / "extract_invalid").string();
std::filesystem::create_directory(extract_path);
ASSERT_FALSE(ArchiveUtils::extract_tar_gz_from_file(invalid_path, extract_path));
}
TEST_F(ArchiveUtilsTest, ExtractTarGzFromMemoryInvalidContent) {
std::string invalid_content = "This is not a valid tar.gz content";
std::string extract_path = (temp_dir / "extract_invalid_memory").string();
std::filesystem::create_directory(extract_path);
ASSERT_FALSE(ArchiveUtils::extract_tar_gz_from_memory(invalid_content, extract_path));
}
TEST_F(ArchiveUtilsTest, VerifyTarGzArchive) {
std::string archive_path = create_test_tar_gz();
std::ifstream archive_file(archive_path, std::ios::binary);
std::string archive_content((std::istreambuf_iterator<char>(archive_file)), std::istreambuf_iterator<char>());
ASSERT_TRUE(ArchiveUtils::verify_tar_gz_archive(archive_content));
}
TEST_F(ArchiveUtilsTest, VerifyTarGzArchiveInvalid) {
std::string invalid_content = "This is not a valid tar.gz content";
ASSERT_FALSE(ArchiveUtils::verify_tar_gz_archive(invalid_content));
}
| 4,062 | C++ | .cpp | 79 | 46.56962 | 114 | 0.705778 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,707 | collection_grouping_test.cpp | typesense_typesense/test/collection_grouping_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionGroupingTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
Collection *coll_group;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_grouping";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("brand", field_types::STRING, true, true),
field("size", field_types::INT32, true, false),
field("colors", field_types::STRING_ARRAY, true, false),
field("rating", field_types::FLOAT, true, false)
};
coll_group = collectionManager.get_collection("coll_group").get();
if(coll_group == nullptr) {
coll_group = collectionManager.create_collection("coll_group", 4, fields, "rating").get();
}
std::ifstream infile(std::string(ROOT_DIR)+"test/group_documents.jsonl");
std::string json_line;
while (std::getline(infile, json_line)) {
auto add_op = coll_group->add(json_line);
if(!add_op.ok()) {
std::cout << add_op.error() << std::endl;
}
ASSERT_TRUE(add_op.ok());
}
infile.close();
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionGroupingTest, GroupingBasics) {
// group by size (int32)
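    // the 12 seeded documents span three distinct sizes, so with a group_limit of 2
    // we expect 3 groups, each carrying at most 2 hits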
auto res = coll_group->search("*", {}, "", {"brand"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size"}, 2).get();
ASSERT_EQ(12, res["found_docs"].get<size_t>());
ASSERT_EQ(3, res["found"].get<size_t>());
ASSERT_EQ(3, res["grouped_hits"].size());
ASSERT_EQ(11, res["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"][0]["found"].get<int32_t>());
ASSERT_FLOAT_EQ(4.8, res["grouped_hits"][0]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ(11, res["grouped_hits"][0]["hits"][0]["document"]["size"].get<size_t>());
ASSERT_STREQ("5", res["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.3, res["grouped_hits"][0]["hits"][1]["document"]["rating"].get<float>());
ASSERT_STREQ("1", res["grouped_hits"][0]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(7, res["grouped_hits"][1]["found"].get<int32_t>());
ASSERT_FLOAT_EQ(4.8, res["grouped_hits"][1]["hits"][0]["document"]["rating"].get<float>());
ASSERT_STREQ("4", res["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.6, res["grouped_hits"][1]["hits"][1]["document"]["rating"].get<float>());
ASSERT_STREQ("3", res["grouped_hits"][1]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(3, res["grouped_hits"][2]["found"].get<int32_t>());
ASSERT_FLOAT_EQ(4.6, res["grouped_hits"][2]["hits"][0]["document"]["rating"].get<float>());
ASSERT_STREQ("2", res["grouped_hits"][2]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.4, res["grouped_hits"][2]["hits"][1]["document"]["rating"].get<float>());
ASSERT_STREQ("8", res["grouped_hits"][2]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("brand", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("Beta", res["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("Omega", res["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][0]["counts"][2]["count"]);
ASSERT_STREQ("Xorp", res["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][0]["counts"][3]["count"]);
ASSERT_STREQ("Zeta", res["facet_counts"][0]["counts"][3]["value"].get<std::string>().c_str());
// group by rating (float) and sort by size
std::vector<sort_by> sort_size = {sort_by("size", "DESC")};
res = coll_group->search("*", {}, "", {"brand"}, sort_size, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "brand: omeg", 30, 5,
"", 10,
{}, {}, {"rating"}, 2).get();
// 7 unique ratings
ASSERT_EQ(12, res["found_docs"].get<size_t>());
ASSERT_EQ(7, res["found"].get<size_t>());
ASSERT_EQ(7, res["grouped_hits"].size());
ASSERT_FLOAT_EQ(4.4, res["grouped_hits"][0]["group_key"][0].get<float>());
ASSERT_EQ(1, res["grouped_hits"][0]["found"].get<int32_t>());
ASSERT_EQ(12, res["grouped_hits"][0]["hits"][0]["document"]["size"].get<uint32_t>());
ASSERT_STREQ("8", res["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.4, res["grouped_hits"][0]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ(4, res["grouped_hits"][1]["found"].get<int32_t>());
ASSERT_EQ(12, res["grouped_hits"][1]["hits"][0]["document"]["size"].get<uint32_t>());
ASSERT_STREQ("6", res["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.3, res["grouped_hits"][1]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ(11, res["grouped_hits"][1]["hits"][1]["document"]["size"].get<uint32_t>());
ASSERT_STREQ("1", res["grouped_hits"][1]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.3, res["grouped_hits"][1]["hits"][1]["document"]["rating"].get<float>());
ASSERT_EQ(1, res["grouped_hits"][5]["found"].get<int32_t>());
ASSERT_EQ(10, res["grouped_hits"][5]["hits"][0]["document"]["size"].get<uint32_t>());
ASSERT_STREQ("9", res["grouped_hits"][5]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.1, res["grouped_hits"][5]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ(1, res["grouped_hits"][6]["found"].get<int32_t>());
ASSERT_EQ(10, res["grouped_hits"][6]["hits"][0]["document"]["size"].get<uint32_t>());
ASSERT_STREQ("0", res["grouped_hits"][6]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.5, res["grouped_hits"][6]["hits"][0]["document"]["rating"].get<float>());
ASSERT_STREQ("brand", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("Omega", res["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Omeg</mark>a", res["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
// Wildcard group_by is not allowed
auto error = coll_group->search("*", {}, "", {"brand"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"foo*"}, 2).error();
ASSERT_EQ("Pattern `foo*` is not allowed.", error);
// typo_tokens_threshold should respect num_groups
res = coll_group->search("beta", {"brand"}, "", {"brand"}, {}, {2}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 2,
{}, {}, {"brand"}, 1).get();
ASSERT_EQ(4, res["found_docs"].get<size_t>());
ASSERT_EQ(2, res["found"].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"].size());
ASSERT_EQ("Beta", res["grouped_hits"][0]["group_key"][0]);
ASSERT_EQ("Zeta", res["grouped_hits"][1]["group_key"][0]);
res = coll_group->search("beta", {"brand"}, "", {"brand"}, {}, {2}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 1,
{}, {}, {"brand"}, 1).get();
ASSERT_EQ(3, res["found_docs"].get<size_t>());
ASSERT_EQ(1, res["found"].get<size_t>());
ASSERT_EQ(1, res["grouped_hits"].size());
ASSERT_EQ("Beta", res["grouped_hits"][0]["group_key"][0]);
}
TEST_F(CollectionGroupingTest, GroupingCompoundKey) {
// group by size+brand (int32, string)
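    // the compound (size, brand) key produces 10 distinct combinations across the 12 documents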
auto res = coll_group->search("*", {}, "", {"brand"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size", "brand"}, 2).get();
ASSERT_EQ(12, res["found_docs"].get<size_t>());
ASSERT_EQ(10, res["found"].get<size_t>());
ASSERT_EQ(10, res["grouped_hits"].size());
ASSERT_EQ(1, res["grouped_hits"][0]["found"].get<int32_t>());
ASSERT_EQ(11, res["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_STREQ("Beta", res["grouped_hits"][0]["group_key"][1].get<std::string>().c_str());
// optional field should have no value in the group key component
ASSERT_EQ(1, res["grouped_hits"][5]["group_key"].size());
ASSERT_STREQ("10", res["grouped_hits"][5]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("11", res["grouped_hits"][5]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"].size());
ASSERT_FLOAT_EQ(4.8, res["grouped_hits"][0]["hits"][0]["document"]["rating"].get<float>());
ASSERT_STREQ("5", res["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, res["grouped_hits"][1]["found"].get<int32_t>());
ASSERT_EQ(1, res["grouped_hits"][1]["hits"].size());
ASSERT_FLOAT_EQ(4.8, res["grouped_hits"][1]["hits"][0]["document"]["rating"].get<float>());
ASSERT_STREQ("4", res["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(2, res["grouped_hits"][2]["found"].get<int32_t>());
ASSERT_EQ(2, res["grouped_hits"][2]["hits"].size());
ASSERT_FLOAT_EQ(4.6, res["grouped_hits"][2]["hits"][0]["document"]["rating"].get<float>());
ASSERT_STREQ("3", res["grouped_hits"][2]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.5, res["grouped_hits"][2]["hits"][1]["document"]["rating"].get<float>());
ASSERT_STREQ("0", res["grouped_hits"][2]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("brand", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("Beta", res["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("Omega", res["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][0]["counts"][2]["count"]);
ASSERT_STREQ("Xorp", res["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][0]["counts"][3]["count"]);
ASSERT_STREQ("Zeta", res["facet_counts"][0]["counts"][3]["value"].get<std::string>().c_str());
// pagination with page=2, per_page=2
res = coll_group->search("*", {}, "", {"brand"}, {}, {0}, 2, 2, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size", "brand"}, 2).get();
// 3rd result from previous assertion will be in the first position
ASSERT_EQ(2, res["grouped_hits"][0]["hits"].size());
ASSERT_FLOAT_EQ(4.6, res["grouped_hits"][0]["hits"][0]["document"]["rating"].get<float>());
ASSERT_STREQ("3", res["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.5, res["grouped_hits"][0]["hits"][1]["document"]["rating"].get<float>());
ASSERT_STREQ("0", res["grouped_hits"][0]["hits"][1]["document"]["id"].get<std::string>().c_str());
// total count and facet counts should be the same
ASSERT_EQ(12, res["found_docs"].get<size_t>());
ASSERT_EQ(10, res["found"].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"].size());
ASSERT_EQ(10, res["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_STREQ("Omega", res["grouped_hits"][0]["group_key"][1].get<std::string>().c_str());
ASSERT_STREQ("brand", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("Beta", res["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("Omega", res["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][0]["counts"][2]["count"]);
ASSERT_STREQ("Xorp", res["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][0]["counts"][3]["count"]);
ASSERT_STREQ("Zeta", res["facet_counts"][0]["counts"][3]["value"].get<std::string>().c_str());
// respect min and max grouping limit (greater than 0 and less than 99)
auto res_op = coll_group->search("*", {}, "", {"brand"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "brand: omeg", 30, 5,
"", 10,
{}, {}, {"rating"}, 100);
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Value of `group_limit` must be between 1 and 99.", res_op.error().c_str());
res_op = coll_group->search("*", {}, "", {"brand"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "brand: omeg", 30, 5,
"", 10,
{}, {}, {"rating"}, 0);
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Value of `group_limit` must be between 1 and 99.", res_op.error().c_str());
}
TEST_F(CollectionGroupingTest, GroupingWithMultiFieldRelevance) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("genre", field_types::STRING, true),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Train or Highway", "Gord Downie", "rock"},
{"Down There by the Train", "Dustin Kensrue", "pop"},
{"In the Train", "Dustin Kensrue", "pop"},
{"State Trooper", "Dustin Kensrue", "country"},
{"Down There Somewhere", "Dustin Kensrue", "pop"},
{"Down There by the Train", "Gord Downie", "rock"},
{"Down and Outside", "Gord Downie", "rock"},
{"Let it be", "Downie Kensrue", "country"},
{"There was a Train", "Gord Kensrue", "country"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["genre"] = records[i][2];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("Dustin Kensrue Down There by the Train",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"genre"}, 2).get();
ASSERT_EQ(7, results["found_docs"].get<size_t>());
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["grouped_hits"].size());
ASSERT_EQ(3, results["grouped_hits"][0]["found"].get<int32_t>());
ASSERT_STREQ("pop", results["grouped_hits"][0]["group_key"][0].get<std::string>().c_str());
ASSERT_EQ(2, results["grouped_hits"][0]["hits"].size());
ASSERT_STREQ("1", results["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("4", results["grouped_hits"][0]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(2, results["grouped_hits"][1]["found"].get<int32_t>());
ASSERT_STREQ("rock", results["grouped_hits"][1]["group_key"][0].get<std::string>().c_str());
ASSERT_EQ(2, results["grouped_hits"][1]["hits"].size());
ASSERT_STREQ("5", results["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["grouped_hits"][1]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(2, results["grouped_hits"][2]["found"].get<int32_t>());
ASSERT_STREQ("country", results["grouped_hits"][2]["group_key"][0].get<std::string>().c_str());
ASSERT_EQ(2, results["grouped_hits"][2]["hits"].size());
ASSERT_STREQ("8", results["grouped_hits"][2]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("3", results["grouped_hits"][2]["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionGroupingTest, GroupingWithGropLimitOfOne) {
auto res = coll_group->search("*", {}, "", {"brand"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"brand"}, 1).get();
ASSERT_EQ(12, res["found_docs"].get<size_t>());
ASSERT_EQ(5, res["found"].get<size_t>());
ASSERT_EQ(5, res["grouped_hits"].size());
// all hits array must be of size 1
for(auto i=0; i<5; i++) {
ASSERT_EQ(1, res["grouped_hits"][i]["hits"].size());
}
ASSERT_EQ(3, res["grouped_hits"][0]["found"].get<int32_t>());
ASSERT_STREQ("5", res["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(4, res["grouped_hits"][1]["found"].get<int32_t>());
ASSERT_STREQ("3", res["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(2, res["grouped_hits"][2]["found"].get<int32_t>());
ASSERT_STREQ("8", res["grouped_hits"][2]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(2, res["grouped_hits"][3]["found"].get<int32_t>());
ASSERT_STREQ("10", res["grouped_hits"][3]["hits"][0]["document"]["id"].get<std::string>().c_str()); // unbranded
ASSERT_EQ(1, res["grouped_hits"][4]["found"].get<int32_t>());
ASSERT_STREQ("9", res["grouped_hits"][4]["hits"][0]["document"]["id"].get<std::string>().c_str());
// facet counts should each be 1, including unbranded
ASSERT_STREQ("brand", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
for(size_t i=0; i < 4; i++) {
ASSERT_EQ(1, (int) res["facet_counts"][0]["counts"][i]["count"]);
}
}
TEST_F(CollectionGroupingTest, GroupingWithArrayFieldAndOverride) {
nlohmann::json override_json_include = {
{"id", "include-rule"},
{
"rule", {
{"query", "shirt"},
{"match", override_t::MATCH_EXACT}
}
},
{"stop_processing", false}
};
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "11";
override_json_include["includes"][0]["position"] = 1;
override_json_include["includes"][1] = nlohmann::json::object();
override_json_include["includes"][1]["id"] = "10";
override_json_include["includes"][1]["position"] = 1;
nlohmann::json override_json_exclude = {
{"id", "exclude-rule"},
{
"rule", {
{"query", "shirt"},
{"match", override_t::MATCH_EXACT}
}
},
{"stop_processing", false}
};
override_json_exclude["excludes"] = nlohmann::json::array();
override_json_exclude["excludes"][0] = nlohmann::json::object();
override_json_exclude["excludes"][0]["id"] = "2";
override_t override1;
override_t override2;
override_t::parse(override_json_include, "", override1);
override_t::parse(override_json_exclude, "", override2);
Option<uint32_t> ov1_op = coll_group->add_override(override1);
Option<uint32_t> ov2_op = coll_group->add_override(override2);
ASSERT_TRUE(ov1_op.ok());
ASSERT_TRUE(ov2_op.ok());
auto res = coll_group->search("shirt", {"title"}, "", {"brand"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"colors"}, 2).get();
ASSERT_EQ(9, res["found_docs"].get<size_t>());
ASSERT_EQ(4, res["found"].get<size_t>());
ASSERT_EQ(4, res["grouped_hits"].size());
ASSERT_EQ(1, res["grouped_hits"][0]["group_key"][0].size());
ASSERT_STREQ("white", res["grouped_hits"][0]["group_key"][0][0].get<std::string>().c_str());
ASSERT_STREQ("11", res["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("10", res["grouped_hits"][0]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("5", res["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("3", res["grouped_hits"][1]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("4", res["grouped_hits"][2]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", res["grouped_hits"][2]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, res["grouped_hits"][3]["hits"].size());
ASSERT_STREQ("8", res["grouped_hits"][3]["hits"][0]["document"]["id"].get<std::string>().c_str());
// assert facet counts
ASSERT_STREQ("brand", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("Xorp", res["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("Beta", res["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][0]["counts"][2]["count"]);
ASSERT_STREQ("Omega", res["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][0]["counts"][3]["count"]);
ASSERT_STREQ("Zeta", res["facet_counts"][0]["counts"][3]["value"].get<std::string>().c_str());
}
TEST_F(CollectionGroupingTest, GroupOrderIndependence) {
Collection *coll1;
std::vector<field> fields = {field("group", field_types::STRING, true),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc;
for(size_t i = 0; i < 256; i++) {
int64_t points = 100 + i;
doc["id"] = std::to_string(i);
doc["group"] = std::to_string(i);
doc["points"] = points;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// doc id "255" will have points of 255
// try to insert doc id "256" with group "256" but having lesser points than all records
doc["id"] = "256";
doc["group"] = "256";
doc["points"] = 50;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// insert doc id "257" of same group "256" with greatest point
doc["id"] = "257";
doc["group"] = "256";
doc["points"] = 500;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// when we search by grouping records, sorting descending on points, both records of group "256" should show up
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
auto res = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"group"}, 10).get();
ASSERT_EQ(1, res["grouped_hits"][0]["group_key"].size());
ASSERT_STREQ("256", res["grouped_hits"][0]["group_key"][0].get<std::string>().c_str());
ASSERT_EQ(2, res["grouped_hits"][0]["hits"].size());
}
TEST_F(CollectionGroupingTest, UseHighestValueInGroupForOrdering) {
Collection *coll1;
std::vector<field> fields = {field("group", field_types::STRING, true),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc;
for(size_t i = 0; i < 250; i++) {
int64_t points = 100 + i;
doc["id"] = std::to_string(i);
doc["group"] = std::to_string(i);
doc["points"] = points;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// points: 100 -> 349
// group with highest point is "249" with 349 points
// insert another document for that group with 50 points
doc["id"] = "250";
doc["group"] = "249";
doc["points"] = 50;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// now insert another new group whose points is greater than 50
doc["id"] = "251";
doc["group"] = "1000";
doc["points"] = 60;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
auto res = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"group"}, 10).get();
ASSERT_EQ(1, res["grouped_hits"][0]["group_key"].size());
ASSERT_STREQ("249", res["grouped_hits"][0]["group_key"][0].get<std::string>().c_str());
ASSERT_EQ(2, res["grouped_hits"][0]["hits"].size());
}
TEST_F(CollectionGroupingTest, RepeatedFieldNameGroupHitCount) {
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("brand", field_types::STRING, true, true),
field("colors", field_types::STRING, true, false),
};
Collection* coll2 = collectionManager.get_collection("coll2").get();
if(coll2 == nullptr) {
coll2 = collectionManager.create_collection("coll2", 1, fields).get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "foobar";
doc["brand"] = "Omega";
doc["colors"] = "foo";
ASSERT_TRUE(coll2->add(doc.dump()).ok());
auto res = coll2->search("f", {"title", "colors"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"brand"}, 2).get();
ASSERT_EQ(1, res["grouped_hits"].size());
ASSERT_EQ(1, res["grouped_hits"][0]["found"].get<int32_t>());
}
TEST_F(CollectionGroupingTest, ControlMissingValues) {
std::vector<field> fields = {
field("brand", field_types::STRING, true, true),
};
Collection* coll2 = collectionManager.get_collection("coll2").get();
if(coll2 == nullptr) {
coll2 = collectionManager.create_collection("coll2", 1, fields).get();
}
LOG(INFO) << "----------------------";
nlohmann::json doc;
doc["id"] = "0";
doc["brand"] = "Omega";
ASSERT_TRUE(coll2->add(doc.dump()).ok());
doc["id"] = "1";
doc["brand"] = nullptr;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
doc["id"] = "2";
doc["brand"] = nullptr;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
doc["id"] = "3";
doc["brand"] = "Omega";
ASSERT_TRUE(coll2->add(doc.dump()).ok());
// disable null value aggregation
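    // this call and the one further down differ only in their final boolean argument,
    // which controls whether documents missing the group-by field are merged into one group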
auto res = coll2->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"brand"}, 2,
"<mark>", "</mark>", {3,3}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_score,
100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", true, false).get();
ASSERT_EQ(3, res["grouped_hits"].size());
ASSERT_EQ("Omega", res["grouped_hits"][0]["group_key"][0].get<std::string>());
ASSERT_EQ(2, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ(0, res["grouped_hits"][1]["group_key"].size());
ASSERT_EQ(1, res["grouped_hits"][1]["hits"].size());
ASSERT_EQ("2", res["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(0, res["grouped_hits"][2]["group_key"].size());
ASSERT_EQ(1, res["grouped_hits"][2]["hits"].size());
ASSERT_EQ("1", res["grouped_hits"][2]["hits"][0]["document"]["id"].get<std::string>());
// with null value aggregation (default)
res = coll2->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"brand"}, 2,
"<mark>", "</mark>", {3,3}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_score,
100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", true, true).get();
ASSERT_EQ(2, res["grouped_hits"].size());
ASSERT_EQ("Omega", res["grouped_hits"][0]["group_key"][0].get<std::string>());
ASSERT_EQ(2, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ(0, res["grouped_hits"][1]["group_key"].size());
ASSERT_EQ(2, res["grouped_hits"][1]["hits"].size());
}
TEST_F(CollectionGroupingTest, SortingOnGroupCount) {
std::vector<sort_by> sort_fields = {sort_by("_group_found", "DESC")};
auto res = coll_group->search("*", {}, "", {"brand"}, sort_fields, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size"}, 2).get();
ASSERT_EQ(12, res["found_docs"].get<size_t>());
ASSERT_EQ(3, res["found"].get<size_t>());
ASSERT_EQ(3, res["grouped_hits"].size());
ASSERT_EQ(10, res["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_EQ(7, res["grouped_hits"][0]["found"].get<int32_t>());
ASSERT_EQ(12, res["grouped_hits"][1]["group_key"][0].get<size_t>());
ASSERT_EQ(3, res["grouped_hits"][1]["found"].get<int32_t>());
ASSERT_EQ(11, res["grouped_hits"][2]["group_key"][0].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"][2]["found"].get<int32_t>());
//search in asc order
std::vector<sort_by> sort_fields2 = {sort_by("_group_found", "ASC")};
auto res2 = coll_group->search("*", {}, "", {"brand"}, sort_fields2, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size"}, 2).get();
ASSERT_EQ(12, res2["found_docs"].get<size_t>());
ASSERT_EQ(3, res2["found"].get<size_t>());
ASSERT_EQ(3, res2["grouped_hits"].size());
ASSERT_EQ(11, res2["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_EQ(2, res2["grouped_hits"][0]["found"].get<int32_t>());
ASSERT_EQ(12, res2["grouped_hits"][1]["group_key"][0].get<size_t>());
ASSERT_EQ(3, res2["grouped_hits"][1]["found"].get<int32_t>());
ASSERT_EQ(10, res2["grouped_hits"][2]["group_key"][0].get<size_t>());
ASSERT_EQ(7, res2["grouped_hits"][2]["found"].get<int32_t>());
}
TEST_F(CollectionGroupingTest, SortingMoreThanMaxTopsterSize) {
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("brand", field_types::STRING, true, true),
field("size", field_types::INT32, true, false),
field("colors", field_types::STRING, true, false),
field("rating", field_types::FLOAT, true, false)
};
Collection* coll3 = collectionManager.get_collection("coll3").get();
if(coll3 == nullptr) {
coll3 = collectionManager.create_collection("coll3", 4, fields, "rating").get();
}
for(auto i = 0; i < 150; i++) {
auto group_id = i;
for(auto j = 0; j < 4; j++) {
nlohmann::json doc;
doc["title"] = "Omega Casual Poplin Shirt";
doc["brand"] = "Omega";
doc["size"] = group_id;
doc["colors"] = "blue";
doc["rating"] = 4.5;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
}
}
for(auto i = 150; i < 250; i++) {
auto group_id = i;
for(auto j = 0; j < 3; j++) {
nlohmann::json doc;
doc["title"] = "Beta Casual Poplin Shirt";
doc["brand"] = "Beta";
doc["size"] = group_id;
doc["colors"] = "white";
doc["rating"] = 4.3;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
}
}
for(auto i = 250; i < 300; i++) {
auto group_id = i;
for(auto j = 0; j < 2; j++) {
nlohmann::json doc;
doc["title"] = "Zeta Casual Poplin Shirt";
doc["brand"] = "Zeta";
doc["size"] = group_id;
doc["colors"] = "red";
doc["rating"] = 4.6;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
}
}
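    // seeded data: 150 groups of 4 docs + 100 groups of 3 + 50 groups of 2 = 1000 docs across 300 groups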
//first search in desc order
std::vector<sort_by> sort_fields = {sort_by("_group_found", "DESC")};
auto res = coll3->search("*", {}, "", {"brand"}, sort_fields, {0}, 100, 2, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size"}, 2).get();
ASSERT_EQ(1000, res["found_docs"].get<size_t>());
ASSERT_EQ(300, res["found"].get<size_t>());
ASSERT_EQ(100, res["grouped_hits"].size());
ASSERT_EQ(4, res["grouped_hits"][4]["found"].get<int32_t>());
ASSERT_EQ(4, res["grouped_hits"][4]["found"].get<int32_t>());
ASSERT_EQ(3, res["grouped_hits"][50]["found"].get<int32_t>());
ASSERT_EQ(3, res["grouped_hits"][99]["found"].get<int32_t>());
res = coll3->search("*", {}, "", {"brand"}, sort_fields, {0}, 100, 3, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size"}, 2).get();
ASSERT_EQ(1000, res["found_docs"].get<size_t>());
ASSERT_EQ(300, res["found"].get<size_t>());
ASSERT_EQ(100, res["grouped_hits"].size());
ASSERT_EQ(3, res["grouped_hits"][4]["found"].get<int32_t>());
ASSERT_EQ(3, res["grouped_hits"][4]["found"].get<int32_t>());
ASSERT_EQ(2, res["grouped_hits"][50]["found"].get<int32_t>());
ASSERT_EQ(2, res["grouped_hits"][99]["found"].get<int32_t>());
//search in asc order
std::vector<sort_by> sort_fields2 = {sort_by("_group_found", "ASC")};
auto res2 = coll3->search("*", {}, "", {"brand"}, sort_fields2, {0}, 100, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size"}, 2).get();
ASSERT_EQ(1000, res2["found_docs"].get<size_t>());
ASSERT_EQ(300, res2["found"].get<size_t>());
ASSERT_EQ(100, res2["grouped_hits"].size());
ASSERT_EQ(2, res2["grouped_hits"][0]["found"].get<int32_t>());
ASSERT_EQ(2, res2["grouped_hits"][1]["found"].get<int32_t>());
ASSERT_EQ(3, res2["grouped_hits"][50]["found"].get<int32_t>());
ASSERT_EQ(3, res2["grouped_hits"][99]["found"].get<int32_t>());
res2 = coll3->search("*", {}, "", {"brand"}, sort_fields2, {0}, 100, 2, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size"}, 2).get();
ASSERT_EQ(1000, res2["found_docs"].get<size_t>());
ASSERT_EQ(300, res2["found"].get<size_t>());
ASSERT_EQ(100, res2["grouped_hits"].size());
ASSERT_EQ(3, res2["grouped_hits"][0]["found"].get<int32_t>());
ASSERT_EQ(3, res2["grouped_hits"][1]["found"].get<int32_t>());
ASSERT_EQ(4, res2["grouped_hits"][50]["found"].get<int32_t>());
ASSERT_EQ(4, res2["grouped_hits"][99]["found"].get<int32_t>());
}
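// Sorting by _group_found without specifying any group_by field must be rejected with an error.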
TEST_F(CollectionGroupingTest, GroupSortingWithoutGroupingFields) {
std::vector<sort_by> sort_fields = {sort_by("_group_found", "DESC")};
auto res = coll_group->search("*", {}, "", {"brand"}, sort_fields, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {});
ASSERT_EQ(res.ok(), false);
ASSERT_EQ(res.error(), "group_by parameters should not be empty when using sort_by group_found");
}
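// When the final boolean search argument is false, each document with a null `brand` forms its
// own group; when it is true (presumably the group_missing_values flag), the null-brand
// documents are collapsed into a single group.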
TEST_F(CollectionGroupingTest, SkipToReverseGroupBy) {
std::vector<field> fields = {
field("brand", field_types::STRING, true, true),
};
Collection* coll2 = collectionManager.get_collection("coll2").get();
if(coll2 == nullptr) {
coll2 = collectionManager.create_collection("coll2", 1, fields).get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["brand"] = nullptr;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
auto res = coll2->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"brand"}, 2,
"<mark>", "</mark>", {3,3}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_score,
100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", true, false).get();
ASSERT_EQ(1, res["grouped_hits"].size());
ASSERT_EQ(0, res["grouped_hits"][0]["group_key"].size());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ("0", res["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>());
doc["id"] = "1";
doc["brand"] = "adidas";
ASSERT_TRUE(coll2->add(doc.dump()).ok());
doc["id"] = "2";
doc["brand"] = "puma";
ASSERT_TRUE(coll2->add(doc.dump()).ok());
doc["id"] = "3";
doc["brand"] = nullptr;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
doc["id"] = "4";
doc["brand"] = "nike";
ASSERT_TRUE(coll2->add(doc.dump()).ok());
res = coll2->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"brand"}, 2,
"<mark>", "</mark>", {3,3}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_score,
100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", true, false).get();
ASSERT_EQ(5, res["grouped_hits"].size());
ASSERT_EQ("nike", res["grouped_hits"][0]["group_key"][0].get<std::string>());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ(0, res["grouped_hits"][1]["group_key"].size());
ASSERT_EQ(1, res["grouped_hits"][1]["hits"].size());
ASSERT_EQ("3", res["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("puma", res["grouped_hits"][2]["group_key"][0].get<std::string>());
ASSERT_EQ(1, res["grouped_hits"][2]["hits"].size());
ASSERT_EQ("adidas", res["grouped_hits"][3]["group_key"][0].get<std::string>());
ASSERT_EQ(1, res["grouped_hits"][3]["hits"].size());
ASSERT_EQ(0, res["grouped_hits"][4]["group_key"].size());
ASSERT_EQ(1, res["grouped_hits"][4]["hits"].size());
ASSERT_EQ("0", res["grouped_hits"][4]["hits"][0]["document"]["id"].get<std::string>());
res = coll2->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"brand"}, 2,
"<mark>", "</mark>", {3,3}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_score,
100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", true, true).get();
ASSERT_EQ(4, res["grouped_hits"].size());
ASSERT_EQ("nike", res["grouped_hits"][0]["group_key"][0].get<std::string>());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ(0, res["grouped_hits"][1]["group_key"].size());
ASSERT_EQ(2, res["grouped_hits"][1]["hits"].size());
ASSERT_EQ("3", res["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("puma", res["grouped_hits"][2]["group_key"][0].get<std::string>());
ASSERT_EQ(1, res["grouped_hits"][2]["hits"].size());
}
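// Faceting on two fields ("brand" and "colors") while grouping the results by "size";
// both the grouped hits and the facet counts are verified.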
TEST_F(CollectionGroupingTest, GroupByMultipleFacetFields) {
auto res = coll_group->search("*", {}, "", {"brand", "colors"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size"}, 2).get();
ASSERT_EQ(12, res["found_docs"].get<size_t>());
ASSERT_EQ(3, res["found"].get<size_t>());
ASSERT_EQ(3, res["grouped_hits"].size());
ASSERT_EQ(11, res["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"][0]["found"].get<int32_t>());
ASSERT_FLOAT_EQ(4.8, res["grouped_hits"][0]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ(11, res["grouped_hits"][0]["hits"][0]["document"]["size"].get<size_t>());
ASSERT_STREQ("5", res["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.3, res["grouped_hits"][0]["hits"][1]["document"]["rating"].get<float>());
ASSERT_STREQ("1", res["grouped_hits"][0]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(7, res["grouped_hits"][1]["found"].get<int32_t>());
ASSERT_FLOAT_EQ(4.8, res["grouped_hits"][1]["hits"][0]["document"]["rating"].get<float>());
ASSERT_STREQ("4", res["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.6, res["grouped_hits"][1]["hits"][1]["document"]["rating"].get<float>());
ASSERT_STREQ("3", res["grouped_hits"][1]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(3, res["grouped_hits"][2]["found"].get<int32_t>());
ASSERT_FLOAT_EQ(4.6, res["grouped_hits"][2]["hits"][0]["document"]["rating"].get<float>());
ASSERT_STREQ("2", res["grouped_hits"][2]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(4.4, res["grouped_hits"][2]["hits"][1]["document"]["rating"].get<float>());
ASSERT_STREQ("8", res["grouped_hits"][2]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("brand", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("Beta", res["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("Omega", res["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][0]["counts"][2]["count"]);
ASSERT_STREQ("Xorp", res["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][0]["counts"][3]["count"]);
ASSERT_STREQ("Zeta", res["facet_counts"][0]["counts"][3]["value"].get<std::string>().c_str());
ASSERT_STREQ("colors", res["facet_counts"][1]["field_name"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][1]["counts"][0]["count"]);
ASSERT_STREQ("blue", res["facet_counts"][1]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][1]["counts"][1]["count"]);
ASSERT_STREQ("white", res["facet_counts"][1]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][1]["counts"][2]["count"]);
ASSERT_STREQ("red", res["facet_counts"][1]["counts"][2]["value"].get<std::string>().c_str());
}
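// Faceting on "colors" and "brand" with a `size:>10` filter narrowing the grouped results.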
TEST_F(CollectionGroupingTest, GroupByMultipleFacetFieldsWithFilter) {
auto res = coll_group->search("*", {}, "size:>10", {"colors", "brand"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, {}, {"size"}, 2).get();
ASSERT_EQ(5, res["found_docs"].get<size_t>());
ASSERT_EQ(2, res["found"].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"].size());
ASSERT_EQ(11, res["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"][0]["found"].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ("5", res["grouped_hits"][0]["hits"][0]["document"]["id"]);
ASSERT_FLOAT_EQ(4.8, res["grouped_hits"][0]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ("1", res["grouped_hits"][0]["hits"][1]["document"]["id"]);
ASSERT_FLOAT_EQ(4.3, res["grouped_hits"][0]["hits"][1]["document"]["rating"].get<float>());
ASSERT_EQ(12, res["grouped_hits"][1]["group_key"][0].get<size_t>());
ASSERT_EQ(3, res["grouped_hits"][1]["found"].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"][1]["hits"].size());
ASSERT_EQ("2", res["grouped_hits"][1]["hits"][0]["document"]["id"]);
ASSERT_FLOAT_EQ(4.6, res["grouped_hits"][1]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ("8", res["grouped_hits"][1]["hits"][1]["document"]["id"]);
ASSERT_FLOAT_EQ(4.4, res["grouped_hits"][1]["hits"][1]["document"]["rating"].get<float>());
ASSERT_STREQ("colors", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("blue", res["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("white", res["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][0]["counts"][2]["count"]);
ASSERT_STREQ("red", res["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_STREQ("brand", res["facet_counts"][1]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][1]["counts"][0]["count"]);
ASSERT_STREQ("Beta", res["facet_counts"][1]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(2, (int) res["facet_counts"][1]["counts"][1]["count"]);
ASSERT_STREQ("Omega", res["facet_counts"][1]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][1]["counts"][2]["count"]);
ASSERT_STREQ("Xorp", res["facet_counts"][1]["counts"][2]["value"].get<std::string>().c_str());
}
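// With pinned hits "3:1,4:2", the pinned documents surface as separate single-hit groups ahead
// of the regular grouped results, even though both belong to the same size-10 group.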
TEST_F(CollectionGroupingTest, GroupByMultipleFacetFieldsWithPinning) {
auto res = coll_group->search("*", {}, "size:>10", {"colors", "brand"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{"3:1,4:2"}, {}, {"size"}, 2).get();
ASSERT_EQ(5, res["found_docs"].get<size_t>());
ASSERT_EQ(4, res["found"].get<size_t>());
ASSERT_EQ(4, res["grouped_hits"].size());
ASSERT_EQ(10, res["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ("3", res["grouped_hits"][0]["hits"][0]["document"]["id"]);
ASSERT_FLOAT_EQ(4.6, res["grouped_hits"][0]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ(10, res["grouped_hits"][1]["group_key"][0].get<size_t>());
ASSERT_EQ(1, res["grouped_hits"][1]["hits"].size());
ASSERT_EQ("4", res["grouped_hits"][1]["hits"][0]["document"]["id"]);
ASSERT_FLOAT_EQ(4.8, res["grouped_hits"][1]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ(11, res["grouped_hits"][2]["group_key"][0].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"][2]["found"].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"][2]["hits"].size());
ASSERT_EQ("5", res["grouped_hits"][2]["hits"][0]["document"]["id"]);
ASSERT_FLOAT_EQ(4.8, res["grouped_hits"][2]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ("1", res["grouped_hits"][2]["hits"][1]["document"]["id"]);
ASSERT_FLOAT_EQ(4.3, res["grouped_hits"][2]["hits"][1]["document"]["rating"].get<float>());
ASSERT_EQ(12, res["grouped_hits"][3]["group_key"][0].get<size_t>());
ASSERT_EQ(3, res["grouped_hits"][3]["found"].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"][3]["hits"].size());
ASSERT_EQ("2", res["grouped_hits"][3]["hits"][0]["document"]["id"]);
ASSERT_FLOAT_EQ(4.6, res["grouped_hits"][3]["hits"][0]["document"]["rating"].get<float>());
ASSERT_EQ("8", res["grouped_hits"][3]["hits"][1]["document"]["id"]);
ASSERT_FLOAT_EQ(4.4, res["grouped_hits"][3]["hits"][1]["document"]["rating"].get<float>());
ASSERT_STREQ("colors", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("blue", res["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("white", res["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][0]["counts"][2]["count"]);
ASSERT_STREQ("red", res["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_STREQ("brand", res["facet_counts"][1]["field_name"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][1]["counts"][0]["count"]);
ASSERT_STREQ("Beta", res["facet_counts"][1]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) res["facet_counts"][1]["counts"][1]["count"]);
ASSERT_STREQ("Omega", res["facet_counts"][1]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][1]["counts"][2]["count"]);
ASSERT_STREQ("Xorp", res["facet_counts"][1]["counts"][2]["value"].get<std::string>().c_str());
}
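// The order of the grouped results should follow the positions specified in the pinned hits list.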
TEST_F(CollectionGroupingTest, GroupByPinnedHitsOrder) {
auto res = coll_group->search("*", {"title"}, "size:=[12,11]", {}, {}, {0}, 50, 0, NOT_SET,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4,
"", 1,
{"6:1,1:2"}, {}, {"size"}, 1).get();
ASSERT_EQ(2, res["found"].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"].size());
ASSERT_EQ(12, res["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ("6", res["grouped_hits"][0]["hits"][0]["document"]["id"]);
ASSERT_EQ(11, res["grouped_hits"][1]["group_key"][0].get<size_t>());
ASSERT_EQ(1, res["grouped_hits"][1]["hits"].size());
ASSERT_EQ("1", res["grouped_hits"][1]["hits"][0]["document"]["id"]);
//try with pinned hits in other order
res = coll_group->search("*", {"title"}, "size:=[12,11]", {}, {}, {0}, 50, 0, NOT_SET,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4,
"", 1,
{"5:1,8:2"}, {}, {"size"}, 1).get();
ASSERT_EQ(2, res["found"].get<size_t>());
ASSERT_EQ(2, res["grouped_hits"].size());
ASSERT_EQ(11, res["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ("5", res["grouped_hits"][0]["hits"][0]["document"]["id"]);
ASSERT_EQ(12, res["grouped_hits"][1]["group_key"][0].get<size_t>());
ASSERT_EQ(1, res["grouped_hits"][1]["hits"].size());
ASSERT_EQ("8", res["grouped_hits"][1]["hits"][0]["document"]["id"]);
//random order
res = coll_group->search("*", {"title"}, "size:=[12,11,10]", {}, {}, {0}, 50, 0, NOT_SET,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4,
"", 1,
{"5:1,8:2,0:3"}, {}, {"size"}, 1).get();
ASSERT_EQ(3, res["found"].get<size_t>());
ASSERT_EQ(3, res["grouped_hits"].size());
ASSERT_EQ(11, res["grouped_hits"][0]["group_key"][0].get<size_t>());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ("5", res["grouped_hits"][0]["hits"][0]["document"]["id"]);
ASSERT_EQ(12, res["grouped_hits"][1]["group_key"][0].get<size_t>());
ASSERT_EQ(1, res["grouped_hits"][1]["hits"].size());
ASSERT_EQ("8", res["grouped_hits"][1]["hits"][0]["document"]["id"]);
ASSERT_EQ(10, res["grouped_hits"][2]["group_key"][0].get<size_t>());
ASSERT_EQ(1, res["grouped_hits"][2]["hits"].size());
ASSERT_EQ("0", res["grouped_hits"][2]["hits"][0]["document"]["id"]);
}
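// Groups documents by "fabric_id" (a field defined with a reference to `fabric.id`) and verifies
// that per_page limits the number of groups returned, not the number of matching documents.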
TEST_F(CollectionGroupingTest, GroupByPerPage) {
std::vector<field> fields = {
field("name", field_types::STRING, false, false),
field("id", field_types::STRING, true, true),
};
Collection* fabric = collectionManager.get_collection("fabric").get();
if(fabric == nullptr) {
fabric = collectionManager.create_collection("fabric", 1, fields).get();
}
nlohmann::json doc;
doc["id"] = "1001";
doc["name"] = "Cotton";
ASSERT_TRUE(fabric->add(doc.dump()).ok());
doc["id"] = "1002";
doc["name"] = "Nylon";
ASSERT_TRUE(fabric->add(doc.dump()).ok());
doc["id"] = "1003";
doc["name"] = "Polyester";
ASSERT_TRUE(fabric->add(doc.dump()).ok());
doc["id"] = "1004";
doc["name"] = "Linen";
ASSERT_TRUE(fabric->add(doc.dump()).ok());
doc["id"] = "1005";
doc["name"] = "Silk";
ASSERT_TRUE(fabric->add(doc.dump()).ok());
fields = {
field("name", field_types::STRING, false, false),
field("fabric_id", field_types::STRING, true, false, true,
"", -1, -1, false, 0, 0, cosine, "fabric.id"),
field("size", field_types::STRING, false, false),
};
Collection* garments = collectionManager.get_collection("garments").get();
if(garments == nullptr) {
garments = collectionManager.create_collection("garments", 1, fields).get();
}
nlohmann::json doc2;
doc2["name"] = "Tshirt";
doc2["fabric_id"] = "1001";
doc2["size"] = "Medium";
ASSERT_TRUE(garments->add(doc2.dump()).ok());
doc2["name"] = "Tshirt";
doc2["fabric_id"] = "1003";
doc2["size"] = "Large";
ASSERT_TRUE(garments->add(doc2.dump()).ok());
doc2["name"] = "Shirt";
doc2["fabric_id"] = "1004";
doc2["size"] = "Xtra Large";
ASSERT_TRUE(garments->add(doc2.dump()).ok());
doc2["name"] = "Trouser";
doc2["fabric_id"] = "1002";
doc2["size"] = "Small";
ASSERT_TRUE(garments->add(doc2.dump()).ok());
doc2["name"] = "Veshti";
doc2["fabric_id"] = "1005";
doc2["size"] = "Free";
ASSERT_TRUE(garments->add(doc2.dump()).ok());
doc2["name"] = "Shorts";
doc2["fabric_id"] = "1002";
doc2["size"] = "Medium";
ASSERT_TRUE(garments->add(doc2.dump()).ok());
doc2["name"] = "Shirt";
doc2["fabric_id"] = "1005";
doc2["size"] = "Large";
ASSERT_TRUE(garments->add(doc2.dump()).ok());
//limit per page to 4
auto res = garments->search("*", {"name"}, "", {}, {}, {0}, 4, 1, NOT_SET,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4,
"", 1,
{}, {}, {"fabric_id"}, 1).get();
ASSERT_EQ(5, res["found"].get<size_t>());
ASSERT_EQ(4, res["grouped_hits"].size());
ASSERT_EQ(7, res["found_docs"].get<size_t>());
ASSERT_EQ("1005", res["grouped_hits"][0]["group_key"][0]);
ASSERT_EQ("1002", res["grouped_hits"][1]["group_key"][0]);
ASSERT_EQ("1004", res["grouped_hits"][2]["group_key"][0]);
ASSERT_EQ("1003", res["grouped_hits"][3]["group_key"][0]);
//per page 10
res = garments->search("*", {"name"}, "", {}, {}, {0}, 10, 1, NOT_SET,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4,
"", 1,
{}, {}, {"fabric_id"}, 1).get();
ASSERT_EQ(5, res["found"].get<size_t>());
ASSERT_EQ(5, res["grouped_hits"].size());
ASSERT_EQ(7, res["found_docs"].get<size_t>());
ASSERT_EQ("1005", res["grouped_hits"][0]["group_key"][0]);
ASSERT_EQ("1002", res["grouped_hits"][1]["group_key"][0]);
ASSERT_EQ("1004", res["grouped_hits"][2]["group_key"][0]);
ASSERT_EQ("1003", res["grouped_hits"][3]["group_key"][0]);
ASSERT_EQ("1001", res["grouped_hits"][4]["group_key"][0]);
}
| 63,595 | C++ | .cpp | 1,057 | 49.115421 | 121 | 0.522733 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,708 | topster_test.cpp | typesense_typesense/test/topster_test.cpp |
#include <gtest/gtest.h>
#include <index.h>
#include "topster.h"
#include "match_score.h"
#include <fstream>
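// A Topster of capacity 5 should keep only the five best-scoring entries, retain the higher
// score when the same key is added twice, and order keys by descending score after sort().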
TEST(TopsterTest, MaxIntValues) {
Topster topster(5);
struct {
uint16_t query_index;
uint64_t key;
uint64_t match_score;
int64_t primary_attr;
int64_t secondary_attr;
} data[14] = {
{0, 1, 11, 20, 30},
{0, 1, 12, 20, 32},
{0, 2, 4, 20, 30},
{2, 3, 7, 20, 30},
{0, 4, 14, 20, 30},
{1, 5, 9, 20, 30},
{1, 5, 10, 20, 32},
{1, 5, 9, 20, 30},
{0, 6, 6, 20, 30},
{2, 7, 6, 22, 30},
{2, 7, 6, 22, 30},
{1, 8, 9, 20, 30},
{0, 9, 8, 20, 30},
{3, 10, 5, 20, 30},
};
for(int i = 0; i < 14; i++) {
int64_t scores[3];
scores[0] = int64_t(data[i].match_score);
scores[1] = data[i].primary_attr;
scores[2] = data[i].secondary_attr;
KV kv(data[i].query_index, data[i].key, data[i].key, 0, scores);
topster.add(&kv);
}
topster.sort();
std::vector<uint64_t> ids = {4, 1, 5, 8, 9};
for(uint32_t i = 0; i < topster.size; i++) {
EXPECT_EQ(ids[i], topster.getKeyAt(i));
if(ids[i] == 1) {
EXPECT_EQ(12, (int) topster.getKV(i)->scores[topster.getKV(i)->match_score_index]);
}
if(ids[i] == 5) {
EXPECT_EQ(10, (int) topster.getKV(i)->scores[topster.getKV(i)->match_score_index]);
}
}
}
TEST(TopsterTest, StableSorting) {
    // evaluate whether the positions of the documents in a Topster of size 1000 are the same in Topsters of size 250, 500 and 750
std::ifstream infile("test/resources/record_values.txt");
std::string line;
std::vector<std::pair<uint64_t, int64_t>> records;
while (std::getline(infile, line)) {
std::vector<std::string> parts;
StringUtils::split(line, parts, ",");
uint64_t key = std::stoll(parts[0]);
records.emplace_back(key, std::stoi(parts[1]));
}
infile.close();
Topster topster1K(1000);
for(auto id_score: records) {
int64_t scores[3] = {id_score.second, 0, 0};
KV kv(0, id_score.first, id_score.first, 0, scores);
topster1K.add(&kv);
}
topster1K.sort();
std::vector<uint64_t> record_ids;
for(uint32_t i = 0; i < topster1K.size; i++) {
record_ids.push_back(topster1K.getKeyAt(i));
}
// check on Topster<250>
Topster topster250(250);
for(auto id_score: records) {
int64_t scores[3] = {id_score.second, 0, 0};
KV kv(0, id_score.first, id_score.first, 0, scores);
topster250.add(&kv);
}
topster250.sort();
for(uint32_t i = 0; i < topster250.size; i++) {
ASSERT_EQ(record_ids[i], topster250.getKeyAt(i));
}
// check on Topster<500>
Topster topster500(500);
for(auto id_score: records) {
int64_t scores[3] = {id_score.second, 0, 0};
KV kv(0, id_score.first, id_score.first, 0, scores);
topster500.add(&kv);
}
topster500.sort();
for(uint32_t i = 0; i < topster500.size; i++) {
ASSERT_EQ(record_ids[i], topster500.getKeyAt(i));
}
// check on Topster<750>
Topster topster750(750);
for(auto id_score: records) {
int64_t scores[3] = {id_score.second, 0, 0};
KV kv(0, id_score.first, id_score.first, 0, scores);
topster750.add(&kv);
}
topster750.sort();
for(uint32_t i = 0; i < topster750.size; i++) {
ASSERT_EQ(record_ids[i], topster750.getKeyAt(i));
}
}
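// Float sorting attributes are converted with Index::float_to_int64_t so that they can be
// ranked alongside integer scores inside the Topster.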
TEST(TopsterTest, MaxFloatValues) {
Topster topster(5);
struct {
uint16_t query_index;
uint64_t key;
uint64_t match_score;
float primary_attr;
int64_t secondary_attr;
} data[12] = {
{0, 1, 11, 1.09, 30},
{0, 2, 11, -20, 30},
{2, 3, 11, -20, 30},
{0, 4, 11, 7.812, 30},
{0, 4, 11, 7.912, 30},
{1, 5, 11, 0.0, 34},
{0, 6, 11, -22, 30},
{2, 7, 11, -22, 30},
{1, 8, 11, -9.998, 30},
{1, 8, 11, -9.998, 30},
{0, 9, 11, -9.999, 30},
{3, 10, 11, -20, 30},
};
for(int i = 0; i < 12; i++) {
int64_t scores[3];
scores[0] = int64_t(data[i].match_score);
scores[1] = Index::float_to_int64_t(data[i].primary_attr);
scores[2] = data[i].secondary_attr;
KV kv(data[i].query_index, data[i].key, data[i].key, 0, scores);
topster.add(&kv);
}
topster.sort();
std::vector<uint64_t> ids = {4, 1, 5, 8, 9};
for(uint32_t i = 0; i < topster.size; i++) {
EXPECT_EQ(ids[i], topster.getKeyAt(i));
}
}
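// With a group limit of 2 (the second constructor argument, presumably the per-group size),
// each distinct key keeps up to two entries in group_kv_map, ordered by descending score.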
TEST(TopsterTest, DistinctIntValues) {
Topster dist_topster(5, 2);
struct {
uint16_t query_index;
uint64_t distinct_key;
uint64_t match_score;
int64_t primary_attr;
int64_t secondary_attr;
} data[14] = {
{0, 1, 11, 20, 30},
{0, 1, 12, 20, 32},
{0, 2, 4, 20, 30},
{2, 3, 7, 20, 30},
{0, 4, 14, 20, 30},
{1, 5, 9, 20, 30},
{1, 5, 10, 20, 32},
{1, 5, 9, 20, 30},
{0, 6, 6, 20, 30},
{2, 7, 6, 22, 30},
{2, 7, 6, 22, 30},
{1, 8, 9, 20, 30},
{0, 9, 8, 20, 30},
{3, 10, 5, 20, 30},
};
for(int i = 0; i < 14; i++) {
int64_t scores[3];
scores[0] = int64_t(data[i].match_score);
scores[1] = data[i].primary_attr;
scores[2] = data[i].secondary_attr;
KV kv(data[i].query_index, i+100, data[i].distinct_key, 0, scores);
dist_topster.add(&kv);
}
dist_topster.sort();
std::vector<uint64_t> distinct_ids = {4, 1, 8, 5, 9};
for(uint32_t i = 0; i < dist_topster.size; i++) {
EXPECT_EQ(distinct_ids[i], dist_topster.getDistinctKeyAt(i));
if(distinct_ids[i] == 1) {
EXPECT_EQ(12, (int) dist_topster.getKV(i)->scores[dist_topster.getKV(i)->match_score_index]);
EXPECT_EQ(2, dist_topster.group_kv_map[dist_topster.getDistinctKeyAt(i)]->size);
EXPECT_EQ(12, dist_topster.group_kv_map[dist_topster.getDistinctKeyAt(i)]->getKV(0)->scores[0]);
EXPECT_EQ(11, dist_topster.group_kv_map[dist_topster.getDistinctKeyAt(i)]->getKV(1)->scores[0]);
}
if(distinct_ids[i] == 5) {
EXPECT_EQ(9, (int) dist_topster.getKV(i)->scores[dist_topster.getKV(i)->match_score_index]);
EXPECT_EQ(2, dist_topster.group_kv_map[dist_topster.getDistinctKeyAt(i)]->size);
EXPECT_EQ(10, dist_topster.group_kv_map[dist_topster.getDistinctKeyAt(i)]->getKV(0)->scores[0]);
EXPECT_EQ(9, dist_topster.group_kv_map[dist_topster.getDistinctKeyAt(i)]->getKV(1)->scores[0]);
}
}
}
| 6,775 | C++ | .cpp | 192 | 27.739583 | 108 | 0.539003 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,709 | tsconfig_test.cpp | typesense_typesense/test/tsconfig_test.cpp |
#include <gtest/gtest.h>
#include <stdlib.h>
#include <iostream>
#include <cmdline.h>
#include "typesense_server_utils.h"
#include "tsconfig.h"
std::vector<char*> get_argv(std::vector<std::string> & args) {
std::vector<char*> argv;
for (const auto& arg : args) {
argv.push_back((char*)arg.data());
}
argv.push_back(nullptr);
return argv;
}
class ConfigImpl : public Config {
public:
ConfigImpl(): Config() {
}
};
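// Config values can be loaded from command line flags, environment variables and a config file;
// the tests below cover each source and their relative precedence.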
TEST(ConfigTest, LoadCmdLineArguments) {
cmdline::parser options;
std::vector<std::string> args = {
"./typesense-server",
"--data-dir=/tmp/data",
"--api-key=abcd",
"--listen-port=8080",
"--max-per-page=250",
};
std::vector<char*> argv = get_argv(args);
init_cmdline_options(options, argv.size() - 1, argv.data());
options.parse(argv.size() - 1, argv.data());
ConfigImpl config;
config.load_config_cmd_args(options);
ASSERT_EQ("abcd", config.get_api_key());
ASSERT_EQ(8080, config.get_api_port());
ASSERT_EQ("/tmp/data", config.get_data_dir());
ASSERT_EQ(true, config.get_enable_cors());
}
TEST(ConfigTest, LoadEnvVars) {
cmdline::parser options;
putenv((char*)"TYPESENSE_DATA_DIR=/tmp/ts");
putenv((char*)"TYPESENSE_LISTEN_PORT=9090");
ConfigImpl config;
config.load_config_env();
ASSERT_EQ("/tmp/ts", config.get_data_dir());
ASSERT_EQ(9090, config.get_api_port());
}
TEST(ConfigTest, BadConfigurationReturnsError) {
ConfigImpl config1;
config1.set_api_key("abcd");
auto validation = config1.is_valid();
ASSERT_EQ(false, validation.ok());
ASSERT_EQ("Data directory is not specified.", validation.error());
ConfigImpl config2;
config2.set_data_dir("/tmp/ts");
validation = config2.is_valid();
ASSERT_EQ(false, validation.ok());
ASSERT_EQ("API key is not specified.", validation.error());
}
TEST(ConfigTest, LoadConfigFile) {
cmdline::parser options;
std::vector<std::string> args = {
"./typesense-server",
std::string("--config=") + std::string(ROOT_DIR)+"test/valid_config.ini"
};
std::vector<char*> argv = get_argv(args);
init_cmdline_options(options, argv.size() - 1, argv.data());
options.parse(argv.size() - 1, argv.data());
ConfigImpl config;
config.load_config_file(options);
auto validation = config.is_valid();
ASSERT_EQ(true, validation.ok());
ASSERT_EQ("/tmp/ts", config.get_data_dir());
ASSERT_EQ("1234", config.get_api_key());
ASSERT_EQ("/tmp/logs", config.get_log_dir());
ASSERT_EQ(9090, config.get_api_port());
ASSERT_EQ(true, config.get_enable_cors());
}
TEST(ConfigTest, LoadIncompleteConfigFile) {
cmdline::parser options;
std::vector<std::string> args = {
"./typesense-server",
std::string("--config=") + std::string(ROOT_DIR)+"test/valid_sparse_config.ini"
};
std::vector<char*> argv = get_argv(args);
init_cmdline_options(options, argv.size() - 1, argv.data());
options.parse(argv.size() - 1, argv.data());
ConfigImpl config;
auto validation = config.is_valid();
ASSERT_EQ(false, validation.ok());
ASSERT_EQ("Data directory is not specified.", validation.error());
}
TEST(ConfigTest, LoadBadConfigFile) {
cmdline::parser options;
std::vector<std::string> args = {
"./typesense-server",
std::string("--config=") + std::string(ROOT_DIR)+"test/bad_config.ini"
};
std::vector<char*> argv = get_argv(args);
init_cmdline_options(options, argv.size() - 1, argv.data());
options.parse(argv.size() - 1, argv.data());
ConfigImpl config;
config.load_config_file(options);
auto validation = config.is_valid();
ASSERT_EQ(false, validation.ok());
ASSERT_EQ("Error parsing the configuration file.", validation.error());
}
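// Precedence check: command line arguments should win over both the config file and environment variables.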
TEST(ConfigTest, CmdLineArgsOverrideConfigFileAndEnvVars) {
cmdline::parser options;
std::vector<std::string> args = {
"./typesense-server",
"--data-dir=/tmp/data",
"--api-key=abcd",
"--listen-address=192.168.10.10",
"--cors-domains=http://localhost:8108",
"--max-per-page=250",
std::string("--config=") + std::string(ROOT_DIR)+"test/valid_sparse_config.ini"
};
putenv((char*)"TYPESENSE_DATA_DIR=/tmp/ts");
putenv((char*)"TYPESENSE_LOG_DIR=/tmp/ts_log");
putenv((char*)"TYPESENSE_LISTEN_PORT=9090");
putenv((char*)"TYPESENSE_LISTEN_ADDRESS=127.0.0.1");
putenv((char*)"TYPESENSE_ENABLE_CORS=TRUE");
putenv((char*)"TYPESENSE_CORS_DOMAINS=http://localhost:7108");
std::vector<char*> argv = get_argv(args);
init_cmdline_options(options, argv.size() - 1, argv.data());
options.parse(argv.size() - 1, argv.data());
ConfigImpl config;
config.load_config_env();
config.load_config_file(options);
config.load_config_cmd_args(options);
ASSERT_EQ("abcd", config.get_api_key());
ASSERT_EQ("/tmp/data", config.get_data_dir());
ASSERT_EQ("/tmp/ts_log", config.get_log_dir());
ASSERT_EQ(9090, config.get_api_port());
ASSERT_EQ(true, config.get_enable_cors());
ASSERT_EQ("192.168.10.10", config.get_api_address());
ASSERT_EQ("abcd", config.get_api_key()); // cli parameter overrides file config
ASSERT_EQ(1, config.get_cors_domains().size()); // cli parameter overrides file config
ASSERT_EQ("http://localhost:8108", *(config.get_cors_domains().begin()));
ASSERT_EQ(250, config.get_max_per_page());
}
TEST(ConfigTest, CorsDefaults) {
cmdline::parser options;
std::vector<std::string> args = {
"./typesense-server",
"--data-dir=/tmp/data",
"--api-key=abcd",
"--listen-address=192.168.10.10",
"--max-per-page=250",
std::string("--config=") + std::string(ROOT_DIR)+"test/valid_sparse_config.ini"
};
std::vector<char*> argv = get_argv(args);
init_cmdline_options(options, argv.size() - 1, argv.data());
options.parse(argv.size() - 1, argv.data());
ConfigImpl config;
config.load_config_cmd_args(options);
ASSERT_EQ(true, config.get_enable_cors());
ASSERT_EQ(0, config.get_cors_domains().size());
unsetenv("TYPESENSE_ENABLE_CORS");
unsetenv("TYPESENSE_CORS_DOMAINS");
ConfigImpl config2;
config2.load_config_env();
ASSERT_EQ(true, config2.get_enable_cors());
ASSERT_EQ(0, config2.get_cors_domains().size());
ConfigImpl config3;
config3.load_config_file(options);
ASSERT_EQ(true, config3.get_enable_cors());
ASSERT_EQ(1, config3.get_cors_domains().size());
}
| 6,613 | C++ | .cpp | 170 | 33.582353 | 91 | 0.648429 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,710 | main.cpp | typesense_typesense/test/main.cpp |
#include <gtest/gtest.h>
#include "logger.h"
class TypesenseTestEnvironment : public testing::Environment {
public:
virtual void SetUp() {
}
virtual void TearDown() {
}
};
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
::testing::AddGlobalTestEnvironment(new TypesenseTestEnvironment);
int exitCode = RUN_ALL_TESTS();
return exitCode;
}
| 401 | C++ | .cpp | 15 | 23.266667 | 70 | 0.711286 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,711 | vector_query_ops_test.cpp | typesense_typesense/test/vector_query_ops_test.cpp |
#include <gtest/gtest.h>
#include "vector_query_ops.h"
class VectorQueryOpsTest : public ::testing::Test {
protected:
void setupCollection() {
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
}
};
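// Exercises VectorQueryOps::parse_vector_query_str() with valid and malformed vector query
// strings and checks the exact error messages that are returned.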
TEST_F(VectorQueryOpsTest, ParseVectorQueryString) {
vector_query_t vector_query;
auto parsed = VectorQueryOps::parse_vector_query_str("vec:([0.34, 0.66, 0.12, 0.68], k: 10)", vector_query, false, nullptr, false);
ASSERT_TRUE(parsed.ok());
ASSERT_EQ("vec", vector_query.field_name);
ASSERT_EQ(10, vector_query.k);
std::vector<float> fvs = {0.34, 0.66, 0.12, 0.68};
ASSERT_EQ(fvs.size(), vector_query.values.size());
for (size_t i = 0; i < fvs.size(); i++) {
ASSERT_EQ(fvs[i], vector_query.values[i]);
}
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec:([0.34, 0.66, 0.12, 0.68], k: 10)", vector_query, false, nullptr, false);
ASSERT_TRUE(parsed.ok());
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec:([])", vector_query, false, nullptr, false);
ASSERT_FALSE(parsed.ok());
ASSERT_EQ("When a vector query value is empty, an `id` parameter must be present.", parsed.error());
// cannot pass both vector and id
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec:([0.34, 0.66, 0.12, 0.68], id: 10)", vector_query, false, nullptr, false);
ASSERT_FALSE(parsed.ok());
ASSERT_EQ("Malformed vector query string: cannot pass both vector query and `id` parameter.", parsed.error());
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec:([], k: 10)", vector_query, false, nullptr, false);
ASSERT_TRUE(parsed.ok());
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec:([], k: 10)", vector_query, true, nullptr, false);
ASSERT_TRUE(parsed.ok());
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec:[0.34, 0.66, 0.12, 0.68], k: 10)", vector_query, false, nullptr, false);
ASSERT_FALSE(parsed.ok());
ASSERT_EQ("Malformed vector query string.", parsed.error());
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec:([0.34, 0.66, 0.12, 0.68], k: 10", vector_query, false, nullptr, false);
ASSERT_TRUE(parsed.ok());
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec:(0.34, 0.66, 0.12, 0.68, k: 10)", vector_query, false, nullptr, false);
ASSERT_FALSE(parsed.ok());
ASSERT_EQ("Malformed vector query string.", parsed.error());
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec:([0.34, 0.66, 0.12, 0.68], )", vector_query, false, nullptr, false);
ASSERT_FALSE(parsed.ok());
ASSERT_EQ("Malformed vector query string.", parsed.error());
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec([0.34, 0.66, 0.12, 0.68])", vector_query, false, nullptr, false);
ASSERT_FALSE(parsed.ok());
ASSERT_EQ("Malformed vector query string: `:` is missing after the vector field name.", parsed.error());
vector_query._reset();
parsed = VectorQueryOps::parse_vector_query_str("vec([0.34, 0.66, 0.12, 0.68], k: 10)", vector_query, false, nullptr, false);
ASSERT_FALSE(parsed.ok());
ASSERT_EQ("Malformed vector query string: `:` is missing after the vector field name.", parsed.error());
}
| 3,472 | C++ | .cpp | 65 | 48.476923 | 135 | 0.661162 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,712 | array_utils_test.cpp | typesense_typesense/test/array_utils_test.cpp |
#include <gtest/gtest.h>
#include "array_utils.h"
#include "logger.h"
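// ArrayUtils works on sorted uint32_t id arrays: and_scalar intersects, or_scalar produces a
// de-duplicated union, exclude_scalar filters ids out, and skip_index_to_id advances an index
// to a target id.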
TEST(SortedArrayTest, AndScalar) {
const size_t size1 = 9;
uint32_t *arr1 = new uint32_t[size1];
for(size_t i = 0; i < size1; i++) {
arr1[i] = i;
}
const size_t size2 = 10;
uint32_t *arr2 = new uint32_t[size2];
size_t arr2_len = 0;
for(size_t i = 2; i < size2; i++) {
if(i % 3 == 0) {
arr2[arr2_len++] = i;
}
}
// arr1: [0..8] , arr2: [3, 6, 9]
uint32_t *results = nullptr;
uint32_t results_size = ArrayUtils::and_scalar(arr1, size1, arr2, arr2_len, &results);
ASSERT_EQ(2, results_size);
std::vector<uint32_t> expected = {3, 6};
for(size_t i = 0; i < results_size; i++) {
ASSERT_EQ(expected[i], results[i]);
}
delete [] results;
delete [] arr1;
delete [] arr2;
}
TEST(SortedArrayTest, OrScalarMergeShouldRemoveDuplicates) {
const size_t size1 = 9;
uint32_t *arr1 = new uint32_t[size1];
for(size_t i = 0; i < size1; i++) {
arr1[i] = i;
}
const size_t size2 = 10;
uint32_t *arr2 = new uint32_t[size2];
size_t arr2_len = 0;
for(size_t i = 2; i < size2; i++) {
if(i % 3 == 0) {
arr2[arr2_len++] = i;
}
}
// arr1: [0..8] , arr2: [3, 6, 9]
uint32_t *results = nullptr;
uint32_t results_size = ArrayUtils::or_scalar(arr1, size1, arr2, arr2_len, &results);
ASSERT_EQ(10, results_size);
std::vector<uint32_t> expected = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
for(size_t i = 0; i < results_size; i++) {
ASSERT_EQ(expected[i], results[i]);
}
delete[] results;
delete[] arr1;
delete[] arr2;
}
TEST(SortedArrayTest, OrScalarMergeShouldRemoveDuplicatesAtBoundary) {
const size_t size1 = 9;
uint32_t *arr1 = new uint32_t[size1];
for(auto i = 0; i < 9; i++) {
arr1[i] = i;
}
std::vector<uint32_t> vec2 = {0, 4, 5};
uint32_t *arr2 = new uint32_t[vec2.size()];
auto j = 0;
for(auto i: vec2) {
arr2[j++] = i;
}
uint32_t *results = nullptr;
uint32_t results_size = ArrayUtils::or_scalar(arr1, size1, arr2, vec2.size(), &results);
ASSERT_EQ(9, results_size);
std::vector<uint32_t> expected = {0, 1, 2, 3, 4, 5, 6, 7, 8};
for(size_t i = 0; i < results_size; i++) {
ASSERT_EQ(expected[i], results[i]);
}
delete[] results;
delete[] arr1;
delete[] arr2;
}
TEST(SortedArrayTest, OrScalarWithEitherArrayAsNull) {
const size_t size1 = 9;
uint32_t *arr1 = new uint32_t[size1];
for(auto i = 0; i < 9; i++) {
arr1[i] = i;
}
uint32_t *results = nullptr;
uint32_t results_size = ArrayUtils::or_scalar(arr1, size1, nullptr, 0, &results);
ASSERT_EQ(9, results_size);
delete[] results;
results = nullptr;
results_size = ArrayUtils::or_scalar(nullptr, 0, arr1, size1, &results);
ASSERT_EQ(9, results_size);
delete[] results;
results = nullptr;
delete[] arr1;
}
TEST(SortedArrayTest, FilterArray) {
const size_t size1 = 9;
uint32_t *arr1 = new uint32_t[size1];
for(auto i = 0; i < 9; i++) {
arr1[i] = i;
}
std::vector<uint32_t> vec2 = {0, 1, 5, 7, 8};
uint32_t *arr2 = new uint32_t[vec2.size()];
auto j = 0;
for(auto i: vec2) {
arr2[j++] = i;
}
uint32_t *results = nullptr;
uint32_t results_size = ArrayUtils::exclude_scalar(arr1, size1, arr2, vec2.size(), &results);
ASSERT_EQ(4, results_size);
std::vector<uint32_t> expected = {2, 3, 4, 6};
for(size_t i = 0; i < results_size; i++) {
ASSERT_EQ(expected[i], results[i]);
}
delete[] arr2;
delete[] results;
vec2 = {0, 1, 2, 3, 4, 5, 6, 7, 8};
arr2 = new uint32_t[vec2.size()];
j = 0;
for(auto i: vec2) {
arr2[j++] = i;
}
results = nullptr;
results_size = ArrayUtils::exclude_scalar(arr1, size1, arr2, vec2.size(), &results);
ASSERT_EQ(0, results_size);
delete[] results;
// on a larger array
results = nullptr;
std::vector<uint32_t> vec3 = {58, 118, 185, 260, 322, 334, 353};
std::vector<uint32_t> filter_ids = {58, 103, 116, 117, 137, 154, 191, 210, 211, 284, 299, 302, 306, 309, 332, 334, 360};
std::vector<uint32_t> expected_res = {118, 185, 260, 322, 353};
results_size = ArrayUtils::exclude_scalar(&vec3[0], vec3.size(), &filter_ids[0], filter_ids.size(), &results);
ASSERT_EQ(expected_res.size(), results_size);
for(size_t i=0; i<expected_res.size(); i++) {
ASSERT_EQ(expected_res[i], results[i]);
}
delete[] arr2;
delete[] arr1;
delete[] results;
}
TEST(SortedArrayTest, SkipToID) {
std::vector<uint32_t> array;
for (uint32_t i = 0; i < 10; i++) {
array.push_back(i * 3);
}
uint32_t index = 0;
bool found = ArrayUtils::skip_index_to_id(index, array.data(), array.size(), 15);
ASSERT_TRUE(found);
ASSERT_EQ(5, index);
index = 4;
found = ArrayUtils::skip_index_to_id(index, array.data(), array.size(), 3);
ASSERT_FALSE(found);
ASSERT_EQ(4, index);
index = 4;
found = ArrayUtils::skip_index_to_id(index, array.data(), array.size(), 12);
ASSERT_TRUE(found);
ASSERT_EQ(4, index);
index = 4;
found = ArrayUtils::skip_index_to_id(index, array.data(), array.size(), 24);
ASSERT_TRUE(found);
ASSERT_EQ(8, index);
index = 4;
found = ArrayUtils::skip_index_to_id(index, array.data(), array.size(), 25);
ASSERT_FALSE(found);
ASSERT_EQ(9, index);
index = 4;
found = ArrayUtils::skip_index_to_id(index, array.data(), array.size(), 30);
ASSERT_FALSE(found);
ASSERT_EQ(10, index);
index = 12;
found = ArrayUtils::skip_index_to_id(index, array.data(), array.size(), 30);
ASSERT_FALSE(found);
ASSERT_EQ(12, index);
}
| 5,858 | C++ | .cpp | 174 | 28.206897 | 124 | 0.586293 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,713 | collection_faceting_test.cpp | typesense_typesense/test/collection_faceting_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionFacetingTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_faceting";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
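// End-to-end faceting checks: facet counts and stats, facet queries with typo tolerance,
// wildcard facet_by expressions, and error handling for malformed facet queries.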
TEST_F(CollectionFacetingTest, FacetCounts) {
Collection *coll_array_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::vector<field> fields = {field("name", field_types::STRING, false),
field("name_facet", field_types::STRING, true),
field("age", field_types::INT32, true),
field("years", field_types::INT32_ARRAY, true),
field("rating", field_types::FLOAT, true),
field("timestamps", field_types::INT64_ARRAY, true),
field("tags", field_types::STRING_ARRAY, true),
field("optional_facet", field_types::INT64_ARRAY, true, true),};
std::vector<sort_by> sort_fields = { sort_by("age", "DESC") };
coll_array_fields = collectionManager.get_collection("coll_array_fields").get();
if(coll_array_fields == nullptr) {
coll_array_fields = collectionManager.create_collection("coll_array_fields", 4, fields, "age").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
nlohmann::json document = nlohmann::json::parse(json_line);
document["name_facet"] = document["name"];
const std::string & patched_json_line = document.dump();
coll_array_fields->add(patched_json_line);
}
infile.close();
query_fields = {"name"};
std::vector<std::string> facets = {"tags"};
// single facet with no filters
nlohmann::json results = coll_array_fields->search("Jeremy", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(4, results["facet_counts"][0].size());
ASSERT_EQ("tags", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(false, results["facet_counts"][0]["sampled"].get<bool>());
ASSERT_EQ(4, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["stats"].size());
ASSERT_EQ(4, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
ASSERT_STREQ("gold", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("silver", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("bronze", results["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][2]["count"]);
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][3]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][3]["count"]);
// facet with facet count limit
results = coll_array_fields->search("Jeremy", query_fields, "", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 2).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("gold", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("silver", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][1]["count"]);
// 2 facets, 1 text query with no filters
facets.clear();
facets.push_back("tags");
facets.push_back("name_facet");
results = coll_array_fields->search("Jeremy", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(2, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_STREQ("name_facet", results["facet_counts"][1]["field_name"].get<std::string>().c_str());
    // facet value must be one that's stored, not indexed (i.e. no tokenization/standardization)
ASSERT_STREQ("Jeremy Howard", results["facet_counts"][1]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(5, (int) results["facet_counts"][1]["counts"][0]["count"]);
// facet with filters
facets.clear();
facets.push_back("tags");
results = coll_array_fields->search("Jeremy", query_fields, "age: >24", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][2]["count"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][3]["count"]);
ASSERT_STREQ("silver", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("gold", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_STREQ("bronze", results["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][3]["value"].get<std::string>().c_str());
// facet with wildcard query
facets.clear();
facets.push_back("tags");
results = coll_array_fields->search("*", query_fields, "age: >24", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][2]["count"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][3]["count"]);
ASSERT_STREQ("silver", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("gold", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_STREQ("bronze", results["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][3]["value"].get<std::string>().c_str());
// facet with facet filter query (allows typo correction!)
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, " tags : sliver").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("silver", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
// facet with facet filter query matching 2 tokens
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: fxne platim").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>FINE</mark> <mark>PLATIN</mark>UM", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
// facet with facet filter query matching first token of an array
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: fine").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
// facet with facet filter query matching second token of an array
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: pltinum").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("FINE PLATINUM", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
// facet with wildcard
results = coll_array_fields->search("Jeremy", query_fields, "", {"ag*"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("age", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
// facet query on an integer field
results = coll_array_fields->search("*", query_fields, "", {"age"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "age: 2").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("age", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("24", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>2</mark>4", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("21", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>2</mark>1", results["facet_counts"][0]["counts"][1]["highlighted"].get<std::string>().c_str());
// facet on a float field without query to check on stats
results = coll_array_fields->search("*", query_fields, "", {"rating"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "").get();
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(4.880199885368347, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(0.0, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(9.99899959564209, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(24.400999426841736, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(5, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
// check for "0" case
ASSERT_STREQ("0", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
// facet query on a float field
results = coll_array_fields->search("*", query_fields, "", {"rating"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "rating: 7").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("rating", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("7.812", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>7</mark>.812", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(4.880199885368347, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(0.0, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(9.99899959564209, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(24.400999426841736, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(1, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
// facet query on an array integer field
results = coll_array_fields->search("*", query_fields, "", {"timestamps"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "timestamps: 142189002").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("timestamps", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("1421890022", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>142189002</mark>2", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(1106321222, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(348974822, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(1453426022, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(13275854664, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(1, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
// facet query that does not match any indexed value
results = coll_array_fields->search("*", query_fields, "", {facets}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, " tags : notfound").get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("tags", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(0, results["facet_counts"][0]["counts"].size());
// empty facet query value should return all facet values without any filtering
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: ").get();
ASSERT_EQ(5, results["hits"].size());
results = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags:").get();
ASSERT_EQ(5, results["hits"].size());
// Wildcard facet_by can have partial matches
results = coll_array_fields->search("*", query_fields, "", {"nam*"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("name_facet", results["facet_counts"][0]["field_name"].get<std::string>());
// Wildcard facet_by having no counts should not be returned
results = coll_array_fields->search("*", query_fields, "", {"optio*"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(0, results["facet_counts"].size());
results = coll_array_fields->search("*", query_fields, "", {"optional_facet"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("optional_facet", results["facet_counts"][0]["field_name"].get<std::string>());
// bad facet query syntax
auto res_op = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "foobar");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Facet query must be in the `facet_field: value` format.", res_op.error().c_str());
// unknown facet field
res_op = coll_array_fields->search("*", query_fields, "", {"foobar"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "foobar: baz");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Could not find a facet field named `foobar` in the schema.", res_op.error().c_str());
// only prefix matching with a wildcard is allowed
res_op = coll_array_fields->search("*", query_fields, "", {"*_facet"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10);
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Only prefix matching with a wildcard is allowed.", res_op.error().c_str());
// unknown wildcard facet field
res_op = coll_array_fields->search("*", query_fields, "", {"foo*"}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10);
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Could not find a facet field for `foo*` in the schema.", res_op.error().c_str());
// when facet query is given but no facet fields are specified, must return an error message
res_op = coll_array_fields->search("*", query_fields, "", {}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: foo");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("The `facet_query` parameter is supplied without a `facet_by` parameter.", res_op.error().c_str());
res_op = coll_array_fields->search("*", query_fields, "", {""}, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags: foo");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Could not find a facet field named `` in the schema.", res_op.error().c_str());
// given facet query field must be part of facet fields requested
res_op = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "name_facet: jeremy");
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Facet query refers to a facet field `name_facet` that is not part of `facet_by` parameter.", res_op.error().c_str());
// facet query with multiple colons should be fine (only the first colon is treated as the separator)
res_op = coll_array_fields->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "tags:foo:bar");
ASSERT_TRUE(res_op.ok());
collectionManager.drop_collection("coll_array_fields");
}
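// Faceting on a boolean field: counts, stats and highlighting for `in_stock`.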
TEST_F(CollectionFacetingTest, FacetCountsBool) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),
field("in_stock", field_types::BOOL, true)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "Ford Mustang";
doc["points"] = 25;
doc["in_stock"] = true;
coll1->add(doc.dump());
doc["id"] = "101";
doc["title"] = "Tesla Model S";
doc["points"] = 40;
doc["in_stock"] = false;
coll1->add(doc.dump());
doc["id"] = "102";
doc["title"] = "Chevrolet Beat";
doc["points"] = 10;
doc["in_stock"] = true;
coll1->add(doc.dump());
std::vector<std::string> facets = {"in_stock"};
nlohmann::json results = coll1->search("*", {"title"}, "in_stock:true", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "in_stock:true").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(1, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
ASSERT_STREQ("in_stock", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("true", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>true</mark>",
results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
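// Float facet values should retain their original precision in counts and highlights.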
TEST_F(CollectionFacetingTest, FacetCountsFloatPrecision) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::FLOAT, true)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "Ford Mustang";
doc["points"] = 113.4;
coll1->add(doc.dump());
std::vector<std::string> facets = {"points"};
nlohmann::json results = coll1->search("*", {"title"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}).get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("points", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("113.4", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("113.4",results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
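// Facet query highlighting: matched tokens in facet values are wrapped in <mark> tags.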
TEST_F(CollectionFacetingTest, FacetCountsHighlighting) {
Collection *coll1;
std::vector<field> fields = {field("categories", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = { sort_by("points", "DESC") };
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["categories"] = {"Cell Phones", "Cell Phone Accessories", "Cell Phone Cases & Clips"};
doc["points"] = 25;
coll1->add(doc.dump());
std::vector<std::string> facets = {"categories"};
nlohmann::json results = coll1->search("phone", {"categories"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "categories:cell").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("categories", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_STREQ("Cell Phones", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Cell</mark> Phones", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][1]["count"].get<size_t>());
ASSERT_STREQ("Cell Phone Accessories", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Cell</mark> Phone Accessories", results["facet_counts"][0]["counts"][1]["highlighted"].get<std::string>().c_str());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][2]["count"].get<size_t>());
ASSERT_STREQ("Cell Phone Cases & Clips", results["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Cell</mark> Phone Cases & Clips", results["facet_counts"][0]["counts"][2]["highlighted"].get<std::string>().c_str());
coll1->remove("100");
doc["categories"] = {"Cell Phones", "Unlocked Cell Phones", "All Unlocked Cell Phones" };
coll1->add(doc.dump());
results = coll1->search("phone", {"categories"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "categories:cell").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("categories", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("Cell Phones", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Cell</mark> Phones", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_STREQ("Unlocked Cell Phones", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_STREQ("Unlocked <mark>Cell</mark> Phones", results["facet_counts"][0]["counts"][1]["highlighted"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][2]["count"]);
ASSERT_STREQ("All Unlocked Cell Phones", results["facet_counts"][0]["counts"][2]["value"].get<std::string>().c_str());
ASSERT_STREQ("All Unlocked <mark>Cell</mark> Phones", results["facet_counts"][0]["counts"][2]["highlighted"].get<std::string>().c_str());
coll1->remove("100");
doc["categories"] = {"Cell Phones", "Cell Phone Accessories", "Cell Phone Cases & Clips"};
coll1->add(doc.dump());
results = coll1->search("phone", {"categories"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "categories:acces").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("categories", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("Cell Phone Accessories", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("Cell Phone <mark>Acces</mark>sories", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
// ensure that the facet query is NOT case-sensitive
results = coll1->search("phone", {"categories"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "categories:ACCES").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("categories", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("Cell Phone Accessories", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("Cell Phone <mark>Acces</mark>sories", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
// ensure that only the last token is treated as a prefix search
coll1->remove("100");
doc["categories"] = {"Cell Phones", "Cell Phone Accessories", "Cellophanes"};
coll1->add(doc.dump());
results = coll1->search("phone", {"categories"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "categories:cell ph").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("Cell Phones", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("Cell Phone Accessories", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
// facet query longer than a token is correctly matched with typo tolerance
// also ensure that setting per_page = 0 works fine
results = coll1->search("phone", {"categories"}, "", facets, sort_fields, {0}, 0, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "categories:cellx").get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(1, results["found"].get<uint32_t>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("<mark>Cello</mark>phanes", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Cell</mark> Phones", results["facet_counts"][0]["counts"][1]["highlighted"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Cell</mark> Phone Accessories", results["facet_counts"][0]["counts"][2]["highlighted"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
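// Facet stats (min/max/sum/avg/total_values) on float fields, with and without filters and facet queries.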
TEST_F(CollectionFacetingTest, FacetStatOnFloatFields) {
Collection *coll_float_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/float_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("score", field_types::FLOAT, false),
field("average", field_types::FLOAT, true)
};
std::vector<sort_by> sort_fields_desc = { sort_by("average", "DESC") };
coll_float_fields = collectionManager.get_collection("coll_float_fields").get();
if(coll_float_fields == nullptr) {
coll_float_fields = collectionManager.create_collection("coll_float_fields", 4, fields, "average").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_float_fields->add(json_line);
}
infile.close();
query_fields = {"title"};
std::vector<std::string> facets;
auto res_op = coll_float_fields->search("Jeremy", query_fields, "", {"average"}, sort_fields_desc, {0}, 10,
1, FREQUENCY, {false});
auto results = res_op.get();
ASSERT_EQ(7, results["hits"].size());
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(-21.3799991607666, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(300, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(277.8160007725237, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(39.68800011036053, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(7, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
results = coll_float_fields->search("*", query_fields, "average:>100", {"average"}, sort_fields_desc, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(300, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(300, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(300, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(300, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(1, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
// the facet query should not be considered when calculating facet stats (except total_values)
results = coll_float_fields->search("*", query_fields, "", {"average"}, sort_fields_desc, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),
10, "average: 11").get();
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(39.68800011036053, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(-21.3799991607666, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(300, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(277.8160007725237, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(1, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
collectionManager.drop_collection("coll_float_fields");
}
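// Facet stats on a negative float value (longitude-like field).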
TEST_F(CollectionFacetingTest, FacetStatsFloatLon) {
std::vector<field> fields = {
field("lon", field_types::FLOAT, true),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["lon"] = -99.184319;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"lon"}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(5, results["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(-99.1843, results["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(-99.1843, results["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(-99.1843, results["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(-99.1843, results["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(1, results["facet_counts"][0]["stats"]["total_values"].get<size_t>());
}
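// Facet values containing the same tokens in a different order must be counted as distinct values.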
TEST_F(CollectionFacetingTest, FacetCountOnSimilarStrings) {
Collection *coll1;
std::vector<field> fields = {field("categories", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, true)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["categories"] = {"England in India"};
doc["points"] = 25;
coll1->add(doc.dump());
doc["id"] = "101";
doc["categories"] = {"India in England"};
doc["points"] = 50;
coll1->add(doc.dump());
std::vector<std::string> facets = {"categories"};
nlohmann::json results = coll1->search("*", {"categories"}, "points:[25, 50]", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("India in England", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("England in India", results["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
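// Facet query prefix matching and candidate-size behaviour on a string facet.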
TEST_F(CollectionFacetingTest, FacetQueryTest) {
std::vector<field> fields = {
field("color", field_types::STRING, true),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::string> colors = {"apple red", "azure", "amazon green", "apricot orange",
"blue", "barrel blue", "banana yellow", "ball green", "baikal"};
for(size_t i = 0; i < 100; i++) {
nlohmann::json doc;
doc["color"] = colors[i % colors.size()];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// add colors that DON'T start with "b" to push these up the count list
for(size_t i = 0; i < 4; i++) {
nlohmann::json doc;
doc["color"] = colors[i];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("*", {},
"", {"color"}, {}, {2}, 1, 1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 5, "color:b", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(4, results["facet_counts"][0]["counts"].size()); // 4 is the default candidate size
// junk string should produce no facets
results = coll1->search("*", {},
"", {"color"}, {}, {2}, 1, 1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 5, "color:xsda", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(0, results["facet_counts"][0]["counts"].size());
results = coll1->search("*", {},
"", {"color"}, {}, {2}, 1, 1, FREQUENCY, {true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 5, "color:green a", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL, "top_values").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("amazon green", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("<mark>a</mark>mazon <mark>green</mark>", results["facet_counts"][0]["counts"][0]["highlighted"]);
}
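// Only the first colon in a facet query separates the field name from the value.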
TEST_F(CollectionFacetingTest, FacetQueryOnStringWithColon) {
std::vector<field> fields = {field("title", field_types::STRING, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
Collection* coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "foo:bar";
doc["points"] = 25;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res_op = coll1->search("*", {}, "", {"title"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "title: foo:ba");
ASSERT_TRUE(res_op.ok());
auto results = res_op.get();
ASSERT_STREQ("foo:bar", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("<mark>foo:ba</mark>r", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
results = coll1->search("*", {}, "", {"title"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "title: ").get();
ASSERT_STREQ("foo:bar", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("foo:bar", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
results = coll1->search("*", {}, "", {"title"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "").get();
ASSERT_STREQ("foo:bar", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_STREQ("foo:bar", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
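// Facet query on a string array field: multi-token prefixes and the facet query typo parameter.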
TEST_F(CollectionFacetingTest, FacetQueryOnStringArray) {
Collection* coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("genres", field_types::STRING_ARRAY, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 2, fields, "").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Song 1";
doc1["genres"] = {"Country Punk Rock", "Country", "Slow"};
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Song 2";
doc2["genres"] = {"Soft Rock", "Rock", "Electronic"};
nlohmann::json doc3;
doc3["id"] = "2";
doc3["title"] = "Song 3";
doc3["genres"] = {"Rockabilly", "Metal"};
nlohmann::json doc4;
doc4["id"] = "3";
doc4["title"] = "Song 4";
doc4["genres"] = {"Pop Rock", "Rock", "Fast"};
nlohmann::json doc5;
doc5["id"] = "4";
doc5["title"] = "Song 5";
doc5["genres"] = {"Pop", "Rockabilly", "Fast"};
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
ASSERT_TRUE(coll1->add(doc5.dump()).ok());
auto results = coll1->search("*", {}, "", {"genres"}, sort_fields, {0}, 0, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "genres: roc").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(5, results["facet_counts"][0]["counts"].size());
results = coll1->search("*", {}, "", {"genres"}, sort_fields, {0}, 0, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "genres: soft roc").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
results = coll1->search("*", {}, "", {"genres"}, sort_fields, {0}, 0, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "genres: punk roc").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Country <mark>Punk</mark> <mark>Roc</mark>k", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>());
results = coll1->search("*", {}, "", {"genres"}, sort_fields, {0}, 0, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "genres: country roc").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("<mark>Country</mark> Punk <mark>Roc</mark>k", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>());
// with facet query num typo parameter
results = coll1->search("*", {}, "", {"genres"}, sort_fields, {0}, 0, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "genres: eletronic",
30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 1).get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("<mark>Electroni</mark>c", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>());
results = coll1->search("*", {}, "", {"genres"}, sort_fields, {0}, 0, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "genres: eletronic",
30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 0).get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(0, results["facet_counts"][0]["counts"].size());
collectionManager.drop_collection("coll1");
}
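// With a large enough candidate size, a facet query should return all matching facet values.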
TEST_F(CollectionFacetingTest, FacetQueryReturnAllCandidates) {
std::vector<field> fields = {field("title", field_types::STRING, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
Collection* coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
std::vector<std::string> titles = {
"everest", "evergreen", "everlast", "estrange", "energy", "extra"
};
for(size_t i=0; i < titles.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["points"] = i;
doc["title"] = titles[i];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto res_op = coll1->search("*", {}, "", {"title"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "title:e", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
10, {off}, 32767, 32767, 2,
false, false);
ASSERT_TRUE(res_op.ok());
auto results = res_op.get();
ASSERT_EQ(6, results["facet_counts"][0]["counts"].size());
collectionManager.drop_collection("coll1");
}
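// Facet values that normalize to the same token should still be returned as distinct values.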
TEST_F(CollectionFacetingTest, FacetValuesShouldBeNormalized) {
std::vector<field> fields = {field("brand", field_types::STRING, true),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::vector<std::string>> records = {
{"BUQU"},
{"Buqu"},
{"bu-qu"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["brand"] = records[i][0];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("*", {},
"", {"brand"}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Buqu", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ("BUQU", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
ASSERT_EQ("bu-qu", results["facet_counts"][0]["counts"][2]["value"].get<std::string>());
collectionManager.drop_collection("coll1");
}
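// Same normalization behaviour for string array facet values.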
TEST_F(CollectionFacetingTest, FacetArrayValuesShouldBeNormalized) {
std::vector<field> fields = {field("brands", field_types::STRING_ARRAY, true),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::vector<std::string>> records = {
{"BUQU", "Buqu", "bu-qu"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["brands"] = nlohmann::json::array();
for(auto& str: records[i]) {
doc["brands"].push_back(str);
}
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("*", {},
"", {"brands"}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Buqu", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ("BUQU", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
ASSERT_EQ("bu-qu", results["facet_counts"][0]["counts"][2]["value"].get<std::string>());
collectionManager.drop_collection("coll1");
}
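// Faceting on a nested int field, plus wildcard facet field resolution on nested field names.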
TEST_F(CollectionFacetingTest, FacetByNestedIntField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "details", "type": "object", "optional": false },
{"name": "company.num_employees", "type": "int32", "optional": false, "facet": true },
{"name": "companyRank", "type": "int32", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"details": {"count": 1000},
"company": {"num_employees": 2000},
"companyRank": 100
})"_json;
auto doc2 = R"({
"details": {"count": 2000},
"company": {"num_employees": 2000},
"companyRank": 101
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
ASSERT_TRUE(coll1->add(doc2.dump(), CREATE).ok());
std::vector<sort_by> sort_fields = { sort_by("details.count", "ASC") };
auto results = coll1->search("*", {}, "", {"company.num_employees"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("company.num_employees", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_EQ("2000", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
// Nested wildcard faceting
std::vector<facet> wildcard_facets;
coll1->parse_facet("company.*", wildcard_facets);
ASSERT_EQ(1, wildcard_facets.size());
ASSERT_EQ("company.num_employees", wildcard_facets[0].field_name);
wildcard_facets.clear();
coll1->parse_facet("company*", wildcard_facets);
ASSERT_EQ(2, wildcard_facets.size());
ASSERT_EQ("company.num_employees", wildcard_facets[0].field_name);
ASSERT_EQ("companyRank", wildcard_facets[1].field_name);
}
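// Faceting on a field nested inside an array of objects.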
TEST_F(CollectionFacetingTest, FacetByNestedArrayField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "data", "type": "object", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"data": {"details": [{"name": "Foo"}, {"name": "Foo"}]}
})"_json;
auto doc2 = R"({
"data": {"details": [{"name": "Foo"}, {"name": "Foo"}]}
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
ASSERT_TRUE(coll1->add(doc2.dump(), CREATE).ok());
auto results = coll1->search("*", {}, "", {"data.details.name"}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("data.details.name", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_EQ("Foo", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
}
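// Faceting on a plain string[] field, with and without a facet query.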
TEST_F(CollectionFacetingTest, FacetByArrayField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "data", "type": "string[]", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"data": ["Foo", "Foo"]
})"_json;
auto doc2 = R"({
"data": ["Foo", "Foo", "Bazinga"]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
ASSERT_TRUE(coll1->add(doc2.dump(), CREATE).ok());
auto results = coll1->search("*", {}, "", {"data"}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("data", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_EQ("Foo", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][1]["count"].get<size_t>());
ASSERT_EQ("Bazinga", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
results = coll1->search("*", {}, "", {"data"}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "data:baz", 30, 4).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("data", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_EQ("Bazinga", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
}
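// parse_facet(): range facet syntax, wildcard facet fields, mixed lists and invalid labels.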
TEST_F(CollectionFacetingTest, FacetParseTest){
std::vector<field> fields = {
field("score", field_types::INT32, true),
field("grade", field_types::INT32, true),
field("rank", field_types::INT32, true),
field("range", field_types::INT32, true),
field("review", field_types::FLOAT, true),
field("sortindex", field_types::INT32, true),
field("scale", field_types::INT32, false),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::string> range_facet_fields {
"score(fail:[0, 40], pass:[40, 100])",
"grade(A:[80, 100], B:[60, 80], C:[40, 60])"
};
std::vector<facet> range_facets;
for(const std::string & facet_field: range_facet_fields) {
coll1->parse_facet(facet_field, range_facets);
}
ASSERT_EQ(2, range_facets.size());
ASSERT_STREQ("score", range_facets[0].field_name.c_str());
ASSERT_TRUE(range_facets[0].is_range_query);
ASSERT_GT(range_facets[0].facet_range_map.size(), 0);
ASSERT_STREQ("grade", range_facets[1].field_name.c_str());
ASSERT_TRUE(range_facets[1].is_range_query);
ASSERT_GT(range_facets[1].facet_range_map.size(), 0);
std::vector<std::string> normal_facet_fields {
"score",
"grade"
};
std::vector<facet> normal_facets;
for(const std::string & facet_field: normal_facet_fields) {
coll1->parse_facet(facet_field, normal_facets);
}
ASSERT_EQ(2, normal_facets.size());
ASSERT_STREQ("score", normal_facets[0].field_name.c_str());
ASSERT_STREQ("grade", normal_facets[1].field_name.c_str());
std::vector<std::string> wildcard_facet_fields {
"ran*",
"sc*",
};
std::vector<facet> wildcard_facets;
for(const std::string & facet_field: wildcard_facet_fields) {
coll1->parse_facet(facet_field, wildcard_facets);
}
ASSERT_EQ(3, wildcard_facets.size());
std::set<std::string> expected{"range", "rank", "score"};
for (size_t i = 0; i < wildcard_facets.size(); i++) {
ASSERT_TRUE(expected.count(wildcard_facets[i].field_name) == 1);
}
wildcard_facets.clear();
coll1->parse_facet("*", wildcard_facets);
// Last field is not a facet.
ASSERT_EQ(fields.size() - 1, wildcard_facets.size());
expected.clear();
for (size_t i = 0; i < fields.size() - 1; i++) {
expected.insert(fields[i].name);
}
for (size_t i = 0; i < wildcard_facets.size(); i++) {
ASSERT_TRUE(expected.count(wildcard_facets[i].field_name) == 1);
}
// should also allow zero or more spaces after ","
std::vector<std::string> mixed_facet_fields {
"score",
"grade(A:[80, 100], B:[60, 80], C:[40,60])",
"ra*",
};
std::vector<facet> mixed_facets;
for(const std::string & facet_field: mixed_facet_fields) {
coll1->parse_facet(facet_field, mixed_facets);
}
ASSERT_EQ(4, mixed_facets.size());
std::vector<facet*> mixed_facets_ptr;
for(auto& f: mixed_facets) {
mixed_facets_ptr.push_back(&f);
}
std::sort(mixed_facets_ptr.begin(), mixed_facets_ptr.end(), [](const facet* f1, const facet* f2) {
return f1->field_name < f2->field_name;
});
ASSERT_EQ("score", mixed_facets_ptr[3]->field_name);
ASSERT_EQ("grade", mixed_facets_ptr[0]->field_name);
ASSERT_TRUE(mixed_facets_ptr[0]->is_range_query);
ASSERT_GT(mixed_facets_ptr[0]->facet_range_map.size(), 0);
ASSERT_EQ("rank", mixed_facets_ptr[2]->field_name);
ASSERT_EQ("range", mixed_facets_ptr[1]->field_name);
std::vector<std::string> range_facet_float_fields {
"review(bad:[0, 2.5], good:[2.5, 5])"
};
std::vector<facet> float_facets;
for(const std::string & facet_field: range_facet_float_fields) {
auto res = coll1->parse_facet(facet_field, float_facets);
if(!res.error().empty()) {
LOG(ERROR) << res.error();
FAIL();
}
}
std::vector<std::string> range_facet_negative_range {
"review(bad:[-2.5, 2.5], good:[2.5, 5])"
};
std::vector<facet> negative_range;
for(const std::string & facet_field: range_facet_negative_range) {
auto res = coll1->parse_facet(facet_field, negative_range);
if(!res.error().empty()) {
LOG(ERROR) << res.error();
FAIL();
}
}
// a facet field whose name contains the `sort` keyword should parse successfully
std::vector<facet> range_facets_with_sort_as_field;
auto facet_range = "sortindex(Top:[85, 100], Average:[60, 85])";
coll1->parse_facet(facet_range, range_facets_with_sort_as_field);
ASSERT_EQ(1, range_facets_with_sort_as_field.size());
// range facet labels with special chars
std::vector<std::string> range_facet_special_chars{
"score(%0 - %19:[0, 20], %20 - %59:[20, 60], %60+:[60, ])",
"range($$$:[0, 20])"
};
std::vector<facet> facet_special_chars;
for(const std::string& facet_field: range_facet_special_chars) {
auto res = coll1->parse_facet(facet_field, facet_special_chars);
if(!res.error().empty()) {
LOG(ERROR) << res.error();
FAIL();
}
}
// should not allow a range label consisting only of space chars
facet_special_chars.clear();
auto only_space_char("review( :[0, 20])");
auto res = coll1->parse_facet(only_space_char, facet_special_chars);
ASSERT_FALSE(res.error().empty());
ASSERT_EQ(400, res.code());
ASSERT_EQ("Facet range value is not valid.", res.error());
}
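// Range facets on int and float fields, including error handling for bad fields and range values.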
TEST_F(CollectionFacetingTest, RangeFacetTest) {
std::vector<field> fields = {field("place", field_types::STRING, false),
field("state", field_types::STRING, true),
field("visitors", field_types::INT32, true),
field("rating", field_types::FLOAT, true),
field("trackingFrom", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["place"] = "Mysore Palace";
doc1["state"] = "Karnataka";
doc1["visitors"] = 235486;
doc1["rating"] = 4.7;
doc1["trackingFrom"] = 1900;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["place"] = "Hampi";
doc2["state"] = "Karnataka";
doc2["visitors"] = 187654;
doc2["rating"] = 2.9;
doc2["trackingFrom"] = 1900;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["place"] = "Mahabalipuram";
doc3["state"] = "TamilNadu";
doc3["visitors"] = 174684;
doc3["rating"] = 3.8;
doc3["trackingFrom"] = 1900;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["place"] = "Meenakshi Amman Temple";
doc4["state"] = "TamilNadu";
doc4["visitors"] = 246676;
doc4["rating"] = 4.5;
doc4["trackingFrom"] = 2000;
nlohmann::json doc5;
doc5["id"] = "4";
doc5["place"] = "Staue of Unity";
doc5["state"] = "Gujarat";
doc5["visitors"] = 345878;
doc5["rating"] = 3.5;
doc5["trackingFrom"] = 2000;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
ASSERT_TRUE(coll1->add(doc5.dump()).ok());
auto result = coll1->search("Karnataka", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[200000, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
if(!result.ok()) {
LOG(INFO) << result.error();
}
auto results = result.get();
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("Busy", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ("VeryBusy", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
auto results2 = coll1->search("Gujarat", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[200000, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(1, results2["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, results2["facet_counts"][0]["counts"][0]["count"].get<std::size_t>());
ASSERT_STREQ("VeryBusy", results2["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_TRUE(results2["facet_counts"][0]["counts"][1]["value"] == nullptr);
// ensure that an unknown facet field is handled
auto results3 = coll1->search("Gujarat", {"state"},
"", {"visitorsz(Busy:[0, 200000], VeryBusy:[200000, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
ASSERT_FALSE(results3.ok());
ASSERT_EQ("Could not find a facet field named `visitorsz` in the schema.", results3.error());
auto results4 = coll1->search("*", {"state"},
"", {"trackingFrom(Old:[0, 1910], New:[1910, 2100])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(2, results4["facet_counts"][0]["counts"].size());
ASSERT_EQ(3, results4["facet_counts"][0]["counts"][0]["count"].get<std::size_t>());
ASSERT_EQ("Old", results4["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(2, results4["facet_counts"][0]["counts"][1]["count"].get<std::size_t>());
ASSERT_EQ("New", results4["facet_counts"][0]["counts"][1]["value"].get<std::string>());
//range faceting on float fields
results4 = coll1->search("*", {"state"},
"", {"rating(Average:[0, 3], Good:[3, 4], Best:[4, 5])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(3, results4["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, results4["facet_counts"][0]["counts"][0]["count"].get<std::size_t>());
ASSERT_EQ("Good", results4["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(2, results4["facet_counts"][0]["counts"][1]["count"].get<std::size_t>());
ASSERT_EQ("Best", results4["facet_counts"][0]["counts"][1]["value"].get<std::string>());
ASSERT_EQ(1, results4["facet_counts"][0]["counts"][2]["count"].get<std::size_t>());
ASSERT_EQ("Average", results4["facet_counts"][0]["counts"][2]["value"].get<std::string>());
//stats on float field
ASSERT_EQ(5, results4["facet_counts"][0]["stats"].size());
ASSERT_FLOAT_EQ(3.8799999713897706, results4["facet_counts"][0]["stats"]["avg"].get<double>());
ASSERT_FLOAT_EQ(2.9000000953674316, results4["facet_counts"][0]["stats"]["min"].get<double>());
ASSERT_FLOAT_EQ(4.699999809265137, results4["facet_counts"][0]["stats"]["max"].get<double>());
ASSERT_FLOAT_EQ(19.399999856948853, results4["facet_counts"][0]["stats"]["sum"].get<double>());
ASSERT_FLOAT_EQ(3, results4["facet_counts"][0]["stats"]["total_values"].get<size_t>());
// ensure that range facets are restricted to integer and float fields
auto rop = coll1->search("Karnataka", {"state"},
"", {"state(Busy:[0, 200000], VeryBusy:[200000, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
ASSERT_FALSE(rop.ok());
ASSERT_EQ("Range facet is restricted to only integer and float fields.", rop.error());
// ensure that bad facet range values are handled
rop = coll1->search("Karnataka", {"state"},
"", {"visitors(Busy:[alpha, 200000], VeryBusy:[200000, beta])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
ASSERT_FALSE(rop.ok());
ASSERT_EQ("Facet range value is not valid.", rop.error());
collectionManager.drop_collection("coll1");
}
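// Range facet buckets must be continuous (no gaps or overlaps between ranges).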
TEST_F(CollectionFacetingTest, RangeFacetContinuity) {
std::vector<field> fields = {field("place", field_types::STRING, false),
field("state", field_types::STRING, false),
field("visitors", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["place"] = "Mysore Palace";
doc1["state"] = "Karnataka";
doc1["visitors"] = 235486;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["place"] = "Hampi";
doc2["state"] = "Karnataka";
doc2["visitors"] = 187654;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["place"] = "Mahabalipuram";
doc3["state"] = "TamilNadu";
doc3["visitors"] = 174684;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["place"] = "Meenakshi Amman Temple";
doc4["state"] = "TamilNadu";
doc4["visitors"] = 246676;
nlohmann::json doc5;
doc5["id"] = "4";
doc5["place"] = "Staue of Unity";
doc5["state"] = "Gujarat";
doc5["visitors"] = 345878;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
ASSERT_TRUE(coll1->add(doc5.dump()).ok());
auto results = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[200001, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
ASSERT_STREQ("Ranges in range facet syntax should be continous.", results.error().c_str());
auto results2 = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[199999, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
ASSERT_STREQ("Ranges in range facet syntax should be continous.", results2.error().c_str());
collectionManager.drop_collection("coll1");
}
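// Malformed range facet syntax should surface specific parse errors.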
TEST_F(CollectionFacetingTest, RangeFacetTypo) {
std::vector<field> fields = {field("place", field_types::STRING, false),
field("state", field_types::STRING, false),
field("visitors", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["place"] = "Mysore Palace";
doc1["state"] = "Karnataka";
doc1["visitors"] = 235486;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["place"] = "Hampi";
doc2["state"] = "Karnataka";
doc2["visitors"] = 187654;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["place"] = "Mahabalipuram";
doc3["state"] = "TamilNadu";
doc3["visitors"] = 174684;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["place"] = "Meenakshi Amman Temple";
doc4["state"] = "TamilNadu";
doc4["visitors"] = 246676;
nlohmann::json doc5;
doc5["id"] = "4";
doc5["place"] = "Staue of Unity";
doc5["state"] = "Gujarat";
doc5["visitors"] = 345878;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
ASSERT_TRUE(coll1->add(doc5.dump()).ok());
auto results = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[200000, 500000)"}, //missing ']' at end
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
ASSERT_STREQ("Error splitting the facet range values.", results.error().c_str());
auto results2 = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:200000, 500000])"}, //missing '[' in second range
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
ASSERT_STREQ("Invalid facet param `VeryBusy`.", results2.error().c_str());
auto results3 = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000] VeryBusy:[200000, 500000])"}, //missing ',' between ranges
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
ASSERT_STREQ("Invalid facet format.", results3.error().c_str());
auto results4 = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0 200000], VeryBusy:[200000, 500000])"}, //missing ',' between first ranges values
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
ASSERT_STREQ("Facet range value is not valid.", results4.error().c_str());
auto results5 = coll1->search("TamilNadu", {"state"},
"", {"visitors(Busy:[0, 200000 VeryBusy:200000, 500000])"}, //missing '],' and '['
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
ASSERT_STREQ("Error splitting the facet range values.", results5.error().c_str());
collectionManager.drop_collection("coll1");
}
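// Range facet buckets with float bounds.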
TEST_F(CollectionFacetingTest, RangeFacetsFloatRange) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("inches", field_types::FLOAT, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "TV 1";
doc["inches"] = 32.4;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "TV 2";
doc["inches"] = 55;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["name"] = "TV 3";
doc["inches"] = 55.6;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"inches(small:[0, 55.5])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("small", results["facet_counts"][0]["counts"][0]["value"]);
results = coll1->search("*", {},
"", {"inches(big:[55, 55.6])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true,
6000*1000, 4, 7, fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "", true, 0, max_score, 100, 0, 0, "top_values").get();
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("big", results["facet_counts"][0]["counts"][0]["value"]);
}
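// Open-ended range facet buckets (missing lower or upper bound).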
TEST_F(CollectionFacetingTest, RangeFacetsMinMaxRange) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("inches", field_types::FLOAT, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "TV 1";
doc["inches"] = 32.4;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "TV 2";
doc["inches"] = 55;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["name"] = "TV 3";
doc["inches"] = 55.6;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"inches(small:[0, 55], large:[55, ])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("large", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ("small", results["facet_counts"][0]["counts"][1]["value"]);
results = coll1->search("*", {},
"", {"inches(small:[,55])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("small", results["facet_counts"][0]["counts"][0]["value"]);
}
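// Range labels may contain spaces and are echoed back verbatim as the facet value.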
TEST_F(CollectionFacetingTest, RangeFacetRangeLabelWithSpace) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("inches", field_types::FLOAT, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "TV 1";
doc["inches"] = 32.4;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "TV 2";
doc["inches"] = 55;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["name"] = "TV 3";
doc["inches"] = 55.6;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"inches(small tvs with display size:[0,55])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("small tvs with display size", results["facet_counts"][0]["counts"][0]["value"]);
}
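// Range bounds may be negative floats.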
TEST_F(CollectionFacetingTest, RangeFacetRangeNegativeRanges) {
std::vector<field> fields = {field("team", field_types::STRING, false),
field("nrr", field_types::FLOAT, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "",
{},{}).get();
nlohmann::json doc;
doc["id"] = "0";
doc["team"] = "india";
doc["nrr"] = 1.353;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["team"] = "australia";
doc["nrr"] = -0.193;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["team"] = "pakistan";
doc["nrr"] = -0.400;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "3";
doc["team"] = "afghanistan";
doc["nrr"] = -0.969;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "4";
doc["team"] = "srilanka";
doc["nrr"] = -1.048;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "5";
doc["team"] = "england";
doc["nrr"] = -1.248;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "6";
doc["team"] = "bangladesh";
doc["nrr"] = -1.253;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "7";
doc["team"] = "new zealand";
doc["nrr"] = 1.481;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"nrr(poor:[-1.5,-1], decent:[-1,0], good:[0,2])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("poor", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(3, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ("decent", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][2]["count"]);
ASSERT_EQ("good", results["facet_counts"][0]["counts"][2]["value"]);
}
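// Faceting should also work when the query is a phrase search (quoted query string).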
TEST_F(CollectionFacetingTest, FacetWithPhraseSearch) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("rating", field_types::FLOAT, false)};
std::vector<sort_by> sort_fields = {sort_by("rating", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "rating").get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The Shawshank Redemption";
doc["rating"] = 9.3;
coll1->add(doc.dump());
doc["id"] = "1";
doc["title"] = "The Godfather";
doc["rating"] = 9.2;
coll1->add(doc.dump());
std::vector<std::string> facets = {"title"};
nlohmann::json results = coll1->search(R"("shawshank")", {"title"}, "", facets, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 2,"", 30UL, 4UL,
"", 1UL, "", "", {}, 3UL, "<mark>", "</mark>", {},
4294967295UL, true, false, true, "", false, 6000000UL, 4UL,
7UL, fallback, 4UL, {off}, 32767UL, 32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL, 0UL, 4294967295UL, "exhaustive").get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("The Shawshank Redemption", results["facet_counts"][0]["counts"][0]["value"]);
}
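// Facet sampling: once the hit count crosses the sample threshold, facet counts are estimated
// from facet_sample_percent of the documents and the response carries "sampled": true.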
TEST_F(CollectionFacetingTest, SampleFacetCounts) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "color", "type": "string", "facet": true}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::mt19937 gen(137723); // use constant seed to make sure that counts don't jump around
std::uniform_int_distribution<> distr(1, 100); // 1 to 100 inclusive
size_t count_blue = 0, count_red = 0;
for(size_t i = 0; i < 1000; i++) {
nlohmann::json doc;
if(distr(gen) % 2 == 0) {
doc["color"] = "blue";
count_blue++;
} else {
doc["color"] = "red";
count_red++;
}
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto res = coll1->search("*", {}, "", {"color"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 10, 0).get();
ASSERT_EQ(1000, res["found"].get<size_t>());
ASSERT_EQ(1, res["facet_counts"].size());
ASSERT_EQ(2, res["facet_counts"][0]["counts"].size());
// verify approximate counts
ASSERT_GE(res["facet_counts"][0]["counts"][0]["count"].get<size_t>(), 250);
ASSERT_GE(res["facet_counts"][0]["counts"][1]["count"].get<size_t>(), 250);
ASSERT_TRUE(res["facet_counts"][0]["sampled"].get<bool>());
// when sample threshold is high, don't estimate
res = coll1->search("*", {}, "", {"color"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 10, 10000).get();
ASSERT_EQ(1000, res["found"].get<size_t>());
ASSERT_EQ(1, res["facet_counts"].size());
ASSERT_EQ(2, res["facet_counts"][0]["counts"].size());
for(size_t i = 0; i < res["facet_counts"][0]["counts"].size(); i++) {
if(res["facet_counts"][0]["counts"][i]["value"].get<std::string>() == "red") {
ASSERT_EQ(count_red, res["facet_counts"][0]["counts"][i]["count"].get<size_t>());
} else {
ASSERT_EQ(count_blue, res["facet_counts"][0]["counts"][i]["count"].get<size_t>());
}
}
ASSERT_FALSE(res["facet_counts"][0]["sampled"].get<bool>());
// facet sample percent zero is treated as not sampled
res = coll1->search("*", {}, "", {"color"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 0, 10).get();
ASSERT_EQ(1000, res["found"].get<size_t>());
ASSERT_EQ(1, res["facet_counts"].size());
ASSERT_EQ(2, res["facet_counts"][0]["counts"].size());
ASSERT_FALSE(res["facet_counts"][0]["sampled"].get<bool>());
// test for sample percent > 100
auto res_op = coll1->search("*", {}, "", {"color"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 200, 0);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Value of `facet_sample_percent` must be less than 100.", res_op.error());
}
TEST_F(CollectionFacetingTest, FacetOnArrayFieldWithSpecialChars) {
std::vector<field> fields = {
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, true),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["tags"] = {"gamma"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["tags"] = {"alpha", "| . |", "beta", "gamma"};
doc["points"] = 10;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"tags"}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(4, results["facet_counts"][0]["counts"].size());
for(size_t i = 0; i < results["facet_counts"][0]["counts"].size(); i++) {
auto fvalue = results["facet_counts"][0]["counts"][i]["value"].get<std::string>();
if(fvalue == "gamma") {
ASSERT_EQ(2, results["facet_counts"][0]["counts"][i]["count"].get<size_t>());
} else {
ASSERT_EQ(1, results["facet_counts"][0]["counts"][i]["count"].get<size_t>());
}
}
}
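// A float facet value with no fractional part should be rendered without a trailing ".0".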
TEST_F(CollectionFacetingTest, FloatFieldValueTruncation) {
std::vector<field> fields = {
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::FLOAT, true),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["tags"] = {"gamma"};
doc["points"] = 300;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"points"}, {}, {2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("300", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
}
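// facet_return_parent (the last search argument below) asks for the immediate parent object of a
// nested facet field to be returned as "parent" alongside each facet value.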
TEST_F(CollectionFacetingTest, FacetingReturnParent) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "value.color", "type": "string", "optional": false, "facet": true },
{"name": "value.r", "type": "int32", "optional": false, "facet": true },
{"name": "value.g", "type": "int32", "optional": false, "facet": true },
{"name": "value.b", "type": "int32", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"value": {
"color": "red",
"r": 255,
"g": 0,
"b": 0
}
})"_json;
nlohmann::json doc2 = R"({
"value": {
"color": "blue",
"r": 0,
"g": 0,
"b": 255
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {},"", {"value.color"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "exhaustive", 30000,
2, "", {"value.color"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("red", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}", results["facet_counts"][0]["counts"][1]["parent"].dump());
ASSERT_EQ("blue", results["facet_counts"][0]["counts"][1]["value"]);
// facet fields not listed in facet_return_parent return only the facet value, without the immediate parent, for those fields
search_op = coll1->search("*", {},"", {"value.color"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "exhaustive", 30000,
2, "", {});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("red", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("blue", results["facet_counts"][0]["counts"][1]["value"]);
search_op = coll1->search("*", {},"", {"value.color", "value.r"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "exhaustive", 30000,
2, "", {"value.r"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(2, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("red", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("blue", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ(2, results["facet_counts"][1]["counts"].size());
ASSERT_EQ("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}", results["facet_counts"][1]["counts"][0]["parent"].dump());
ASSERT_EQ("0", results["facet_counts"][1]["counts"][0]["value"]);
ASSERT_EQ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}", results["facet_counts"][1]["counts"][1]["parent"].dump());
ASSERT_EQ("255", results["facet_counts"][1]["counts"][1]["value"]);
//return parent for multiple facet fields
search_op = coll1->search("*", {},"", {"value.color", "value.r", "value.g", "value.b"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "exhaustive", 30000,
2, "", {"value.r", "value.g", "value.b"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(4, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("red", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("blue", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ(2, results["facet_counts"][1]["counts"].size());
ASSERT_EQ("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}", results["facet_counts"][1]["counts"][0]["parent"].dump());
ASSERT_EQ("0", results["facet_counts"][1]["counts"][0]["value"]);
ASSERT_EQ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}", results["facet_counts"][1]["counts"][1]["parent"].dump());
ASSERT_EQ("255", results["facet_counts"][1]["counts"][1]["value"]);
ASSERT_EQ(1, results["facet_counts"][2]["counts"].size());
ASSERT_EQ("0", results["facet_counts"][2]["counts"][0]["value"]);
//same facet value appearing in multiple records can return any parent
ASSERT_TRUE(("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}" == results["facet_counts"][2]["counts"][0]["parent"].dump())
|| ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}" == results["facet_counts"][2]["counts"][0]["parent"].dump()));
ASSERT_EQ(2, results["facet_counts"][3]["counts"].size());
ASSERT_EQ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}", results["facet_counts"][3]["counts"][0]["parent"].dump());
ASSERT_EQ("0", results["facet_counts"][3]["counts"][0]["value"]);
ASSERT_EQ("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}", results["facet_counts"][3]["counts"][1]["parent"].dump());
ASSERT_EQ("255", results["facet_counts"][3]["counts"][1]["value"]);
}
TEST_F(CollectionFacetingTest, FacetingReturnParentDeepNested) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "product.specification.detail.width", "type": "int32", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"product" : {
"specification": {
"detail" : {
"width": 25
}
}
}
})"_json;
nlohmann::json doc2 = R"({
"product" : {
"specification": {
"detail" : {
"width": 30
}
}
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {},"", {"product.specification.detail.width"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "exhaustive", 30000,
2, "", {"product.specification.detail.width"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("{\"width\":30}", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("30", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("{\"width\":25}", results["facet_counts"][0]["counts"][1]["parent"].dump());
ASSERT_EQ("25", results["facet_counts"][0]["counts"][1]["value"]);
}
TEST_F(CollectionFacetingTest, FacetingReturnParentObject) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "value", "type": "object", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"value": {
"color": "red",
"r": 255,
"g": 0,
"b": 0
}
})"_json;
nlohmann::json doc2 = R"({
"value": {
"color": "blue",
"r": 0,
"g": 0,
"b": 255
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {},"", {"value.color"},
{}, {2}, 10, 1,FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "",{},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000*1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "exhaustive", 30000,
2, "", {"value.color"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("{\"b\":0,\"color\":\"red\",\"g\":0,\"r\":255}", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("red", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("{\"b\":255,\"color\":\"blue\",\"g\":0,\"r\":0}", results["facet_counts"][0]["counts"][1]["parent"].dump());
ASSERT_EQ("blue", results["facet_counts"][0]["counts"][1]["value"]);
}
TEST_F(CollectionFacetingTest, FacetingReturnParentArrayFields) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "tags.id", "type": "string[]", "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"tags": [
{
"id": "tag-1",
"name": "name for tag-1"
},
{
"id": "tag-2",
"name": "name for tag-2"
}
]
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {}, "", {"tags.id"},
{}, {2}, 10, 1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "", {},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "exhaustive", 30000,
2, "", {"tags.id"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("{\"id\":\"tag-2\",\"name\":\"name for tag-2\"}", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("tag-2", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("{\"id\":\"tag-1\",\"name\":\"name for tag-1\"}", results["facet_counts"][0]["counts"][1]["parent"].dump());
ASSERT_EQ("tag-1", results["facet_counts"][0]["counts"][1]["value"]);
}
TEST_F(CollectionFacetingTest, FacetingReturnParentArrayFields2) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "nestedCategories", "type": "object"},
{"name": "nestedCategories.categories.FullPath", "type": "string[]", "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"nestedCategories": {
"categories": [
{"FullPath": "foobar"}
]
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {}, "", {"nestedCategories.categories.FullPath"},
{}, {2}, 10, 1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "", {},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "exhaustive", 30000,
2, "", {"nestedCategories.categories.FullPath"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(R"({"FullPath":"foobar"})", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("foobar", results["facet_counts"][0]["counts"][0]["value"]);
}
TEST_F(CollectionFacetingTest, FacetingReturnParentArrayFields3) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "nestedCategories", "type": "object"},
{"name": "nestedCategories.categories", "type": "string[]", "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"nestedCategories": {
"categories": [
"hello", "world"
]
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", {}, "", {"nestedCategories.categories"},
{}, {2}, 10, 1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "",
Index::TYPO_TOKENS_THRESHOLD, "", "", {},
3, "<mark>", "</mark>", {},
UINT32_MAX, true, false, true,
"", false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX, INT16_MAX,
2, 2, false, "",
true, 0, max_score, 100,
0, 0, "exhaustive", 30000,
2, "", {"nestedCategories.categories"});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("{\"categories\":[\"hello\",\"world\"]}", results["facet_counts"][0]["counts"][0]["parent"].dump());
ASSERT_EQ("world", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("{\"categories\":[\"hello\",\"world\"]}", results["facet_counts"][0]["counts"][1]["parent"].dump());
ASSERT_EQ("hello", results["facet_counts"][0]["counts"][1]["value"]);
}
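// sort_by:_alpha orders facet values lexicographically (asc/desc) instead of by count.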
TEST_F(CollectionFacetingTest, FacetSortByAlpha) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "phone", "type": "string", "optional": false, "facet": true },
{"name": "brand", "type": "string", "optional": false, "facet": true },
{"name": "rating", "type": "float", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
nlohmann::json doc;
doc["phone"] = "Oneplus 11R";
doc["brand"] = "Oneplus";
doc["rating"] = 4.6;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "Fusion Plus";
doc["brand"] = "Moto";
doc["rating"] = 4.2;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "S22 Ultra";
doc["brand"] = "Samsung";
doc["rating"] = 4.1;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "GT Master";
doc["brand"] = "Realme";
doc["rating"] = 4.4;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "T2";
doc["brand"] = "Vivo";
doc["rating"] = 4.0;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "Mi 6";
doc["brand"] = "Xiaomi";
doc["rating"] = 3.9;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "Z6 Lite";
doc["brand"] = "Iqoo";
doc["rating"] = 4.3;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
//sort facets by phone in asc order
auto search_op = coll1->search("*", {}, "", {"phone(sort_by:_alpha:asc)"},
{}, {2});
if (!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(7, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Fusion Plus", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("GT Master", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("Mi 6", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("Oneplus 11R", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("S22 Ultra", results["facet_counts"][0]["counts"][4]["value"]);
ASSERT_EQ("T2", results["facet_counts"][0]["counts"][5]["value"]);
ASSERT_EQ("Z6 Lite", results["facet_counts"][0]["counts"][6]["value"]);
//sort facets by brand in desc order
search_op = coll1->search("*", {}, "", {"brand(sort_by:_alpha:desc)"},
{}, {2});
if (!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(7, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Xiaomi", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("Vivo", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("Samsung", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("Realme", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("Oneplus", results["facet_counts"][0]["counts"][4]["value"]);
ASSERT_EQ("Moto", results["facet_counts"][0]["counts"][5]["value"]);
ASSERT_EQ("Iqoo", results["facet_counts"][0]["counts"][6]["value"]);
//sort facets by brand in desc order and phone in asc order
search_op = coll1->search("*", {}, "", {"brand(sort_by:_alpha:desc)",
"phone(sort_by:_alpha:asc)"},
{}, {2});
if (!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(2, results["facet_counts"].size());
ASSERT_EQ(7, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Xiaomi", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("Vivo", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("Samsung", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("Realme", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("Oneplus", results["facet_counts"][0]["counts"][4]["value"]);
ASSERT_EQ("Moto", results["facet_counts"][0]["counts"][5]["value"]);
ASSERT_EQ("Iqoo", results["facet_counts"][0]["counts"][6]["value"]);
ASSERT_EQ(7, results["facet_counts"][1]["counts"].size());
ASSERT_EQ("Fusion Plus", results["facet_counts"][1]["counts"][0]["value"]);
ASSERT_EQ("GT Master", results["facet_counts"][1]["counts"][1]["value"]);
ASSERT_EQ("Mi 6", results["facet_counts"][1]["counts"][2]["value"]);
ASSERT_EQ("Oneplus 11R", results["facet_counts"][1]["counts"][3]["value"]);
ASSERT_EQ("S22 Ultra", results["facet_counts"][1]["counts"][4]["value"]);
ASSERT_EQ("T2", results["facet_counts"][1]["counts"][5]["value"]);
ASSERT_EQ("Z6 Lite", results["facet_counts"][1]["counts"][6]["value"]);
}
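// Facet values can also be sorted by another, numeric field of the same nested object.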
TEST_F(CollectionFacetingTest, FacetSortByOtherField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "receipe", "type": "object", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"receipe": {
"name": "cheese pizza",
"calories": 300,
"origin": "america"
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc2 = R"({
"receipe": {
"name": "noodles",
"calories": 250,
"origin": "china"
}
})"_json;
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc3 = R"({
"receipe": {
"name": "hamburger",
"calories": 350,
"origin": "america"
}
})"_json;
add_op = coll1->add(doc3.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc4 = R"({
"receipe": {
"name": "schezwan rice",
"calories": 150,
"origin": "china"
}
})"_json;
add_op = coll1->add(doc4.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc5 = R"({
"receipe": {
"name": "butter chicken",
"calories": 270,
"origin": "india"
}
})"_json;
add_op = coll1->add(doc5.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
//sort facet values by calories in asc order
auto search_op = coll1->search("*", {},"",
{"receipe.name(sort_by:receipe.calories:asc)"},
{}, {2});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(5, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("schezwan rice", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("noodles", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("butter chicken", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("cheese pizza", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("hamburger", results["facet_counts"][0]["counts"][4]["value"]);
//sort facet values by calories in desc order
search_op = coll1->search("*", {},"",
{"receipe.name(sort_by:receipe.calories:desc)"},
{}, {2});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(5, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("hamburger", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("cheese pizza", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("butter chicken", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("noodles", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("schezwan rice", results["facet_counts"][0]["counts"][4]["value"]);
}
TEST_F(CollectionFacetingTest, FacetSortByOtherFloatField) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "investment", "type": "object", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({
"investment": {
"name": "Term Deposits",
"interest_rate": 7.1,
"class": "fixed"
}
})"_json;
auto add_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc2 = R"({
"investment": {
"name": "Gold",
"interest_rate": 5.4,
"class": "fixed"
}
})"_json;
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc3 = R"({
"investment": {
"name": "Mutual Funds",
"interest_rate": 12,
"class": "Equity"
}
})"_json;
add_op = coll1->add(doc3.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc4 = R"({
"investment": {
"name": "Land",
"interest_rate": 9.1,
"class": "real estate"
}
})"_json;
add_op = coll1->add(doc4.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
nlohmann::json doc5 = R"({
"investment": {
"name": "Bonds",
"interest_rate": 7.24,
"class": "g-sec"
}
})"_json;
add_op = coll1->add(doc5.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
//sort facet values by interest_rate in asc order
auto search_op = coll1->search("*", {},"",
{"investment.name(sort_by:investment.interest_rate:asc)"},
{}, {2});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(5, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Gold", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("Term Deposits", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("Bonds", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("Land", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("Mutual Funds", results["facet_counts"][0]["counts"][4]["value"]);
//sort facet values by interest_rate in desc order
search_op = coll1->search("*", {},"",
{"investment.name(sort_by:investment.interest_rate:desc)"},
{}, {2});
if(!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(5, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Mutual Funds", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("Land", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("Bonds", results["facet_counts"][0]["counts"][2]["value"]);
ASSERT_EQ("Term Deposits", results["facet_counts"][0]["counts"][3]["value"]);
ASSERT_EQ("Gold", results["facet_counts"][0]["counts"][4]["value"]);
}
TEST_F(CollectionFacetingTest, FacetSortValidation) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "phone", "type": "string", "optional": false, "facet": true },
{"name": "brand", "type": "string", "optional": false, "facet": true },
{"name": "rating", "type": "float", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
nlohmann::json doc;
doc["phone"] = "Oneplus 11R";
doc["brand"] = "Oneplus";
doc["rating"] = 4.6;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "Fusion Plus";
doc["brand"] = "Moto";
doc["rating"] = 4.2;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "S22 Ultra";
doc["brand"] = "Samsung";
doc["rating"] = 4.1;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
//try alpha sort on a non-string facet field
auto search_op = coll1->search("*", {}, "", {"rating(sort_by:_alpha:desc)"},
{}, {2});
ASSERT_EQ(400, search_op.code());
ASSERT_EQ("Facet field should be string type to apply alpha sort.", search_op.error());
//try sorting facet values by a string field
search_op = coll1->search("*", {}, "", {"phone(sort_by:brand:desc)"},
{}, {2});
ASSERT_EQ(400, search_op.code());
ASSERT_EQ("Sort field should be non string type to apply sort.", search_op.error());
//incorrect syntax
search_op = coll1->search("*", {}, "", {"phone(sort_by:desc)"},
{}, {2});
ASSERT_EQ(400, search_op.code());
ASSERT_EQ("Invalid sort format.", search_op.error());
search_op = coll1->search("*", {}, "", {"phone(sort:_alpha:desc)"},
{}, {2});
ASSERT_EQ(400, search_op.code());
ASSERT_EQ("Invalid facet param `sort`.", search_op.error());
//invalid param
search_op = coll1->search("*", {}, "", {"phone(sort_by:_alpha:foo)"},
{}, {2});
ASSERT_EQ(400, search_op.code());
ASSERT_EQ("Invalid sort param.", search_op.error());
//whitespace is allowed
search_op = coll1->search("*", {}, "", {"phone( sort_by: _alpha : asc)"},
{}, {2});
if (!search_op.ok()) {
LOG(ERROR) << search_op.error();
FAIL();
}
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Fusion Plus", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ("Oneplus 11R", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ("S22 Ultra", results["facet_counts"][0]["counts"][2]["value"]);
//facet sort with facet query should work
search_op = coll1->search("*", query_fields, "", {"phone(sort_by:_alpha:desc)"},
sort_fields, {0}, 10, 1, FREQUENCY,{false},
Index::DROP_TOKENS_THRESHOLD,spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "phone: plus",
30UL, 4UL,"", 1UL,
"", "", {}, 3UL, "<mark>",
"</mark>", {},4294967295UL, true,
false, true, "", false, 6000000UL,
4UL,7UL, fallback, 4UL, {off}, 32767UL,
32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL,
0UL, 4294967295UL, "exhaustive");
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Fusion Plus", results["facet_counts"][0]["counts"][0]["value"]);
}
TEST_F(CollectionFacetingTest, FacetQueryWithDifferentLocale) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "phone", "type": "string", "optional": false, "facet": true },
{"name": "brand", "type": "string", "optional": false, "facet": true },
{"name": "rating", "type": "float", "optional": false, "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
nlohmann::json doc;
doc["phone"] = "çapeta";
doc["brand"] = "Samsung";
doc["rating"] = 4.1;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["phone"] = "teléfono justo";
doc["brand"] = "Oneplus";
doc["rating"] = 4.6;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto search_op = coll1->search("*", query_fields, "", {"phone(sort_by:_alpha:desc)"},
sort_fields, {0}, 10, 1, FREQUENCY,{false},
Index::DROP_TOKENS_THRESHOLD,spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "phone: ç",
30UL, 4UL,"", 1UL,
"", "", {}, 3UL, "<mark>",
"</mark>", {},4294967295UL, true,
false, true, "", false, 6000000UL,
4UL,7UL, fallback, 4UL, {off}, 32767UL,
32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL,
0UL, 4294967295UL, "exhaustive");
auto results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("çapeta", results["facet_counts"][0]["counts"][0]["value"]);
search_op = coll1->search("*", query_fields, "", {"phone(sort_by:_alpha:desc)"},
sort_fields, {0}, 10, 1, FREQUENCY,{false},
Index::DROP_TOKENS_THRESHOLD,spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(),10, "phone: telé",
30UL, 4UL,"", 1UL,
"", "", {}, 3UL, "<mark>",
"</mark>", {},4294967295UL, true,
false, true, "", false, 6000000UL,
4UL,7UL, fallback, 4UL, {off}, 32767UL,
32767UL, 2UL, 2UL, false,
"", true, 0UL, max_score, 100UL,
0UL, 4294967295UL, "exhaustive");
results = search_op.get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("teléfono justo", results["facet_counts"][0]["counts"][0]["value"]);
}
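// Lower-level facet_index_t check: the per-field fhash -> int64 map grows on insert() and shrinks on remove().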
TEST_F(CollectionFacetingTest, FhashInt64MapTest) {
std::vector<int64_t> visitors = {227489798, 124098972, 180247624};
facet_index_t facet_index_v4;
std::unordered_map<facet_value_id_t, std::vector<uint32_t>, facet_value_id_t::Hash> fvalue_to_seq_ids;
std::unordered_map<uint32_t, std::vector<facet_value_id_t>> seq_id_to_fvalues;
facet_index_v4.initialize("visitors");
//insert visitor counts
int seq_id = 0;
for (auto it = visitors.begin(); it != visitors.end(); ++it) {
auto val = std::to_string(*it);
facet_value_id_t facet_value_id(val);
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
++seq_id;
}
facet_index_v4.insert("visitors", fvalue_to_seq_ids, seq_id_to_fvalues);
ASSERT_EQ(3, facet_index_v4.get_fhash_int64_map("visitors").size());
field visitorsf("visitors", field_types::INT64, true);
nlohmann::json doc;
doc["visitors"] = 227489798;
facet_index_v4.remove(doc, visitorsf, 0);
ASSERT_EQ(2, facet_index_v4.get_fhash_int64_map("visitors").size());
fvalue_to_seq_ids.clear();
seq_id_to_fvalues.clear();
facet_value_id_t facet_value_id("124798721");
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
facet_index_v4.insert("visitors", fvalue_to_seq_ids, seq_id_to_fvalues);
ASSERT_EQ(3, facet_index_v4.get_fhash_int64_map("visitors").size());
}
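// Range facets combined with group_by: per the assertions below, the range counts track groups
// rather than individual documents (VeryBusy is 2 even though four docs fall in that range).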
TEST_F(CollectionFacetingTest, RangeFacetTestWithGroupBy) {
std::vector<field> fields = {field("place", field_types::STRING, false),
field("state", field_types::STRING, true),
field("visitors", field_types::INT32, true),
field("rating", field_types::FLOAT, true),
field("trackingFrom", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["place"] = "Mysore Palace";
doc1["state"] = "Karnataka";
doc1["visitors"] = 235486;
doc1["rating"] = 4.5;
doc1["trackingFrom"] = 1900;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["place"] = "Hampi";
doc2["state"] = "Karnataka";
doc2["visitors"] = 201022;
doc2["rating"] = 4.5;
doc2["trackingFrom"] = 1900;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["place"] = "Mahabalipuram";
doc3["state"] = "TamilNadu";
doc3["visitors"] = 174684;
doc3["rating"] = 3.8;
doc3["trackingFrom"] = 1900;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["place"] = "Meenakshi Amman Temple";
doc4["state"] = "TamilNadu";
doc4["visitors"] = 246676;
doc4["rating"] = 4.5;
doc4["trackingFrom"] = 2000;
nlohmann::json doc5;
doc5["id"] = "4";
doc5["place"] = "Staue of Unity";
doc5["state"] = "Gujarat";
doc5["visitors"] = 345878;
doc5["rating"] = 3.8;
doc5["trackingFrom"] = 2000;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
ASSERT_TRUE(coll1->add(doc5.dump()).ok());
auto result = coll1->search("Karnataka", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[200000, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true);
if(!result.ok()) {
LOG(INFO) << result.error();
}
auto results = result.get();
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("VeryBusy", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
//apply group_by
result = coll1->search("*", {"state"},
"", {"visitors(Busy:[0, 200000], VeryBusy:[200000, 500000])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 10,
{}, {}, {"rating"}, 10,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", true);
if(!result.ok()) {
LOG(INFO) << result.error();
}
results = result.get();
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("VeryBusy", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ("Busy", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
ASSERT_EQ(2, results["grouped_hits"].size());
ASSERT_EQ(2, results["grouped_hits"][0]["hits"].size());
ASSERT_EQ(3.8, results["grouped_hits"][0]["group_key"][0]);
ASSERT_EQ(3, results["grouped_hits"][1]["hits"].size());
ASSERT_EQ(4.5, results["grouped_hits"][1]["group_key"][0]);
collectionManager.drop_collection("coll1");
}
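// Range labels can be alphanumeric (e.g. 15thAD).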
TEST_F(CollectionFacetingTest, RangeFacetAlphanumericLabels) {
std::vector<field> fields = {field("monuments", field_types::STRING, false),
field("year", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "",
{},{}).get();
nlohmann::json doc;
doc["id"] = "0";
doc["monuments"] = "Statue Of Unity";
doc["year"] = 2018;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["monuments"] = "Taj Mahal";
doc["year"] = 1653;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["monuments"] = "Mysore Palace";
doc["year"] = 1897;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "3";
doc["monuments"] = "Chennakesava Temple";
doc["year"] = 1117;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*", {},
"", {"year(10thAD:[1000,1500], 15thAD:[1500,2000], 20thAD:[2000, ])"},
{}, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(2, results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("15thAD", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"][1]["count"]);
ASSERT_EQ("20thAD", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"][2]["count"]);
ASSERT_EQ("10thAD", results["facet_counts"][0]["counts"][2]["value"]);
}
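// Altering a field from int64[] to string[] should still allow faceting on the values that were
// already indexed; they are coerced to strings.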
TEST_F(CollectionFacetingTest, FacetingWithCoercedString) {
std::vector<field> fields = {field("years", field_types::INT64_ARRAY, true)};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "",
{},{}).get();
nlohmann::json doc;
doc["id"] = "0";
doc["years"] = {2000, 2010, 2020};
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto schema_changes = R"({
"fields": [
{"name": "years", "drop": true},
{"name": "years", "type": "string[]", "facet": true}
]
})"_json;
// schema change will not change the data on disk, so we have to account for this during hash based faceting
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
auto results = coll1->search("*", {}, "", {"years"}, {}, {2}, 10,
1, FREQUENCY, {true}).get();
ASSERT_EQ(3, results["facet_counts"][0]["counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"]);
}
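// Range faceting without a facet index falls back to the hash index, which needs sort enabled on
// the field; otherwise the search should fail with a clear error.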
TEST_F(CollectionFacetingTest, RangeFacetsWithSortDisabled) {
std::vector<field> fields = {field("name", field_types::STRING, false, false, true, "", 1),
field("brand", field_types::STRING, true, false, true, "", 0),
field("price", field_types::FLOAT, true, false, true, "", 0)};
Collection* coll2 = collectionManager.create_collection(
"coll2", 1, fields, "", 0, "",
{},{}).get();
nlohmann::json doc;
doc["name"] = "keyboard";
doc["id"] = "pd-1";
doc["brand"] = "Logitech";
doc["price"] = 49.99;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
doc["name"] = "mouse";
doc["id"] = "pd-2";
doc["brand"] = "Logitech";
doc["price"] = 29.99;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
auto results = coll2->search("*", {}, "brand:=Logitech",
{"price(Low:[0, 30], Medium:[30, 75], High:[75, ])"}, {}, {2},
10, 1, FREQUENCY, {true});
//if no facet index is specified, the hash index is used
//the hash index requires sort to be enabled on the field for range faceting
ASSERT_FALSE(results.ok());
ASSERT_EQ("Range facets require sort enabled for the field.", results.error());
}
TEST_F(CollectionFacetingTest, FacetSearchIndexTypeValidation) {
std::vector<field> fields = {
field("attribute.title", field_types::STRING, true),
field("attribute.category", field_types::STRING, true),
};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["attribute.title"] = "Foobar";
doc["attribute.category"] = "shoes";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res_op = coll1->search("*", {},
"", {"attribute.*"}, {}, {2}, 1, 1, FREQUENCY, {true}, 1,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 5, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL,
"top_values");
ASSERT_TRUE(res_op.ok());
res_op = coll1->search("*", {},
"", {"attribute.*"}, {}, {2}, 1, 1, FREQUENCY, {true}, 1,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 5, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 3, 3, 2, 2, false, "", true, 0, max_score, 100, 0, 4294967295UL,
"");
ASSERT_TRUE(res_op.ok());
}
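// top_k:true computes facet counts only over the top-ranked hits: per the assertions below, only
// 250 of the 500 matching "jeans" docs are counted.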
TEST_F(CollectionFacetingTest, TopKFaceting) {
std::vector<field> fields = {field("name", field_types::STRING, true, false, true, "", 1),
field("price", field_types::FLOAT, true, false, true, "", 0)};
Collection* coll2 = collectionManager.create_collection(
"coll2", 1, fields, "", 0, "",
{},{}).get();
nlohmann::json doc;
for(int i=0; i < 500; ++i) {
doc["name"] = "jeans";
doc["price"] = 49.99;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
doc["name"] = "narrow jeans";
doc["price"] = 29.99;
ASSERT_TRUE(coll2->add(doc.dump()).ok());
}
//normal facet
auto results = coll2->search("jeans", {"name"}, "",
{"name"}, {}, {2},
10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("name", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(2, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("jeans", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(500, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("narrow jeans", results["facet_counts"][0]["counts"][1]["value"]);
ASSERT_EQ(500, (int) results["facet_counts"][0]["counts"][1]["count"]);
//facet with top_k
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:true)"}, {}, {2},
10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ("name", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("jeans", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(250, (int) results["facet_counts"][0]["counts"][0]["count"]);
//only some of the facets use top_k
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:true)", "price"}, {}, {2},
10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(2, results["facet_counts"].size());
ASSERT_EQ("name", results["facet_counts"][0]["field_name"]);
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("jeans", results["facet_counts"][0]["counts"][0]["value"]);
ASSERT_EQ(250, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("price", results["facet_counts"][1]["field_name"]);
ASSERT_EQ(2, results["facet_counts"][1]["counts"].size());
ASSERT_EQ("49.99", results["facet_counts"][1]["counts"][0]["value"]);
ASSERT_EQ(500, (int) results["facet_counts"][1]["counts"][0]["count"]);
ASSERT_EQ("29.99", results["facet_counts"][1]["counts"][1]["value"]);
ASSERT_EQ(500, (int) results["facet_counts"][1]["counts"][1]["count"]);
}
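// Syntax validation for the top_k facet param, on its own and combined with sort_by and range specs.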
TEST_F(CollectionFacetingTest, TopKFacetValidation) {
std::vector<field> fields = {field("name", field_types::STRING, true, false, true, "", 1),
field("price", field_types::FLOAT, true, false, true, "", 1)};
Collection* coll2 = collectionManager.create_collection(
"coll2", 1, fields, "", 0, "",
{},{}).get();
//'=' separator instead of ":"
auto results = coll2->search("jeans", {"name"}, "",
{"name(top_k=true)"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_FALSE(results.ok());
ASSERT_EQ("Invalid facet format.", results.error());
//typo in top_k
results = coll2->search("jeans", {"name"}, "",
{"name(top-k:true)"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_FALSE(results.ok());
ASSERT_EQ("Invalid facet param `top-k`.", results.error());
results = coll2->search("jeans", {"name"}, "",
{"name(topk:true)"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_FALSE(results.ok());
ASSERT_EQ("Invalid facet param `topk`.", results.error());
//value should be boolean
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:10)"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_FALSE(results.ok());
ASSERT_EQ("top_k string format is invalid.", results.error());
//correct value
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:false)"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_TRUE(results.ok());
//with sort params
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:false, sort_by:_alpha:desc)"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_TRUE(results.ok());
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:false, sort_by:price:desc)"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_TRUE(results.ok());
//with range facets
results = coll2->search("jeans", {"name"}, "",
{"price(top_k:false, economic:[0, 30], Luxury:[30, 50])"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_TRUE(results.ok());
results = coll2->search("jeans", {"name"}, "",
{"price(economic:[0, 30], top_k:true, Luxury:[30, 50])"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_TRUE(results.ok());
results = coll2->search("jeans", {"name"}, "",
{"price(economic:[0, 30], Luxury:[30, 50], top_k:true)"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_TRUE(results.ok());
//missing ',' separator
results = coll2->search("jeans", {"name"}, "",
{"price(economic:[0, 30], Luxury:[30, 50] top_k:true)"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_FALSE(results.ok());
ASSERT_EQ("Invalid facet format.", results.error());
results = coll2->search("jeans", {"name"}, "",
{"name(top_k:false sort_by:_alpha:desc)"}, {}, {2},
10, 1, FREQUENCY, {true});
ASSERT_FALSE(results.ok());
ASSERT_EQ("top_k string format is invalid.", results.error());
}
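// With validate_field_names=false, a facet_by field missing from the schema is ignored instead of
// producing an error.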
TEST_F(CollectionFacetingTest, IgnoreMissingFacetByFields) {
nlohmann::json schema = R"({
"name": "test",
"enable_nested_fields": true,
"fields": [
{
"name": "count-.*",
"type": "int64",
"facet": true
}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"count-100": 123
})"_json.dump());
ASSERT_TRUE(add_op.ok());
bool validate_field_names = true;
auto res_op = coll->search("*", {}, "", {"count-200"},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
{"embedding"}, 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", true, true, false, 0, true,
true, DEFAULT_FILTER_BY_CANDIDATES, false, validate_field_names);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a facet field named `count-200` in the schema.", res_op.error());
validate_field_names = false;
res_op = coll->search("*", {}, "", {"count-200"},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
{"embedding"}, 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", true, true, false, 0, true,
true, DEFAULT_FILTER_BY_CANDIDATES, false, validate_field_names);
ASSERT_TRUE(res_op.ok());
auto res = res_op.get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(0, res["facet_counts"].size());
}
| size: 160,407 | language: C++ | extension: .cpp | total_lines: 2,929 | avg_line_length: 42.68351 | max_line_length: 146 | alphanum_fraction: 0.51808 |
| repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) |
| exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: false | near_duplicates_stackv1: false | near_duplicates_stackv2: false |

| id: 3,714 | file_name: collection_test.cpp | file_path: typesense_typesense/test/collection_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <filesystem>
#include <cstdlib>
#include <collection_manager.h>
#include <validator.h>
#include "collection.h"
#include "embedder_manager.h"
#include "http_client.h"
class CollectionTest : public ::testing::Test {
protected:
Collection *collection;
std::vector<std::string> query_fields;
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<sort_by> sort_fields;
// used for generating random text
std::vector<std::string> words;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
system("mkdir -p /tmp/typesense_test/models");
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
std::ifstream infile(std::string(ROOT_DIR)+"test/documents.jsonl");
std::vector<field> search_fields = {
field("title", field_types::STRING, false),
field("points", field_types::INT32, false)
};
query_fields = {"title"};
sort_fields = { sort_by(sort_field_const::text_match, "DESC"), sort_by("points", "DESC") };
collection = collectionManager.get_collection("collection").get();
if(collection == nullptr) {
collection = collectionManager.create_collection("collection", 4, search_fields, "points").get();
}
std::string json_line;
// dummy record with id 0, so that test record IDs match the line numbers in the fixture file
json_line = "{\"points\":10,\"title\":\"z\"}";
collection->add(json_line);
while (std::getline(infile, json_line)) {
collection->add(json_line);
}
infile.close();
std::ifstream words_file(std::string(ROOT_DIR)+"test/resources/common100_english.txt");
std::stringstream strstream;
strstream << words_file.rdbuf();
words_file.close();
StringUtils::split(strstream.str(), words, "\n");
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.drop_collection("collection");
collectionManager.dispose();
delete store;
}
std::string get_text(size_t num_words) {
time_t t;
srand((unsigned) time(&t));
std::vector<std::string> strs;
for(size_t i = 0 ; i < num_words ; i++ ) {
int word_index = rand() % words.size();
strs.push_back(words[word_index]);
}
return StringUtils::join(strs, " ");
}
};
TEST_F(CollectionTest, VerifyCountOfDocuments) {
// we have 1 dummy record so that sequence IDs match the line numbers in the fixtures file
ASSERT_EQ(24+1, collection->get_num_documents());
// when no dirty values option is sent, a collection with an explicit schema should default to REJECT
std::string empty_dirty_values;
ASSERT_EQ(DIRTY_VALUES::REJECT, collection->parse_dirty_values_option(empty_dirty_values));
}
TEST_F(CollectionTest, RetrieveADocumentById) {
Option<nlohmann::json> doc_option = collection->get("1");
ASSERT_TRUE(doc_option.ok());
nlohmann::json doc = doc_option.get();
std::string id = doc["id"];
doc_option = collection->get("foo");
ASSERT_TRUE(doc_option.ok());
doc = doc_option.get();
id = doc["id"];
ASSERT_STREQ("foo", id.c_str());
doc_option = collection->get("baz");
ASSERT_FALSE(doc_option.ok());
}
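// Repeated searches for a common token ("the") must return a stable ordering:
// score ties are broken by document id, an ASC sort on points reverses the
// order, and an empty result set still carries the "hits" and "found" keys.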
TEST_F(CollectionTest, ExactSearchShouldBeStable) {
std::vector<std::string> facets;
nlohmann::json results = collection->search("the", query_fields, "", facets, sort_fields, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(7, results["hits"].size());
ASSERT_EQ(7, results["found"].get<int>());
ASSERT_STREQ("collection", results["request_params"]["collection_name"].get<std::string>().c_str());
ASSERT_STREQ("the", results["request_params"]["q"].get<std::string>().c_str());
ASSERT_EQ(10, results["request_params"]["per_page"].get<size_t>());
// For two documents of the same score, the larger doc_id appears first
std::vector<std::string> ids = {"1", "6", "foo", "13", "10", "8", "16"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// check ASC sorting
std::vector<sort_by> sort_fields_asc = { sort_by("points", "ASC") };
results = collection->search("the", query_fields, "", facets, sort_fields_asc, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(7, results["hits"].size());
ASSERT_EQ(7, results["found"].get<int>());
ids = {"16", "13", "10", "8", "6", "foo", "1"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// when a query does not return results, hits and found fields should still exist in response
results = collection->search("zxsadqewsad", query_fields, "", facets, sort_fields_asc, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(0, results["found"].get<int>());
}
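// Multi-token query ranking: hits are ordered by (match, token distance, score),
// both query tokens are marked in the top hit's highlight snippet, and the same
// ordering is preserved when paginating with a smaller per_page.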
TEST_F(CollectionTest, MultiTokenSearch) {
std::vector<std::string> facets;
nlohmann::json results = collection->search("rocket launch", query_fields, "", facets, sort_fields, {0}, 10,
1, FREQUENCY,
{false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(5, results["found"].get<uint32_t>());
/*
Sort by (match, diff, score)
8: score: 12, diff: 0
1: score: 15, diff: 4
17: score: 8, diff: 4
16: score: 10, diff: 5
13: score: 12, (single word match)
*/
std::vector<std::string> ids = {"8", "1", "17", "16", "13"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
ASSERT_EQ(results["hits"][0]["highlights"].size(), (unsigned long) 1);
ASSERT_STREQ(results["hits"][0]["highlights"][0]["field"].get<std::string>().c_str(), "title");
ASSERT_STREQ(results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str(),
"What is the power, requirement of a <mark>rocket</mark> <mark>launch</mark> these days?");
// Check ASC sort order
std::vector<sort_by> sort_fields_asc = { sort_by(sort_field_const::text_match, "DESC"), sort_by("points", "ASC") };
results = collection->search("rocket launch", query_fields, "", facets, sort_fields_asc, {0}, 10,
1, FREQUENCY,
{false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(5, results["found"].get<uint32_t>());
ids = {"8", "17", "1", "16", "13"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// Check pagination
results = collection->search("rocket launch", query_fields, "", facets, sort_fields, {0}, 3,
1, FREQUENCY,
{false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(5, results["found"].get<uint32_t>());
ASSERT_EQ(3, results["request_params"]["per_page"].get<size_t>());
ids = {"8", "1", "17"};
for(size_t i = 0; i < 3; i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
}
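// Tokens prefixed with `-` are excluded from the results, both when combined
// with positive tokens and when the query consists only of exclusions.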
TEST_F(CollectionTest, SearchWithExcludedTokens) {
std::vector<std::string> facets;
nlohmann::json results = collection->search("how -propellants -are", query_fields, "", facets, sort_fields, {0}, 10,
1, FREQUENCY,
{false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<uint32_t>());
std::vector<std::string> ids = {"9", "17"};
for (size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = collection->search("-rocket", query_fields, "", facets, sort_fields, {0}, 50).get();
ASSERT_EQ(21, results["found"].get<uint32_t>());
ASSERT_EQ(21, results["hits"].size());
results = collection->search("-rocket -cryovolcanism", query_fields, "", facets, sort_fields, {0}, 50).get();
ASSERT_EQ(20, results["found"].get<uint32_t>());
}
TEST_F(CollectionTest, SkipUnindexedTokensDuringMultiTokenSearch) {
// Tokens that are not found in the index should be skipped
std::vector<std::string> facets;
nlohmann::json results = collection->search("DoesNotExist from", query_fields, "", facets, sort_fields, {0}, 10).get();
ASSERT_EQ(2, results["hits"].size());
std::vector<std::string> ids = {"2", "17"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// with non-zero cost
results = collection->search("DoesNotExist from", query_fields, "", facets, sort_fields, {1}, 10).get();
ASSERT_EQ(2, results["hits"].size());
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// with 2 indexed words
results = collection->search("from DoesNotExist insTruments", query_fields, "", facets, sort_fields, {1}, 10).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"2", "17"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// exhaustive search should give same results
results = collection->search("from DoesNotExist insTruments", query_fields, "", facets, sort_fields, {1}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000,
true, false, true, "", true).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"2", "17"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// should not try to drop tokens to expand query
results.clear();
results = collection->search("the a", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(9, results["hits"].size());
results.clear();
results = collection->search("the a", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"8", "16", "10"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string id = ids.at(i);
std::string result_id = result["document"]["id"];
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results.clear();
results = collection->search("the a insurance", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(0, results["hits"].size());
// with no indexed word
results.clear();
results = collection->search("DoesNotExist1 DoesNotExist2", query_fields, "", facets, sort_fields, {0}, 10).get();
ASSERT_EQ(0, results["hits"].size());
results.clear();
results = collection->search("DoesNotExist1 DoesNotExist2", query_fields, "", facets, sort_fields, {2}, 10).get();
ASSERT_EQ(0, results["hits"].size());
}
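// Documents that match only some of the query tokens are still returned
// ("rocket research" matches records containing either token).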
TEST_F(CollectionTest, PartialMultiTokenSearch) {
std::vector<std::string> facets;
nlohmann::json results = collection->search("rocket research", query_fields, "", facets,
sort_fields, {0}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(6, results["hits"].size());
std::vector<std::string> ids = {"19", "1", "10", "8", "16", "17"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
}
TEST_F(CollectionTest, QueryWithTypo) {
std::vector<std::string> facets;
nlohmann::json results = collection->search("kind biologcal", query_fields, "", facets, sort_fields, {2}, 3,
1, FREQUENCY,
{false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(3, results["hits"].size());
std::vector<std::string> ids = {"19", "3", "20"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results.clear();
results = collection->search("lauxnch rcket", query_fields, "", facets, sort_fields, {1}, 3,
1, FREQUENCY,
{false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ids = {"8", "1", "17"};
ASSERT_EQ(3, results["hits"].size());
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
}
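// Typo-corrected matches for "loox" are ranked according to the chosen token
// ordering (MAX_SCORE vs FREQUENCY), and the ordering stays consistent across
// page sizes.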
TEST_F(CollectionTest, TypoTokenRankedByScoreAndFrequency) {
std::vector<std::string> facets;
nlohmann::json results = collection->search("loox", query_fields, "", facets, sort_fields, {1}, 2, 1, MAX_SCORE, {false}).get();
ASSERT_EQ(2, results["hits"].size());
std::vector<std::string> ids = {"22", "3"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = collection->search("loox", query_fields, "", facets, sort_fields, {1}, 3, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"22", "3", "12"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// Check pagination
results = collection->search("loox", query_fields, "", facets, sort_fields, {1}, 1, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["found"].get<int>());
ASSERT_EQ(1, results["hits"].size());
std::string solo_id = results["hits"].at(0)["document"]["id"];
ASSERT_STREQ("22", solo_id.c_str());
results = collection->search("loox", query_fields, "", facets, sort_fields, {1}, 2, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["found"].get<int>());
ASSERT_EQ(2, results["hits"].size());
// Check total ordering
results = collection->search("loox", query_fields, "", facets, sort_fields, {1}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ids = {"22", "3", "12", "23", "24"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = collection->search("loox", query_fields, "", facets, sort_fields, {1}, 10, 1, MAX_SCORE, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ids = {"22", "3", "12", "23", "24"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
}
TEST_F(CollectionTest, TextContainingAnActualTypo) {
// A line contains "ISSX" but not "what" - need to ensure that correction to "ISSS what" happens
std::vector<std::string> facets;
nlohmann::json results = collection->search("ISSX what", query_fields, "", facets, sort_fields, {1}, 4, 1, FREQUENCY, {false},
20, spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(),
10, "", 30, 5, "", 20).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ(11, results["found"].get<uint32_t>());
std::vector<std::string> ids = {"19", "6", "21", "22"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// Record containing exact token match should appear first
results = collection->search("ISSX", query_fields, "", facets, sort_fields, {1}, 10, 1, FREQUENCY, {false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(5, results["found"].get<uint32_t>());
ids = {"20", "19", "6", "3", "21"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
}
TEST_F(CollectionTest, Pagination) {
nlohmann::json results = collection->search("the", query_fields, "", {}, sort_fields, {0}, 3, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(7, results["found"].get<uint32_t>());
std::vector<std::string> ids = {"1", "6", "foo"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = collection->search("the", query_fields, "", {}, sort_fields, {0}, 3, 2, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(7, results["found"].get<uint32_t>());
ids = {"13", "10", "8"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = collection->search("the", query_fields, "", {}, sort_fields, {0}, 3, 3, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(7, results["found"].get<uint32_t>());
ids = {"16"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
}
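// Wildcard ("*") queries: usable with or without a filter, do not require a
// search field, and honour the requested sort order, while a non-wildcard
// query without search fields is rejected.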
TEST_F(CollectionTest, WildcardQuery) {
nlohmann::json results = collection->search("*", query_fields, "points:>0", {}, sort_fields, {0}, 3, 1, FREQUENCY,
{false}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(25, results["found"].get<uint32_t>());
// when no filter is specified, fall back on a catch-all filter based on the default sorting field
Option<nlohmann::json> results_op = collection->search("*", query_fields, "", {}, sort_fields, {0}, 3, 1, FREQUENCY,
{false});
ASSERT_TRUE(results_op.ok());
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(25, results["found"].get<uint32_t>());
// wildcard query with no filters and ASC sort
std::vector<sort_by> sort_fields = { sort_by("points", "ASC") };
results = collection->search("*", query_fields, "", {}, sort_fields, {0}, 3, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(25, results["found"].get<uint32_t>());
std::vector<std::string> ids = {"21", "24", "17"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// wildcard query should not require a search field
results_op = collection->search("*", {}, "", {}, sort_fields, {0}, 3, 1, FREQUENCY, {false});
ASSERT_TRUE(results_op.ok());
results = results_op.get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(25, results["found"].get<uint32_t>());
// non-wildcard query should require a search field
results_op = collection->search("the", {}, "", {}, sort_fields, {0}, 3, 1, FREQUENCY, {false});
ASSERT_FALSE(results_op.ok());
ASSERT_STREQ("No search fields specified for the query.", results_op.error().c_str());
Collection* empty_coll;
std::vector<field> fields = {field("title", field_types::STRING, false)};
empty_coll = collectionManager.get_collection("empty_coll").get();
if(empty_coll == nullptr) {
empty_coll = collectionManager.create_collection("empty_coll", 1, fields).get();
}
results = empty_coll->search("*", {}, "title:!= foo", {}, {}, {0}, 3, 1).get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(0, results["found"]);
}
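// Prefix search: only the last query token is treated as a prefix, very short
// prefixes cap the permissible typo count, and a prefix may itself carry a typo.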
TEST_F(CollectionTest, PrefixSearching) {
std::vector<std::string> facets;
nlohmann::json results = collection->search("ex", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(2, results["hits"].size());
std::vector<std::string> ids = {"6", "12"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = collection->search("ex", query_fields, "", facets, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"6", "12"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = collection->search("what ex", query_fields, "", facets, sort_fields, {0}, 10, 1, MAX_SCORE, {true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(9, results["hits"].size());
ids = {"6", "12", "19", "22", "13", "8", "15", "24", "21"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// restrict to only 2 results and differentiate between MAX_SCORE and FREQUENCY
results = collection->search("t", query_fields, "", facets, sort_fields, {0}, 2, 1, MAX_SCORE, {true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"19", "22"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = collection->search("t", query_fields, "", facets, sort_fields, {0}, 2, 1, FREQUENCY, {true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"1", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// only the last token in the query should be used for prefix search - so, "math" should not match "mathematics"
results = collection->search("math fx", query_fields, "", facets, sort_fields, {0}, 1, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, results["hits"].size());
// single and double char prefixes should set a ceiling on the num_typos possible
results = collection->search("x", query_fields, "", facets, sort_fields, {2}, 2, 1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["hits"].size());
// prefix with a typo
results = collection->search("late propx", query_fields, "", facets, sort_fields, {2}, 1, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("16", results["hits"].at(0)["document"]["id"]);
}
TEST_F(CollectionTest, TypoTokensThreshold) {
// Typo correction should happen only based on the `typo_tokens_threshold` value
auto results = collection->search("redundant", {"title"}, "", {}, sort_fields, {2}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "", 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["found"].get<size_t>());
results = collection->search("redundant", {"title"}, "", {}, sort_fields, {2}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "", 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<size_t>());
}
TEST_F(CollectionTest, MultiOccurrenceString) {
Collection *coll_multi_string;
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("points", field_types::INT32, false)
};
coll_multi_string = collectionManager.get_collection("coll_multi_string").get();
if (coll_multi_string == nullptr) {
coll_multi_string = collectionManager.create_collection("coll_multi_string", 4, fields, "points").get();
}
nlohmann::json document;
document["title"] = "The brown fox was the tallest of the lot and the quickest of the trot.";
document["points"] = 100;
coll_multi_string->add(document.dump()).get();
query_fields = {"title"};
nlohmann::json results = coll_multi_string->search("the", query_fields, "", {}, sort_fields, {0}, 10, 1,
FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll_multi_string");
}
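// Highlighting on a string-array field: one snippet per matching array element,
// ordered by match score (lower indices win ties), and per-hit highlight fields
// are themselves ordered by match score.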
TEST_F(CollectionTest, ArrayStringFieldHighlight) {
Collection *coll_array_text;
std::ifstream infile(std::string(ROOT_DIR)+"test/array_text_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false)
};
coll_array_text = collectionManager.get_collection("coll_array_text").get();
if (coll_array_text == nullptr) {
coll_array_text = collectionManager.create_collection("coll_array_text", 4, fields, "points").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_array_text->add(json_line);
}
infile.close();
query_fields = {"tags"};
std::vector<std::string> facets;
nlohmann::json results = coll_array_text->search("truth about", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
std::vector<std::string> ids = {"0"};
for (size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
ASSERT_EQ(results["hits"][0]["highlights"].size(), 1);
ASSERT_STREQ(results["hits"][0]["highlights"][0]["field"].get<std::string>().c_str(), "tags");
// an array's snippets must be sorted by match score; when scores are equal, lower indices get priority
ASSERT_EQ(3, results["hits"][0]["highlights"][0]["snippets"].size());
ASSERT_STREQ("<mark>truth</mark> <mark>about</mark>", results["hits"][0]["highlights"][0]["snippets"][0].get<std::string>().c_str());
ASSERT_STREQ("the <mark>truth</mark>", results["hits"][0]["highlights"][0]["snippets"][1].get<std::string>().c_str());
ASSERT_STREQ("<mark>about</mark> forever", results["hits"][0]["highlights"][0]["snippets"][2].get<std::string>().c_str());
ASSERT_EQ(3, results["hits"][0]["highlights"][0]["indices"].size());
ASSERT_EQ(2, results["hits"][0]["highlights"][0]["indices"][0]);
ASSERT_EQ(0, results["hits"][0]["highlights"][0]["indices"][1]);
ASSERT_EQ(1, results["hits"][0]["highlights"][0]["indices"][2]);
results = coll_array_text->search("forever truth", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ids = {"0"};
for (size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
ASSERT_STREQ(results["hits"][0]["highlights"][0]["field"].get<std::string>().c_str(), "tags");
ASSERT_EQ(3, results["hits"][0]["highlights"][0]["snippets"].size());
ASSERT_STREQ("the <mark>truth</mark>", results["hits"][0]["highlights"][0]["snippets"][0].get<std::string>().c_str());
ASSERT_STREQ("about <mark>forever</mark>", results["hits"][0]["highlights"][0]["snippets"][1].get<std::string>().c_str());
ASSERT_STREQ("<mark>truth</mark> about", results["hits"][0]["highlights"][0]["snippets"][2].get<std::string>().c_str());
ASSERT_EQ(3, results["hits"][0]["highlights"][0]["indices"].size());
ASSERT_EQ(0, results["hits"][0]["highlights"][0]["indices"][0]);
ASSERT_EQ(1, results["hits"][0]["highlights"][0]["indices"][1]);
ASSERT_EQ(2, results["hits"][0]["highlights"][0]["indices"][2]);
results = coll_array_text->search("truth", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"1", "0"};
for (size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_array_text->search("asdadasd", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, 0).get();
ASSERT_EQ(0, results["hits"].size());
query_fields = {"title", "tags"};
results = coll_array_text->search("truth", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["hits"][0]["highlights"].size());
ids = {"1", "0"};
for (size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
ASSERT_EQ(4, results["hits"][0]["highlights"][0].size());
ASSERT_STREQ(results["hits"][0]["highlights"][0]["field"].get<std::string>().c_str(), "tags");
ASSERT_EQ(2, results["hits"][0]["highlights"][0]["snippets"].size());
ASSERT_STREQ("<mark>truth</mark>", results["hits"][0]["highlights"][0]["snippets"][0].get<std::string>().c_str());
ASSERT_STREQ("plain <mark>truth</mark>", results["hits"][0]["highlights"][0]["snippets"][1].get<std::string>().c_str());
ASSERT_EQ(2, results["hits"][0]["highlights"][0]["matched_tokens"].size());
ASSERT_STREQ("truth", results["hits"][0]["highlights"][0]["matched_tokens"][0][0].get<std::string>().c_str());
ASSERT_STREQ("truth", results["hits"][0]["highlights"][0]["matched_tokens"][1][0].get<std::string>().c_str());
ASSERT_EQ(2, results["hits"][0]["highlights"][0]["indices"].size());
ASSERT_EQ(1, results["hits"][0]["highlights"][0]["indices"][0]);
ASSERT_EQ(2, results["hits"][0]["highlights"][0]["indices"][1]);
ASSERT_EQ(3, results["hits"][0]["highlights"][1].size());
ASSERT_STREQ("title", results["hits"][0]["highlights"][1]["field"].get<std::string>().c_str());
ASSERT_STREQ("Plain <mark>Truth</mark>", results["hits"][0]["highlights"][1]["snippet"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][0]["highlights"][1]["matched_tokens"].size());
ASSERT_STREQ("Truth", results["hits"][0]["highlights"][1]["matched_tokens"][0].get<std::string>().c_str());
ASSERT_EQ(3, results["hits"][1]["highlights"][0].size());
ASSERT_STREQ("title", results["hits"][1]["highlights"][0]["field"].get<std::string>().c_str());
ASSERT_STREQ("The <mark>Truth</mark> About Forever", results["hits"][1]["highlights"][0]["snippet"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][1]["highlights"][0]["matched_tokens"].size());
ASSERT_STREQ("Truth", results["hits"][1]["highlights"][0]["matched_tokens"][0].get<std::string>().c_str());
ASSERT_EQ(4, results["hits"][1]["highlights"][1].size());
ASSERT_STREQ(results["hits"][1]["highlights"][1]["field"].get<std::string>().c_str(), "tags");
ASSERT_EQ(2, results["hits"][1]["highlights"][1]["snippets"].size());
ASSERT_STREQ("the <mark>truth</mark>", results["hits"][1]["highlights"][1]["snippets"][0].get<std::string>().c_str());
ASSERT_STREQ("<mark>truth</mark> about", results["hits"][1]["highlights"][1]["snippets"][1].get<std::string>().c_str());
ASSERT_EQ(2, results["hits"][1]["highlights"][1]["matched_tokens"].size());
ASSERT_STREQ("truth", results["hits"][1]["highlights"][1]["matched_tokens"][0][0].get<std::string>().c_str());
ASSERT_STREQ("truth", results["hits"][1]["highlights"][1]["matched_tokens"][1][0].get<std::string>().c_str());
ASSERT_EQ(2, results["hits"][1]["highlights"][1]["indices"].size());
ASSERT_EQ(0, results["hits"][1]["highlights"][1]["indices"][0]);
ASSERT_EQ(2, results["hits"][1]["highlights"][1]["indices"][1]);
// highlight fields must be ordered based on match score
results = coll_array_text->search("amazing movie", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(2, results["hits"][0]["highlights"].size());
ASSERT_EQ(4, results["hits"][0]["highlights"][0].size());
ASSERT_STREQ("tags", results["hits"][0]["highlights"][0]["field"].get<std::string>().c_str());
ASSERT_STREQ("<mark>amazing</mark> <mark>movie</mark>", results["hits"][0]["highlights"][0]["snippets"][0].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][0]["highlights"][0]["indices"].size());
ASSERT_EQ(0, results["hits"][0]["highlights"][0]["indices"][0]);
ASSERT_EQ(1, results["hits"][0]["highlights"][0]["matched_tokens"].size());
ASSERT_STREQ("amazing", results["hits"][0]["highlights"][0]["matched_tokens"][0][0].get<std::string>().c_str());
ASSERT_EQ(3, results["hits"][0]["highlights"][1].size());
ASSERT_STREQ(results["hits"][0]["highlights"][1]["field"].get<std::string>().c_str(), "title");
ASSERT_STREQ(results["hits"][0]["highlights"][1]["snippet"].get<std::string>().c_str(),
"<mark>Amazing</mark> Spiderman is <mark>amazing</mark>"); // should highlight duplicating tokens
ASSERT_EQ(2, results["hits"][0]["highlights"][1]["matched_tokens"].size());
ASSERT_STREQ("Amazing", results["hits"][0]["highlights"][1]["matched_tokens"][0].get<std::string>().c_str());
ASSERT_STREQ("amazing", results["hits"][0]["highlights"][1]["matched_tokens"][1].get<std::string>().c_str());
// when query tokens are not found in an array field they should be ignored
results = coll_array_text->search("winds", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY,
{false}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
collectionManager.drop_collection("coll_array_text");
}
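// Searching across several fields: the order of query_fields conveys priority,
// unfaceted array fields can still be filtered on, and a token present in
// multiple fields of a document yields one hit and one facet count.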
TEST_F(CollectionTest, MultipleFields) {
Collection *coll_mul_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("starring", field_types::STRING, false),
field("starring_facet", field_types::STRING, true),
field("cast", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false)
};
coll_mul_fields = collectionManager.get_collection("coll_mul_fields").get();
if(coll_mul_fields == nullptr) {
coll_mul_fields = collectionManager.create_collection("coll_mul_fields", 4, fields, "points").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_mul_fields->add(json_line);
}
infile.close();
query_fields = {"title", "starring"};
std::vector<std::string> facets;
nlohmann::json results = coll_mul_fields->search("Will", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
std::vector<std::string> ids = {"3", "2", "1", "0"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// when "starring" takes higher priority than "title"
query_fields = {"starring", "title"};
results = coll_mul_fields->search("thomas", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {2, 1}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"15", "12", "13", "14"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
query_fields = {"starring", "title", "cast"};
results = coll_mul_fields->search("ben affleck", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
query_fields = {"cast"};
results = coll_mul_fields->search("chris", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"6", "1", "7"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
query_fields = {"cast"};
results = coll_mul_fields->search("chris pine", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ids = {"7"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// filtering on unfaceted multi-valued string field
query_fields = {"title"};
results = coll_mul_fields->search("captain", query_fields, "cast: chris", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ids = {"6"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// when a token exists in multiple fields of the same document, document and facet should be returned only once
query_fields = {"starring", "title", "cast"};
facets = {"starring_facet"};
results = coll_mul_fields->search("myers", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ids = {"17"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("starring_facet", results["facet_counts"][0]["field_name"].get<std::string>().c_str());
size_t facet_count = results["facet_counts"][0]["counts"][0]["count"];
ASSERT_EQ(1, facet_count);
collectionManager.drop_collection("coll_mul_fields");
}
TEST_F(CollectionTest, KeywordQueryReturnsResultsBasedOnPerPageParam) {
Collection *coll_mul_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("starring", field_types::STRING, false),
field("starring_facet", field_types::STRING, true),
field("cast", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false)
};
coll_mul_fields = collectionManager.get_collection("coll_mul_fields").get();
if(coll_mul_fields == nullptr) {
coll_mul_fields = collectionManager.create_collection("coll_mul_fields", 4, fields, "points").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_mul_fields->add(json_line);
}
infile.close();
query_fields = {"title", "starring"};
std::vector<std::string> facets;
spp::sparse_hash_set<std::string> empty;
nlohmann::json results = coll_mul_fields->search("w", query_fields, "", facets, sort_fields, {0}, 3, 1,
FREQUENCY, {true}, 1000, empty, empty, 10).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(6, results["found"].get<int>());
// cannot fetch more than in-built limit of 250
auto res_op = coll_mul_fields->search("w", query_fields, "", facets, sort_fields, {0}, 251, 1,
FREQUENCY, {true}, 1000, empty, empty, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ(422, res_op.code());
ASSERT_STREQ("Only upto 250 hits can be fetched per page.", res_op.error().c_str());
// when page number is zero, use the first page
results = coll_mul_fields->search("w", query_fields, "", facets, sort_fields, {0}, 3, 0,
FREQUENCY, {true}, 1000, empty, empty, 10).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(6, results["found"].get<int>());
// do pagination
results = coll_mul_fields->search("w", query_fields, "", facets, sort_fields, {0}, 3, 1,
FREQUENCY, {true}, 1000, empty, empty, 10).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(6, results["found"].get<int>());
results = coll_mul_fields->search("w", query_fields, "", facets, sort_fields, {0}, 3, 2,
FREQUENCY, {true}, 1000, empty, empty, 10).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(6, results["found"].get<int>());
collectionManager.drop_collection("coll_mul_fields");
}
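// Helper that parses the per-record responses written back into the request
// vector by add_many() into JSON objects for easier assertions.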
std::vector<nlohmann::json> import_res_to_json(const std::vector<std::string>& imported_results) {
std::vector<nlohmann::json> out;
for(const auto& imported_result: imported_results) {
out.emplace_back(nlohmann::json::parse(imported_result));
}
return out;
}
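// Bulk import in CREATE/UPDATE/UPSERT modes: upsert creates or replaces records,
// update requires the id to exist, create rejects duplicate ids, and bad records
// are reported individually with an error message and a status code.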
TEST_F(CollectionTest, ImportDocumentsUpsert) {
Collection *coll_mul_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::stringstream strstream;
strstream << infile.rdbuf();
infile.close();
std::vector<std::string> import_records;
StringUtils::split(strstream.str(), import_records, "\n");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("starring", field_types::STRING, true),
field("cast", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false)
};
coll_mul_fields = collectionManager.get_collection("coll_mul_fields").get();
if(coll_mul_fields == nullptr) {
coll_mul_fields = collectionManager.create_collection("coll_mul_fields", 1, fields, "points").get();
}
// try importing records
nlohmann::json document;
nlohmann::json import_response = coll_mul_fields->add_many(import_records, document);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(18, import_response["num_imported"].get<int>());
// try searching with filter
auto results = coll_mul_fields->search("*", query_fields, "starring:= [Will Ferrell]", {"starring"}, sort_fields, {0}, 30, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
// update existing record verbatim
std::vector<std::string> existing_records = {R"({"id": "0", "title": "Wake Up, Ron Burgundy: The Lost Movie"})"};
import_response = coll_mul_fields->add_many(existing_records, document, UPDATE);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(1, import_response["num_imported"].get<int>());
// update + upsert records
std::vector<std::string> more_records = {R"({"id": "0", "title": "The Fifth Harry", "starring": "Will Ferrell", "points":62, "cast":["Adam McKay","Steve Carell","Paul Rudd"]})",
R"({"id": "2", "cast": ["Chris Fisher", "Rand Alan"], "points":81, "starring":"Daniel Day-Lewis","title":"There Will Be Blood"})",
R"({"id": "18", "title": "Back Again Forest", "points": 45, "starring": "Ronald Wells", "cast": ["Dant Saren"]})",
R"({"id": "6", "points": 77, "cast":["Chris Evans","Scarlett Johansson"], "starring":"Samuel L. Jackson","title":"Captain America: The Winter Soldier"})"};
import_response = coll_mul_fields->add_many(more_records, document, UPSERT);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(4, import_response["num_imported"].get<int>());
std::vector<nlohmann::json> import_results = import_res_to_json(more_records);
ASSERT_EQ(4, import_results.size());
for(size_t i=0; i<4; i++) {
ASSERT_TRUE(import_results[i]["success"].get<bool>());
ASSERT_EQ(1, import_results[i].size());
}
// try with filters again
results = coll_mul_fields->search("*", query_fields, "starring:= [Will Ferrell]", {"starring"}, sort_fields, {0}, 30, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
results = coll_mul_fields->search("*", query_fields, "", {"starring"}, sort_fields, {0}, 30, 1, FREQUENCY, {false}).get();
ASSERT_EQ(19, results["hits"].size());
ASSERT_EQ(19, coll_mul_fields->get_num_documents());
results = coll_mul_fields->search("back again forest", query_fields, "", {"starring"}, sort_fields, {0}, 30, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("Back Again Forest", coll_mul_fields->get("18").get()["title"].get<std::string>().c_str());
results = coll_mul_fields->search("fifth", query_fields, "", {"starring"}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("The <mark>Fifth</mark> Harry", results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
ASSERT_STREQ("The Woman in the <mark>Fifth</mark> from Kristin", results["hits"][1]["highlights"][0]["snippet"].get<std::string>().c_str());
results = coll_mul_fields->search("burgundy", query_fields, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll_mul_fields->search("harry", query_fields, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
results = coll_mul_fields->search("captain america", query_fields, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(77, results["hits"][0]["document"]["points"].get<size_t>());
// upserting with some bad docs
more_records = {R"({"id": "1", "title": "Wake up, Harry", "cast":["Josh Lawson","Chris Parnell"],"points":63,"starring":"Will Ferrell"})",
R"({"id": "90", "cast": ["Kim Werrel", "Random Wake"]})", // missing fields
R"({"id": "5", "points": 60, "cast":["Logan Lerman","Alexandra Daddario"],"starring":"Ron Perlman","starring_facet":"Ron Perlman","title":"Percy Jackson: Sea of Monsters"})",
R"({"id": "24", "starring": "John", "cast": ["John Kim"], "points": 11})"}; // missing fields
bool return_id = true;
import_response = coll_mul_fields->add_many(more_records, document, UPSERT, "",
DIRTY_VALUES::COERCE_OR_REJECT, false, return_id);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(2, import_response["num_imported"].get<int>());
import_results = import_res_to_json(more_records);
ASSERT_FALSE(import_results[1]["success"].get<bool>());
ASSERT_FALSE(import_results[3]["success"].get<bool>());
ASSERT_STREQ("Field `points` has been declared as a default sorting field, but is not found in the document.", import_results[1]["error"].get<std::string>().c_str());
ASSERT_STREQ("Field `title` has been declared in the schema, but is not found in the document.", import_results[3]["error"].get<std::string>().c_str());
ASSERT_EQ("1", import_results[0]["id"].get<std::string>());
ASSERT_EQ("90", import_results[1]["id"].get<std::string>());
ASSERT_EQ("5", import_results[2]["id"].get<std::string>());
ASSERT_EQ("24", import_results[3]["id"].get<std::string>());
// try to duplicate records without upsert option
more_records = {R"({"id": "1", "title": "Wake up, Harry"})",
R"({"id": "5", "points": 60})"};
import_response = coll_mul_fields->add_many(more_records, document, CREATE, "",
DIRTY_VALUES::COERCE_OR_REJECT, false);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(0, import_response["num_imported"].get<int>());
import_results = import_res_to_json(more_records);
ASSERT_FALSE(import_results[0]["success"].get<bool>());
ASSERT_FALSE(import_results[1]["success"].get<bool>());
ASSERT_STREQ("A document with id 1 already exists.", import_results[0]["error"].get<std::string>().c_str());
ASSERT_STREQ("A document with id 5 already exists.", import_results[1]["error"].get<std::string>().c_str());
// doc should not be returned, since return_doc = false
ASSERT_FALSE(import_results[0].contains("document"));
// update document with verbatim fields, except for points
more_records = {R"({"id": "3", "cast":["Matt Damon","Ben Affleck","Minnie Driver"],
"points":70,"starring":"Robin Williams","starring_facet":"Robin Williams",
"title":"Good Will Hunting"})"};
import_response = coll_mul_fields->add_many(more_records, document, UPDATE);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(1, import_response["num_imported"].get<int>());
results = coll_mul_fields->search("Good Will Hunting", query_fields, "", {"starring"}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(70, results["hits"][0]["document"]["points"].get<uint32_t>());
// updating a document that does not exist should fail, others should succeed
more_records = {R"({"id": "20", "points": 51})",
R"({"id": "1", "points": 64})"};
import_response = coll_mul_fields->add_many(more_records, document, UPDATE);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(1, import_response["num_imported"].get<int>());
import_results = import_res_to_json(more_records);
ASSERT_FALSE(import_results[0]["success"].get<bool>());
ASSERT_TRUE(import_results[1]["success"].get<bool>());
ASSERT_STREQ("Could not find a document with id: 20", import_results[0]["error"].get<std::string>().c_str());
ASSERT_EQ(404, import_results[0]["code"].get<size_t>());
results = coll_mul_fields->search("wake up harry", query_fields, "", {"starring"}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(64, results["hits"][0]["document"]["points"].get<uint32_t>());
// trying to create documents with existing IDs should fail
more_records = {R"({"id": "2", "points": 51})",
R"({"id": "1", "points": 64})"};
import_response = coll_mul_fields->add_many(more_records, document, CREATE);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(0, import_response["num_imported"].get<int>());
import_results = import_res_to_json(more_records);
ASSERT_FALSE(import_results[0]["success"].get<bool>());
ASSERT_FALSE(import_results[1]["success"].get<bool>());
ASSERT_STREQ("A document with id 2 already exists.", import_results[0]["error"].get<std::string>().c_str());
ASSERT_STREQ("A document with id 1 already exists.", import_results[1]["error"].get<std::string>().c_str());
ASSERT_EQ(409, import_results[0]["code"].get<size_t>());
ASSERT_EQ(409, import_results[1]["code"].get<size_t>());
}
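// EMPLACE mode: creates a document when the id is new and applies a partial
// update when it already exists, via both add_many() and a single add().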
TEST_F(CollectionTest, ImportDocumentsEmplace) {
Collection* coll1;
std::vector<field> fields = {
field("title", field_types::STRING, false, false),
field("points", field_types::INT32, false, false)
};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields).get();
}
nlohmann::json document;
std::vector<std::string> records = {R"({"id": "0", "title": "The Matrix", "points":0})",
R"({"id": "1", "title": "Inception", "points":1})"};
std::vector<nlohmann::json> docs = import_res_to_json(records);
// use `emplace` mode for creating documents
auto import_response = coll1->add_many(records, document, EMPLACE, "", DIRTY_VALUES::COERCE_OR_REJECT, true, true);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(2, import_response["num_imported"].get<int>());
std::vector<nlohmann::json> import_results = import_res_to_json(records);
ASSERT_EQ(2, import_results.size());
for (size_t i = 0; i < 2; i++) {
ASSERT_TRUE(import_results[i]["success"].get<bool>());
ASSERT_EQ(3, import_results[i].size());
ASSERT_EQ(docs[i], import_results[i]["document"]);
ASSERT_EQ(docs[i]["id"], import_results[i]["id"]);
}
auto res = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, token_ordering::FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, res["found"].get<size_t>());
// emplace both update + create
records = {R"({"id": "1", "title": "The Inception"})",
R"({"id": "2", "title": "Spiderman", "points":2})"};
import_response = coll1->add_many(records, document, EMPLACE);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(2, import_response["num_imported"].get<int>());
import_results = import_res_to_json(records);
ASSERT_EQ(2, import_results.size());
for (size_t i = 0; i < 2; i++) {
ASSERT_TRUE(import_results[i]["success"].get<bool>());
ASSERT_EQ(1, import_results[i].size());
}
res = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, token_ordering::FREQUENCY, {true}, 10).get();
ASSERT_EQ(3, res["found"].get<size_t>());
ASSERT_EQ("2", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(2, res["hits"][0]["document"]["points"].get<size_t>());
ASSERT_EQ("1", res["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(1, res["hits"][1]["document"]["points"].get<size_t>());
ASSERT_EQ("The Inception", res["hits"][1]["document"]["title"].get<std::string>());
ASSERT_EQ("0", res["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ(0, res["hits"][2]["document"]["points"].get<size_t>());
// emplace with an error due to bad data
records = {R"({"id": "2", "points": "abcd"})",
R"({"id": "3", "title": "Superman", "points":3})"};
import_response = coll1->add_many(records, document, EMPLACE);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(1, import_response["num_imported"].get<int>());
import_results = import_res_to_json(records);
ASSERT_EQ(2, import_results.size());
ASSERT_FALSE(import_results[0]["success"].get<bool>());
ASSERT_TRUE(import_results[1]["success"].get<bool>());
ASSERT_EQ(1, import_results[1].size());
// can update individual document via "emplace" with only partial field (missing points)
std::string doc_3_update = R"({"id": "3", "title": "The Superman"})";
auto add_op = coll1->add(doc_3_update, EMPLACE);
ASSERT_TRUE(add_op.ok());
res = coll1->search("superman", {"title"}, "", {}, {}, {0}, 10, 1, token_ordering::FREQUENCY, {true}, 10).get();
ASSERT_EQ(1, res["found"].get<size_t>());
ASSERT_EQ("3", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(3, res["hits"][0]["document"]["points"].get<size_t>());
ASSERT_EQ("The Superman", res["hits"][0]["document"]["title"].get<std::string>());
// can create individual document via "emplace"
std::string doc_4_create = R"({"id": "4", "title": "The Avengers", "points": 4})";
add_op = coll1->add(doc_4_create, EMPLACE);
ASSERT_TRUE(add_op.ok());
res = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, token_ordering::FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, res["found"].get<size_t>());
}
TEST_F(CollectionTest, DISABLED_CrashTroubleshooting) {
Collection *coll1;
std::vector<field> fields = {
field("title", field_types::STRING_ARRAY, false, true),
field("points", field_types::INT32, false)
};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
std::ifstream create_file("/tmp/create.jsonl");
std::string json_line;
std::vector<std::string> create_records;
while (std::getline(create_file, json_line)) {
create_records.push_back(json_line);
}
create_file.close();
nlohmann::json document;
auto import_response = coll1->add_many(create_records, document, CREATE);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(1000, import_response["num_imported"].get<int>());
// now try to upsert
std::ifstream upsert_file("/tmp/upsert.jsonl");
std::vector<std::string> upsert_records;
while (std::getline(upsert_file, json_line)) {
upsert_records.push_back(json_line);
}
upsert_file.close();
import_response = coll1->add_many(upsert_records, document, UPSERT);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(1000, import_response["num_imported"].get<int>());
}
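// Upsert behaviour for an optional array field: records are first created
// without "title", then upserted with it; UPDATE also accepts partial documents
// that omit the default sorting field.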
TEST_F(CollectionTest, ImportDocumentsUpsertOptional) {
Collection *coll1;
std::vector<field> fields = {
field("title", field_types::STRING_ARRAY, false, true),
field("points", field_types::INT32, false)
};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
std::vector<std::string> records;
size_t NUM_RECORDS = 1000;
for(size_t i=0; i<NUM_RECORDS; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["points"] = i;
records.push_back(doc.dump());
}
// import records without title
nlohmann::json document;
nlohmann::json import_response = coll1->add_many(records, document, CREATE);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(1000, import_response["num_imported"].get<int>());
// upsert documents with title
records.clear();
for(size_t i=0; i<NUM_RECORDS; i++) {
nlohmann::json updoc;
updoc["id"] = std::to_string(i);
updoc["points"] = i;
updoc["title"] = {
get_text(10),
get_text(10),
get_text(10),
get_text(10),
};
records.push_back(updoc.dump());
}
auto begin = std::chrono::high_resolution_clock::now();
import_response = coll1->add_many(records, document, UPSERT);
auto time_micros = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::high_resolution_clock::now() - begin).count();
//LOG(INFO) << "Time taken for first upsert: " << time_micros;
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(1000, import_response["num_imported"].get<int>());
// run upsert again with title override
records.clear();
for(size_t i=0; i<NUM_RECORDS; i++) {
nlohmann::json updoc;
updoc["id"] = std::to_string(i);
updoc["points"] = i;
updoc["title"] = {
get_text(10),
get_text(10),
get_text(10),
get_text(10),
};
records.push_back(updoc.dump());
}
begin = std::chrono::high_resolution_clock::now();
import_response = coll1->add_many(records, document, UPSERT);
time_micros = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::high_resolution_clock::now() - begin).count();
//LOG(INFO) << "Time taken for second upsert: " << time_micros;
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(1000, import_response["num_imported"].get<int>());
// update records (can contain partial fields)
records.clear();
for(size_t i=0; i<NUM_RECORDS; i++) {
nlohmann::json updoc;
updoc["id"] = std::to_string(i);
// no points field
updoc["title"] = {
get_text(10),
get_text(10),
get_text(10),
get_text(10),
};
records.push_back(updoc.dump());
}
import_response = coll1->add_many(records, document, UPDATE);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(1000, import_response["num_imported"].get<int>());
}
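// Covers bulk import end-to-end: successful imports, partial failures with
// per-record error messages, duplicate ids, and malformed JSON payloads.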
TEST_F(CollectionTest, ImportDocuments) {
Collection *coll_mul_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::stringstream strstream;
strstream << infile.rdbuf();
infile.close();
std::vector<std::string> import_records;
StringUtils::split(strstream.str(), import_records, "\n");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("starring", field_types::STRING, false),
field("cast", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false)
};
coll_mul_fields = collectionManager.get_collection("coll_mul_fields").get();
if(coll_mul_fields == nullptr) {
coll_mul_fields = collectionManager.create_collection("coll_mul_fields", 4, fields, "points").get();
}
// try importing records
nlohmann::json document;
nlohmann::json import_response = coll_mul_fields->add_many(import_records, document);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(18, import_response["num_imported"].get<int>());
// now try searching for records
query_fields = {"title", "starring"};
std::vector<std::string> facets;
auto x = coll_mul_fields->search("Will", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false});
nlohmann::json results = coll_mul_fields->search("Will", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
std::vector<std::string> ids = {"3", "2", "1", "0"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// verify that empty import is handled gracefully
std::vector<std::string> empty_records;
import_response = coll_mul_fields->add_many(empty_records, document);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(0, import_response["num_imported"].get<int>());
// verify that only bad records are rejected and the rest are imported (records 2 and 4 are bad)
std::vector<std::string> more_records = {"{\"id\": \"id1\", \"title\": \"Test1\", \"starring\": \"Rand Fish\", \"points\": 12, "
"\"cast\": [\"Tom Skerritt\"] }",
"{\"title\": 123, \"starring\": \"Jazz Gosh\", \"points\": 23, "
"\"cast\": [\"Tom Skerritt\"] }",
"{\"title\": \"Test3\", \"starring\": \"Brad Fin\", \"points\": 11, "
"\"cast\": [\"Tom Skerritt\"] }",
"{\"title\": \"Test4\", \"points\": 55, "
"\"cast\": [\"Tom Skerritt\"] }"};
import_response = coll_mul_fields->add_many(more_records, document, CREATE, "", DIRTY_VALUES::REJECT, true);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(2, import_response["num_imported"].get<int>());
std::vector<nlohmann::json> import_results = import_res_to_json(more_records);
ASSERT_EQ(4, import_results.size());
ASSERT_TRUE(import_results[0]["success"].get<bool>());
ASSERT_FALSE(import_results[1]["success"].get<bool>());
ASSERT_TRUE(import_results[2]["success"].get<bool>());
ASSERT_FALSE(import_results[3]["success"].get<bool>());
ASSERT_STREQ("Field `title` must be a string.", import_results[1]["error"].get<std::string>().c_str());
ASSERT_STREQ("Field `starring` has been declared in the schema, but is not found in the document.",
import_results[3]["error"].get<std::string>().c_str());
ASSERT_STREQ("{\"title\": 123, \"starring\": \"Jazz Gosh\", \"points\": 23, \"cast\": [\"Tom Skerritt\"] }",
import_results[1]["document"].get<std::string>().c_str());
// second record has a duplicate ID
more_records = {"{\"id\": \"id2\", \"title\": \"Test1\", \"starring\": \"Rand Fish\", \"points\": 12, "
"\"cast\": [\"Tom Skerritt\"] }",
"{\"id\": \"id1\", \"title\": \"Test1\", \"starring\": \"Rand Fish\", \"points\": 12, "
"\"cast\": [\"Tom Skerritt\"] }"};
import_response = coll_mul_fields->add_many(more_records, document, CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT, true);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(1, import_response["num_imported"].get<int>());
import_results = import_res_to_json(more_records);
ASSERT_EQ(2, import_results.size());
ASSERT_TRUE(import_results[0]["success"].get<bool>());
ASSERT_FALSE(import_results[1]["success"].get<bool>());
ASSERT_STREQ("A document with id id1 already exists.", import_results[1]["error"].get<std::string>().c_str());
ASSERT_STREQ("{\"id\": \"id1\", \"title\": \"Test1\", \"starring\": \"Rand Fish\", \"points\": 12, "
"\"cast\": [\"Tom Skerritt\"] }",import_results[1]["document"].get<std::string>().c_str());
// handle bad import json
// valid JSON but not a document
more_records = {"[]"};
import_response = coll_mul_fields->add_many(more_records, document, CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT, true);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(0, import_response["num_imported"].get<int>());
import_results = import_res_to_json(more_records);
ASSERT_EQ(1, import_results.size());
ASSERT_EQ(false, import_results[0]["success"].get<bool>());
ASSERT_STREQ("Bad JSON: not a properly formed document.", import_results[0]["error"].get<std::string>().c_str());
ASSERT_STREQ("[]", import_results[0]["document"].get<std::string>().c_str());
// invalid JSON
more_records = {"{"};
import_response = coll_mul_fields->add_many(more_records, document, CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT, true);
ASSERT_FALSE(import_response["success"].get<bool>());
ASSERT_EQ(0, import_response["num_imported"].get<int>());
import_results = import_res_to_json(more_records);
ASSERT_EQ(1, import_results.size());
ASSERT_EQ(false, import_results[0]["success"].get<bool>());
ASSERT_STREQ("Bad JSON: [json.exception.parse_error.101] parse error at line 1, column 2: syntax error "
"while parsing object key - unexpected end of input; expected string literal",
import_results[0]["error"].get<std::string>().c_str());
ASSERT_STREQ("{", import_results[0]["document"].get<std::string>().c_str());
collectionManager.drop_collection("coll_mul_fields");
}
TEST_F(CollectionTest, SearchingWithMissingFields) {
// return error without crashing when searching for fields that do not conform to the schema
Collection *coll_array_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::vector<field> fields = {field("name", field_types::STRING, false),
field("age", field_types::INT32, false),
field("years", field_types::INT32_ARRAY, false),
field("timestamps", field_types::INT64_ARRAY, false),
field("tags", field_types::STRING_ARRAY, true)};
std::vector<sort_by> sort_fields = { sort_by("age", "DESC") };
coll_array_fields = collectionManager.get_collection("coll_array_fields").get();
if(coll_array_fields == nullptr) {
coll_array_fields = collectionManager.create_collection("coll_array_fields", 4, fields, "age").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_array_fields->add(json_line);
}
infile.close();
// when a query field does not exist in the schema
std::vector<std::string> facets;
std::vector<std::string> query_fields_not_found = {"titlez"};
Option<nlohmann::json> res_op = coll_array_fields->search("the", query_fields_not_found, "", facets, sort_fields, {0}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ(404, res_op.code());
ASSERT_STREQ("Could not find a field named `titlez` in the schema.", res_op.error().c_str());
// when a query field is an integer field
res_op = coll_array_fields->search("the", {"age"}, "", facets, sort_fields, {0}, 10);
ASSERT_EQ(400, res_op.code());
ASSERT_STREQ("Field `age` should be a string or a string array.", res_op.error().c_str());
// when a facet field is not defined in the schema
res_op = coll_array_fields->search("the", {"name"}, "", {"timestamps"}, sort_fields, {0}, 10);
ASSERT_EQ(404, res_op.code());
ASSERT_STREQ("Could not find a facet field named `timestamps` in the schema.", res_op.error().c_str());
// when a field used for sorting is not available in the schema for sorting
res_op = coll_array_fields->search("the", {"name"}, "", {}, { sort_by("timestamps", "ASC") }, {0}, 10);
ASSERT_EQ(404, res_op.code());
ASSERT_STREQ("Could not find a field named `timestamps` in the schema for sorting.", res_op.error().c_str());
res_op = coll_array_fields->search("the", {"name"}, "", {}, { sort_by("_rank", "ASC") }, {0}, 10);
ASSERT_EQ(404, res_op.code());
ASSERT_STREQ("Could not find a field named `_rank` in the schema for sorting.", res_op.error().c_str());
collectionManager.drop_collection("coll_array_fields");
}
TEST_F(CollectionTest, IndexingWithBadData) {
// should not crash when document to-be-indexed doesn't match schema
Collection *sample_collection;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, true),
field("age", field_types::INT32, false),
field("average", field_types::INT32, false) };
std::vector<sort_by> sort_fields = { sort_by("age", "DESC"), sort_by("average", "DESC") };
sample_collection = collectionManager.get_collection("sample_collection").get();
if(sample_collection == nullptr) {
sample_collection = collectionManager.create_collection("sample_collection", 4, fields, "age").get();
}
const Option<nlohmann::json> & search_fields_missing_op1 = sample_collection->add("{\"name\": \"foo\", \"age\": 29, \"average\": 78}");
ASSERT_FALSE(search_fields_missing_op1.ok());
ASSERT_STREQ("Field `tags` has been declared in the schema, but is not found in the document.",
search_fields_missing_op1.error().c_str());
const Option<nlohmann::json> & search_fields_missing_op2 = sample_collection->add("{\"namez\": \"foo\", \"tags\": [], \"age\": 34, \"average\": 78}");
ASSERT_FALSE(search_fields_missing_op2.ok());
ASSERT_STREQ("Field `name` has been declared in the schema, but is not found in the document.",
search_fields_missing_op2.error().c_str());
const Option<nlohmann::json> & facet_fields_missing_op1 = sample_collection->add("{\"name\": \"foo\", \"age\": 34, \"average\": 78}");
ASSERT_FALSE(facet_fields_missing_op1.ok());
ASSERT_STREQ("Field `tags` has been declared in the schema, but is not found in the document.",
facet_fields_missing_op1.error().c_str());
const char *doc_str = "{\"name\": \"foo\", \"age\": 34, \"avg\": 78, \"tags\": [\"red\", \"blue\"]}";
const Option<nlohmann::json> & sort_fields_missing_op1 = sample_collection->add(doc_str);
ASSERT_FALSE(sort_fields_missing_op1.ok());
ASSERT_STREQ("Field `average` has been declared in the schema, but is not found in the document.",
sort_fields_missing_op1.error().c_str());
// Handle type errors
doc_str = "{\"name\": \"foo\", \"age\": 34, \"tags\": 22, \"average\": 78}";
const Option<nlohmann::json> & bad_facet_field_op = sample_collection->add(doc_str);
ASSERT_FALSE(bad_facet_field_op.ok());
ASSERT_STREQ("Field `tags` must be an array.", bad_facet_field_op.error().c_str());
doc_str = "{\"name\": \"foo\", \"age\": 34, \"tags\": [\"red\", 22], \"average\": 78}";
const Option<nlohmann::json> & bad_array_field_op = sample_collection->add(doc_str, CREATE, "",
DIRTY_VALUES::REJECT);
ASSERT_FALSE(bad_array_field_op.ok());
ASSERT_STREQ("Field `tags` must be an array of string.", bad_array_field_op.error().c_str());
// with coercion should work
doc_str = "{\"name\": \"foo\", \"age\": 34, \"tags\": [\"red\", 22], \"average\": 78}";
const Option<nlohmann::json> &bad_array_field_coercion_op = sample_collection->add(doc_str, CREATE, "",
DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_TRUE(bad_array_field_coercion_op.ok());
doc_str = "{\"name\": \"foo\", \"age\": 34, \"tags\": [], \"average\": 34}";
const Option<nlohmann::json> & empty_facet_field_op = sample_collection->add(doc_str);
ASSERT_TRUE(empty_facet_field_op.ok());
doc_str = "{\"name\": \"foo\", \"age\": [\"34\"], \"tags\": [], \"average\": 34 }";
const Option<nlohmann::json> & bad_default_sorting_field_op1 = sample_collection->add(doc_str);
ASSERT_FALSE(bad_default_sorting_field_op1.ok());
ASSERT_STREQ("Field `age` must be an int32.", bad_default_sorting_field_op1.error().c_str());
doc_str = "{\"name\": \"foo\", \"tags\": [], \"average\": 34 }";
const Option<nlohmann::json> & bad_default_sorting_field_op3 = sample_collection->add(doc_str);
ASSERT_FALSE(bad_default_sorting_field_op3.ok());
ASSERT_STREQ("Field `age` has been declared as a default sorting field, but is not found in the document.",
bad_default_sorting_field_op3.error().c_str());
doc_str = "{\"name\": \"foo\", \"age\": 34, \"tags\": [], \"average\": \"34\"}";
const Option<nlohmann::json> & bad_rank_field_op = sample_collection->add(doc_str, CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(bad_rank_field_op.ok());
ASSERT_STREQ("Field `average` must be an int32.", bad_rank_field_op.error().c_str());
doc_str = "{\"name\": \"foo\", \"age\": asdadasd, \"tags\": [], \"average\": 34 }";
const Option<nlohmann::json> & bad_default_sorting_field_op4 = sample_collection->add(doc_str);
ASSERT_FALSE(bad_default_sorting_field_op4.ok());
ASSERT_STREQ("Bad JSON: [json.exception.parse_error.101] parse error at line 1, column 24: syntax error "
"while parsing value - invalid literal; last read: '\"age\": a'",
bad_default_sorting_field_op4.error().c_str());
// should return an error when a document with pre-existing id is being added
std::string doc = "{\"id\": \"100\", \"name\": \"foo\", \"age\": 29, \"tags\": [], \"average\": 78}";
Option<nlohmann::json> add_op = sample_collection->add(doc);
ASSERT_TRUE(add_op.ok());
add_op = sample_collection->add(doc);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ(409, add_op.code());
ASSERT_STREQ("A document with id 100 already exists.", add_op.error().c_str());
collectionManager.drop_collection("sample_collection");
}
TEST_F(CollectionTest, EmptyIndexShouldNotCrash) {
Collection *empty_coll;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false),
field("age", field_types::INT32, false),
field("average", field_types::INT32, false)};
std::vector<sort_by> sort_fields = { sort_by("age", "DESC"), sort_by("average", "DESC") };
empty_coll = collectionManager.get_collection("empty_coll").get();
if(empty_coll == nullptr) {
empty_coll = collectionManager.create_collection("empty_coll", 4, fields, "age").get();
}
nlohmann::json results = empty_coll->search("a", {"name"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("empty_coll");
}
TEST_F(CollectionTest, IdFieldShouldBeAString) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false),
field("age", field_types::INT32, false),
field("average", field_types::INT32, false)};
std::vector<sort_by> sort_fields = { sort_by("age", "DESC"), sort_by("average", "DESC") };
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "age").get();
}
nlohmann::json doc;
doc["id"] = 101010;
doc["name"] = "Jane";
doc["age"] = 25;
doc["average"] = 98;
doc["tags"] = nlohmann::json::array();
doc["tags"].push_back("tag1");
Option<nlohmann::json> inserted_id_op = coll1->add(doc.dump());
ASSERT_FALSE(inserted_id_op.ok());
ASSERT_STREQ("Document's `id` field should be a string.", inserted_id_op.error().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, AnIntegerCanBePassedToAFloatField) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("average", field_types::FLOAT, false)};
std::vector<sort_by> sort_fields = { sort_by("average", "DESC") };
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "average").get();
}
nlohmann::json doc;
doc["id"] = "101010";
doc["name"] = "Jane";
doc["average"] = 98;
Option<nlohmann::json> inserted_id_op = coll1->add(doc.dump());
EXPECT_TRUE(inserted_id_op.ok());
collectionManager.drop_collection("coll1");
}
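// Deleting documents should remove them from search results and also clean up the
// underlying store keys, leaving only the collection meta keys behind.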
TEST_F(CollectionTest, DeletionOfADocument) {
collectionManager.drop_collection("collection");
std::ifstream infile(std::string(ROOT_DIR)+"test/documents.jsonl");
std::vector<field> search_fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
std::vector<std::string> query_fields = {"title"};
std::vector<sort_by> sort_fields = { sort_by("points", "DESC") };
Collection *collection_for_del;
collection_for_del = collectionManager.get_collection("collection_for_del").get();
if(collection_for_del == nullptr) {
collection_for_del = collectionManager.create_collection("collection_for_del", 4, search_fields, "points").get();
}
std::string json_line;
rocksdb::Iterator* it;
size_t num_keys = 0;
// dummy record for record id 0: so that the test record IDs match the line numbers
json_line = "{\"points\":10,\"title\":\"z\"}";
collection_for_del->add(json_line);
while (std::getline(infile, json_line)) {
collection_for_del->add(json_line);
}
ASSERT_EQ(25, collection_for_del->get_num_documents());
infile.close();
nlohmann::json results;
// asserts before removing any record
results = collection_for_del->search("cryogenic", query_fields, "", {}, sort_fields, {0}, 5, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
it = store->get_iterator();
num_keys = 0;
for (it->SeekToFirst(); it->Valid(); it->Next()) {
num_keys += 1;
}
ASSERT_EQ(25+25+3, num_keys); // 25 records, 25 id mapping, 3 meta keys
delete it;
// actually remove a record now
collection_for_del->remove("1");
results = collection_for_del->search("cryogenic", query_fields, "", {}, sort_fields, {0}, 5, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(0, results["found"]);
results = collection_for_del->search("archives", query_fields, "", {}, sort_fields, {0}, 5, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["found"]);
collection_for_del->remove("foo"); // custom id record
results = collection_for_del->search("martian", query_fields, "", {}, sort_fields, {0}, 5, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(0, results["found"]);
// delete all records
for(int id = 0; id <= 25; id++) {
collection_for_del->remove(std::to_string(id));
}
ASSERT_EQ(0, collection_for_del->get_num_documents());
it = store->get_iterator();
num_keys = 0;
for (it->SeekToFirst(); it->Valid(); it->Next()) {
num_keys += 1;
}
delete it;
ASSERT_EQ(3, num_keys);
collectionManager.drop_collection("collection_for_del");
}
TEST_F(CollectionTest, DeletionOfDocumentSingularFields) {
Collection *coll1;
std::vector<field> fields = {field("str", field_types::STRING, false),
field("int32", field_types::INT32, false),
field("int64", field_types::INT64, false),
field("float", field_types::FLOAT, false),
field("bool", field_types::BOOL, false)};
std::vector<sort_by> sort_fields = { sort_by("int32", "DESC") };
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "int32").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["str"] = "[NEW] Cell Phone Cases, Holders & Clips!";
doc["int32"] = 100032;
doc["int64"] = 1582369739000;
doc["float"] = -293.24;
doc["bool"] = true;
Option<nlohmann::json> add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
nlohmann::json res = coll1->search("phone", {"str"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(1, res["found"]);
Option<std::string> rem_op = coll1->remove("100");
ASSERT_TRUE(rem_op.ok());
res = coll1->search("phone", {"str"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(0, res["found"].get<int32_t>());
// also assert against the actual index
const Index *index = coll1->_get_index(); // seq id will always be zero for first document
auto search_index = index->_get_search_index();
auto numerical_index = index->_get_numerical_index();
auto str_tree = search_index["str"];
auto int32_tree = numerical_index["int32"];
auto int64_tree = numerical_index["int64"];
auto float_tree = numerical_index["float"];
auto bool_tree = numerical_index["bool"];
ASSERT_EQ(0, art_size(str_tree));
ASSERT_EQ(0, int32_tree->size());
ASSERT_EQ(0, int64_tree->size());
ASSERT_EQ(0, float_tree->size());
ASSERT_EQ(0, bool_tree->size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, DeletionOfDocumentArrayFields) {
Collection *coll1;
std::vector<field> fields = {field("strarray", field_types::STRING_ARRAY, false),
field("int32array", field_types::INT32_ARRAY, false),
field("int64array", field_types::INT64_ARRAY, false),
field("floatarray", field_types::FLOAT_ARRAY, false),
field("boolarray", field_types::BOOL_ARRAY, false),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = { sort_by("points", "DESC") };
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["strarray"] = {"Cell Phones", "Cell Phone Accessories", "Cell Phone Cases & Clips"};
doc["int32array"] = {100, 200, 300};
doc["int64array"] = {1582369739000, 1582369739000, 1582369739000};
doc["floatarray"] = {19.99, 400.999};
doc["boolarray"] = {true, false, true};
doc["points"] = 25;
Option<nlohmann::json> add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
nlohmann::json res = coll1->search("phone", {"strarray"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(1, res["found"].get<size_t>());
Option<std::string> rem_op = coll1->remove("100");
ASSERT_TRUE(rem_op.ok());
res = coll1->search("phone", {"strarray"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(0, res["found"].get<int32_t>());
// also assert against the actual index
const Index *index = coll1->_get_index(); // seq id will always be zero for first document
auto search_index = index->_get_search_index();
auto numerical_index = index->_get_numerical_index();
auto strarray_tree = search_index["strarray"];
auto int32array_tree = numerical_index["int32array"];
auto int64array_tree = numerical_index["int64array"];
auto floatarray_tree = numerical_index["floatarray"];
auto boolarray_tree = numerical_index["boolarray"];
ASSERT_EQ(0, art_size(strarray_tree));
ASSERT_EQ(0, int32array_tree->size());
ASSERT_EQ(0, int64array_tree->size());
ASSERT_EQ(0, floatarray_tree->size());
ASSERT_EQ(0, boolarray_tree->size());
collectionManager.drop_collection("coll1");
}
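// Helper that builds a document with four numeric fields; used by the
// PruneFieldsFromDocument test below to exercise include/exclude pruning.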
nlohmann::json get_prune_doc() {
nlohmann::json document;
document["one"] = 1;
document["two"] = 2;
document["three"] = 3;
document["four"] = 4;
return document;
}
TEST_F(CollectionTest, SearchLargeTextField) {
Collection *coll_large_text;
std::vector<field> fields = {field("text", field_types::STRING, false),
field("age", field_types::INT32, false),
};
std::vector<sort_by> sort_fields = { sort_by(sort_field_const::text_match, "DESC"), sort_by("age", "DESC") };
coll_large_text = collectionManager.get_collection("coll_large_text").get();
if(coll_large_text == nullptr) {
coll_large_text = collectionManager.create_collection("coll_large_text", 4, fields, "age").get();
}
std::string json_line;
std::ifstream infile(std::string(ROOT_DIR)+"test/large_text_field.jsonl");
while (std::getline(infile, json_line)) {
coll_large_text->add(json_line);
}
infile.close();
Option<nlohmann::json> res_op = coll_large_text->search("eguilazer", {"text"}, "", {}, sort_fields, {0}, 10);
ASSERT_TRUE(res_op.ok());
nlohmann::json results = res_op.get();
ASSERT_EQ(1, results["hits"].size());
res_op = coll_large_text->search("tristique", {"text"}, "", {}, sort_fields, {0}, 10);
ASSERT_TRUE(res_op.ok());
results = res_op.get();
ASSERT_EQ(2, results["hits"].size());
// query whose length exceeds maximum highlight window (match score's WINDOW_SIZE)
res_op = coll_large_text->search(
"Phasellus non tristique elit Praesent non arcu id lectus accumsan venenatis at",
{"text"}, "", {}, sort_fields, {0}, 10
);
ASSERT_TRUE(res_op.ok());
results = res_op.get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// only single matched token in match window
res_op = coll_large_text->search("molestie maecenas accumsan", {"text"}, "", {}, sort_fields, {0}, 10);
ASSERT_TRUE(res_op.ok());
results = res_op.get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("non arcu id lectus <mark>accumsan</mark> venenatis at at justo.",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
collectionManager.drop_collection("coll_large_text");
}
TEST_F(CollectionTest, PruneFieldsFromDocument) {
nlohmann::json document = get_prune_doc();
Collection::prune_doc(document, {"one", "two"}, tsl::htrie_set<char>());
ASSERT_EQ(2, document.size());
ASSERT_EQ(1, document["one"]);
ASSERT_EQ(2, document["two"]);
// exclude takes precedence
document = get_prune_doc();
Collection::prune_doc(document, {"one"}, {"one"});
ASSERT_EQ(0, document.size());
// when no inclusion is specified, all fields not mentioned in the exclusion list should be returned
document = get_prune_doc();
Collection::prune_doc(document, tsl::htrie_set<char>(), tsl::htrie_set<char>({"three"}), "");
ASSERT_EQ(3, document.size());
ASSERT_EQ(1, document["one"]);
ASSERT_EQ(2, document["two"]);
ASSERT_EQ(4, document["four"]);
document = get_prune_doc();
Collection::prune_doc(document, tsl::htrie_set<char>(), tsl::htrie_set<char>(), "");
ASSERT_EQ(4, document.size());
// when included field does not exist
document = get_prune_doc();
Collection::prune_doc(document, {"notfound"}, tsl::htrie_set<char>(), "");
ASSERT_EQ(0, document.size());
// when excluded field does not exist
document = get_prune_doc();
Collection::prune_doc(document, tsl::htrie_set<char>(), {"notfound"}, "");
ASSERT_EQ(4, document.size());
// included set is prefix of allowed fields
document = get_prune_doc();
Collection::prune_doc(document, {"ones"}, tsl::htrie_set<char>(), "");
ASSERT_EQ(0, document.size());
}
TEST_F(CollectionTest, StringArrayFieldShouldNotAllowPlainString) {
Collection *coll1;
std::vector<field> fields = {field("categories", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["categories"] = "Should not be allowed!";
doc["points"] = 25;
auto add_op = coll1->add(doc.dump());
ASSERT_FALSE(add_op.ok());
ASSERT_STREQ("Field `categories` must be an array.", add_op.error().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, SearchHighlightShouldFollowThreshold) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "The quick brown fox jumped over the lazy dog and ran straight to the forest to sleep.";
doc["points"] = 25;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// first with a large threshold
auto res = coll1->search("lazy", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "").get();
ASSERT_STREQ("The quick brown fox jumped over the <mark>lazy</mark> dog and ran straight to the forest to sleep.",
res["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
// now with a small threshold (will show only 4 words on either side of the matched token)
res = coll1->search("lazy", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5).get();
ASSERT_STREQ("fox jumped over the <mark>lazy</mark> dog and ran straight",
res["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
// specify the number of surrounding tokens to return
size_t highlight_affix_num_tokens = 2;
res = coll1->search("lazy", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, highlight_affix_num_tokens).get();
ASSERT_STREQ("over the <mark>lazy</mark> dog and",
res["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
highlight_affix_num_tokens = 0;
res = coll1->search("lazy", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, highlight_affix_num_tokens).get();
ASSERT_STREQ("<mark>lazy</mark>",
res["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, SearchHighlightShouldUseHighlightTags) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "The quick brown fox jumped over the lazy fox. "; // adding some extra spaces
doc["points"] = 25;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// use non-default highlighting tags
auto res = coll1->search("lazy", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<em class=\"h\">", "</em>").get();
ASSERT_STREQ("The quick brown fox jumped over the <em class=\"h\">lazy</em> fox. ",
res["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, SearchHighlightWithNewLine) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "Blah, blah\nStark Industries";
doc["points"] = 25;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto res = coll1->search("stark", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0).get();
ASSERT_STREQ("Blah, blah\n<mark>Stark</mark> Industries",
res["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
ASSERT_STREQ("Stark", res["hits"][0]["highlights"][0]["matched_tokens"][0].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
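// UPSERT replaces the stored document wholesale, while UPDATE merges the supplied
// fields into the existing document and requires an `id` (in the body or as a parameter).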
TEST_F(CollectionTest, UpdateDocument) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("tags", field_types::STRING_ARRAY, true, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "The quick brown fox jumped over the lazy dog and ran straight to the forest to sleep.";
doc["tags"] = {"NEWS", "LAZY"};
doc["points"] = 25;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto res = coll1->search("lazy", {"title"}, "", {"tags"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_STREQ("The quick brown fox jumped over the lazy dog and ran straight to the forest to sleep.",
res["hits"][0]["document"]["title"].get<std::string>().c_str());
// reindex the same document verbatim via upsert and query again
add_op = coll1->add(doc.dump(), UPSERT);
ASSERT_TRUE(add_op.ok());
ASSERT_EQ(1, coll1->get_num_documents());
res = coll1->search("lazy", {"title"}, "", {"tags"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(1, res["facet_counts"].size());
ASSERT_STREQ("tags", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
ASSERT_EQ(2, res["facet_counts"][0]["counts"].size());
ASSERT_STREQ("NEWS", res["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][0]["counts"][0]["count"]);
ASSERT_STREQ("LAZY", res["facet_counts"][0]["counts"][1]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) res["facet_counts"][0]["counts"][1]["count"]);
// upsert only part of the document -- document should be REPLACED
nlohmann::json partial_doc = doc;
partial_doc.erase("tags");
add_op = coll1->add(partial_doc.dump(), UPSERT);
ASSERT_TRUE(add_op.ok());
res = coll1->search("lazy", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_FALSE(res["hits"][0].contains("tags"));
// upserting without a mandatory field should be an error
partial_doc = doc;
partial_doc.erase("title");
add_op = coll1->add(partial_doc.dump(), UPSERT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `title` has been declared in the schema, but is not found in the document.", add_op.error());
// try changing the title and searching for an older token
doc["title"] = "The quick brown fox.";
add_op = coll1->add(doc.dump(), UPSERT);
ASSERT_TRUE(add_op.ok());
ASSERT_EQ(1, coll1->get_num_documents());
res = coll1->search("lazy", {"title"}, "", {"tags"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(0, res["hits"].size());
res = coll1->search("quick", {"title"}, "", {"title"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_STREQ("The quick brown fox.", res["hits"][0]["document"]["title"].get<std::string>().c_str());
// try to update document tags without `id`
nlohmann::json doc2;
doc2["tags"] = {"SENTENCE"};
add_op = coll1->add(doc2.dump(), UPDATE);
ASSERT_FALSE(add_op.ok());
ASSERT_STREQ("For update, the `id` key must be provided.", add_op.error().c_str());
// now change tags with id
doc2["id"] = "100";
add_op = coll1->add(doc2.dump(), UPDATE);
ASSERT_TRUE(add_op.ok());
// check for old tag
res = coll1->search("NEWS", {"tags"}, "", {"tags"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(0, res["hits"].size());
// now check for new tag and also try faceting on that field
res = coll1->search("SENTENCE", {"tags"}, "", {"tags"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_STREQ("SENTENCE", res["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
// try changing points
nlohmann::json doc3;
doc3["points"] = 99;
doc3["id"] = "100";
add_op = coll1->add(doc3.dump(), UPDATE);
ASSERT_TRUE(add_op.ok());
res = coll1->search("*", {"tags"}, "points: > 90", {"tags"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(99, res["hits"][0]["document"]["points"].get<size_t>());
// id can also be passed as a parameter
nlohmann::json doc4;
doc4["points"] = 105;
add_op = coll1->add(doc4.dump(), UPDATE, "100");
ASSERT_TRUE(add_op.ok());
res = coll1->search("*", {"tags"}, "points: > 101", {"tags"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(105, res["hits"][0]["document"]["points"].get<size_t>());
// try to change a field with a bad value and verify that the old document is restored
doc4["points"] = "abc";
add_op = coll1->add(doc4.dump(), UPDATE, "100");
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `points` must be an int32.", add_op.error());
res = coll1->search("*", {"tags"}, "points: > 101", {"tags"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(105, res["hits"][0]["document"]["points"].get<size_t>());
// when the explicit path id does not match the doc id, an error should be returned
nlohmann::json doc5;
doc5["id"] = "800";
doc5["title"] = "The Secret Seven";
doc5["points"] = 250;
doc5["tags"] = {"BOOK", "ENID BLYTON"};
add_op = coll1->add(doc5.dump(), UPSERT, "799");
ASSERT_FALSE(add_op.ok());
ASSERT_EQ(400, add_op.code());
ASSERT_STREQ("The `id` of the resource does not match the `id` in the JSON body.", add_op.error().c_str());
// passing an empty id should not succeed
nlohmann::json doc6;
doc6["id"] = "";
doc6["title"] = "The Secret Seven";
doc6["points"] = 250;
doc6["tags"] = {"BOOK", "ENID BLYTON"};
add_op = coll1->add(doc6.dump(), UPDATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ(400, add_op.code());
ASSERT_STREQ("The `id` should not be empty.", add_op.error().c_str());
collectionManager.drop_collection("coll1");
}
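// update_matching_filter() applies a partial document to every record matching the
// filter expression; the trailing argument used below controls the update batch size.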
TEST_F(CollectionTest, UpdateDocuments) {
nlohmann::json schema = R"({
"name": "update_docs_collection",
"enable_nested_fields": true,
"fields": [
{"name": "user_name", "type": "string", "facet": true},
{"name": "likes", "type": "int32"},
{"name": "content", "type": "object"}
],
"default_sorting_field": "likes"
})"_json;
Collection *update_docs_collection = collectionManager.get_collection("update_docs_collection").get();
if (update_docs_collection == nullptr) {
auto op = CollectionManager::create_collection(schema);
ASSERT_TRUE(op.ok());
update_docs_collection = op.get();
}
std::vector<std::string> json_lines = {
R"({"user_name": "fat_cat","likes": 5215,"content": {"title": "cat data 1", "body": "cd1"}})",
R"({"user_name": "fast_dog","likes": 273,"content": {"title": "dog data 1", "body": "dd1"}})",
R"({"user_name": "fat_cat","likes": 2133,"content": {"title": "cat data 2", "body": "cd2"}})",
R"({"user_name": "fast_dog","likes": 9754,"content": {"title": "dog data 2", "body": "dd2"}})",
R"({"user_name": "fast_dog","likes": 576,"content": {"title": "dog data 3", "body": "dd3"}})"
};
for (auto const& json: json_lines){
auto add_op = update_docs_collection->add(json);
if (!add_op.ok()) {
std::cout << add_op.error() << std::endl;
}
ASSERT_TRUE(add_op.ok());
}
std::vector<sort_by> sort_fields = { sort_by("likes", "DESC") };
auto res = update_docs_collection->search("cat data", {"content"}, "", {}, sort_fields, {0}, 10).get();
ASSERT_EQ(2, res["hits"].size());
for (size_t i = 0; i < res["hits"].size(); i++) {
ASSERT_EQ("fat_cat", res["hits"][i]["document"]["user_name"].get<std::string>());
}
nlohmann::json document;
document["user_name"] = "slim_cat";
std::string dirty_values;
auto update_op = update_docs_collection->update_matching_filter("user_name:=fat_cat", document.dump(), dirty_values);
ASSERT_TRUE(update_op.ok());
ASSERT_EQ(2, update_op.get()["num_updated"]);
res = update_docs_collection->search("cat data", {"content"}, "", {}, sort_fields, {0}, 10).get();
ASSERT_EQ(2, res["hits"].size());
for (size_t i = 0; i < res["hits"].size(); i++) {
ASSERT_EQ("slim_cat", res["hits"][i]["document"]["user_name"].get<std::string>());
}
// Test batching
res = update_docs_collection->search("dog data", {"content"}, "", {}, sort_fields, {0}, 10).get();
ASSERT_EQ(3, res["hits"].size());
for (size_t i = 0; i < res["hits"].size(); i++) {
ASSERT_EQ("fast_dog", res["hits"][i]["document"]["user_name"].get<std::string>());
}
document["user_name"] = "lazy_dog";
update_op = update_docs_collection->update_matching_filter("user_name:=fast_dog", document.dump(), dirty_values, 2);
ASSERT_TRUE(update_op.ok());
ASSERT_EQ(3, update_op.get()["num_updated"]);
res = update_docs_collection->search("dog data", {"content"}, "", {}, sort_fields, {0}, 10).get();
ASSERT_EQ(3, res["hits"].size());
for (size_t i = 0; i < res["hits"].size(); i++) {
ASSERT_EQ("lazy_dog", res["hits"][i]["document"]["user_name"].get<std::string>());
}
// Test updating nested fields
res = update_docs_collection->search("*", {}, "user_name:=slim_cat", {}, sort_fields, {0}, 10).get();
ASSERT_EQ(2, res["hits"].size());
for (size_t i = 0; i < res["hits"].size(); i++) {
ASSERT_EQ("cat data " + std::to_string(i + 1), res["hits"][i]["document"]["content"]["title"].get<std::string>());
}
document.clear();
document["content"]["title"] = "fancy cat title";
update_op = update_docs_collection->update_matching_filter("user_name:=slim_cat", document.dump(), dirty_values, 2);
ASSERT_TRUE(update_op.ok());
ASSERT_EQ(2, update_op.get()["num_updated"]);
res = update_docs_collection->search("*", {}, "user_name:=slim_cat", {}, sort_fields, {0}, 10).get();
ASSERT_EQ(2, res["hits"].size());
for (size_t i = 0; i < res["hits"].size(); i++) {
ASSERT_EQ("fancy cat title", res["hits"][i]["document"]["content"]["title"].get<std::string>());
}
// Test updating all documents
res = update_docs_collection->search("*", {}, "", {}, sort_fields, {0}, 10).get();
ASSERT_EQ(5, res["hits"].size());
for (size_t i = 0; i < res["hits"].size(); i++) {
ASSERT_NE(0, res["hits"][i]["document"]["likes"].get<int>());
}
document.clear();
document["likes"] = 0;
update_op = update_docs_collection->update_matching_filter("*", document.dump(), dirty_values, 2);
ASSERT_TRUE(update_op.ok());
ASSERT_EQ(5, update_op.get()["num_updated"]);
res = update_docs_collection->search("*", {}, "", {}, sort_fields, {0}, 10).get();
ASSERT_EQ(5, res["hits"].size());
for (size_t i = 0; i < res["hits"].size(); i++) {
ASSERT_EQ(0, res["hits"][i]["document"]["likes"].get<int>());
}
collectionManager.drop_collection("update_docs_collection");
}
TEST_F(CollectionTest, UpdateDocumentSorting) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "100";
doc1["title"] = "The quick brown fox jumped over the lazy dog and ran straight to the forest to sleep.";
doc1["tags"] = {"NEWS", "LAZY"};
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "101";
doc2["title"] = "The random sentence.";
doc2["tags"] = {"RANDOM"};
doc2["points"] = 101;
auto add_op = coll1->add(doc1.dump());
coll1->add(doc2.dump());
auto res = coll1->search("*", {"tags"}, "", {"tags"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ(101, res["hits"][0]["document"]["points"].get<size_t>());
ASSERT_STREQ("101", res["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(100, res["hits"][1]["document"]["points"].get<size_t>());
ASSERT_STREQ("100", res["hits"][1]["document"]["id"].get<std::string>().c_str());
// now update doc1 points from 100 -> 1000 and it should bubble up
doc1["points"] = 1000;
coll1->add(doc1.dump(), UPDATE);
res = coll1->search("*", {"tags"}, "", {"tags"}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ(1000, res["hits"][0]["document"]["points"].get<size_t>());
ASSERT_STREQ("100", res["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(101, res["hits"][1]["document"]["points"].get<size_t>());
ASSERT_STREQ("101", res["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, UpdateDocumentUnIndexedField) {
Collection* coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "The quick brown fox jumped over the lazy dog and ran straight to the forest to sleep.";
doc["foo"] = "foo1";
doc["points"] = 25;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto res = coll1->search("lazy", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_STREQ("The quick brown fox jumped over the lazy dog and ran straight to the forest to sleep.",
res["hits"][0]["document"]["title"].get<std::string>().c_str());
// reindex the document again by changing only the unindexed field
doc["foo"] = "foo2";
add_op = coll1->add(doc.dump(), UPSERT);
ASSERT_TRUE(add_op.ok());
res = coll1->search("lazy", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_STREQ("foo2", res["hits"][0]["document"]["foo"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
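// When a field is listed in highlight_full_fields, the response contains the fully
// highlighted field value in addition to the snippet.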
TEST_F(CollectionTest, SearchHighlightFieldFully) {
Collection *coll1;
std::vector<field> fields = { field("title", field_types::STRING, true),
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "100";
doc["title"] = "The quick brown fox jumped over the lazy dog and ran straight to the forest to sleep.";
doc["tags"] = {"NEWS", "LAZY"};
doc["points"] = 25;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// look for fully highlighted value in response
auto res = coll1->search("lazy", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title").get();
ASSERT_EQ(1, res["hits"][0]["highlights"].size());
ASSERT_STREQ("The quick brown fox jumped over the <mark>lazy</mark> dog and ran straight to the forest to sleep.",
res["hits"][0]["highlights"][0]["value"].get<std::string>().c_str());
// should not return value key when highlight_full_fields is not specified
res = coll1->search("lazy", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "").get();
ASSERT_EQ(3, res["hits"][0]["highlights"][0].size());
// query multiple fields
res = coll1->search("lazy", {"title", "tags"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "title, tags").get();
ASSERT_EQ(2, res["hits"][0]["highlights"].size());
ASSERT_EQ("tags", res["hits"][0]["highlights"][0]["field"]);
ASSERT_EQ(1, res["hits"][0]["highlights"][0]["values"].size());
ASSERT_EQ("<mark>LAZY</mark>", res["hits"][0]["highlights"][0]["values"][0].get<std::string>());
ASSERT_EQ(1, res["hits"][0]["highlights"][0]["snippets"].size());
ASSERT_EQ("<mark>LAZY</mark>", res["hits"][0]["highlights"][0]["snippets"][0].get<std::string>());
ASSERT_EQ("The quick brown fox jumped over the <mark>lazy</mark> dog and ran straight to the forest to sleep.",
res["hits"][0]["highlights"][1]["value"].get<std::string>());
ASSERT_EQ("title", res["hits"][0]["highlights"][1]["field"]);
ASSERT_EQ(1, res["hits"][0]["highlights"][1]["matched_tokens"].size());
ASSERT_STREQ("lazy", res["hits"][0]["highlights"][1]["matched_tokens"][0].get<std::string>().c_str());
// excluded fields should not be returned in highlights section
spp::sparse_hash_set<std::string> excluded_fields = {"tags"};
res = coll1->search("lazy", {"title", "tags"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
excluded_fields, 10, "", 5, 5, "title, tags").get();
ASSERT_EQ(1, res["hits"][0]["highlights"].size());
ASSERT_STREQ("The quick brown fox jumped over the <mark>lazy</mark> dog and ran straight to the forest to sleep.",
res["hits"][0]["highlights"][0]["value"].get<std::string>().c_str());
// when all fields are excluded
excluded_fields = {"tags", "title"};
res = coll1->search("lazy", {"title", "tags"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
excluded_fields, 10, "", 5, 5, "title, tags").get();
ASSERT_EQ(0, res["hits"][0]["highlights"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, OptionalFields) {
Collection *coll1;
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("description", field_types::STRING, true, true),
field("max", field_types::INT32, false),
field("scores", field_types::INT64_ARRAY, false, true),
field("average", field_types::FLOAT, false, true),
field("is_valid", field_types::BOOL, false, true),
};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "max").get();
}
std::ifstream infile(std::string(ROOT_DIR)+"test/optional_fields.jsonl");
std::string json_line;
while (std::getline(infile, json_line)) {
auto add_op = coll1->add(json_line);
if(!add_op.ok()) {
std::cout << add_op.error() << std::endl;
}
ASSERT_TRUE(add_op.ok());
}
infile.close();
// first must be able to fetch all records (i.e. all must have been indexed)
auto res = coll1->search("*", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(6, res["found"].get<size_t>());
// search on optional `description` field
res = coll1->search("book", {"description"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, res["found"].get<size_t>());
// filter on optional `average` field
res = coll1->search("the", {"title"}, "average: >0", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, res["found"].get<size_t>());
// facet on optional `description` field
res = coll1->search("the", {"title"}, "", {"description"}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(6, res["found"].get<size_t>());
ASSERT_EQ(5, res["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_STREQ("description", res["facet_counts"][0]["field_name"].get<std::string>().c_str());
// sort_by optional `average` field should be allowed (default used for missing values)
std::vector<sort_by> sort_fields = { sort_by("average", "DESC") };
auto res_op = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_TRUE(res_op.ok());
res = res_op.get();
ASSERT_EQ(6, res["found"].get<size_t>());
ASSERT_EQ(0, res["hits"][5]["document"].count("average")); // record with missing average is last
// try deleting a record that has an optional field
Option<std::string> remove_op = coll1->remove("1");
ASSERT_TRUE(remove_op.ok());
// try fetching the schema (should contain optional field)
nlohmann::json coll_summary = coll1->get_summary_json();
ASSERT_STREQ("title", coll_summary["fields"][0]["name"].get<std::string>().c_str());
ASSERT_STREQ("string", coll_summary["fields"][0]["type"].get<std::string>().c_str());
ASSERT_FALSE(coll_summary["fields"][0]["facet"].get<bool>());
ASSERT_FALSE(coll_summary["fields"][0]["optional"].get<bool>());
ASSERT_STREQ("description", coll_summary["fields"][1]["name"].get<std::string>().c_str());
ASSERT_STREQ("string", coll_summary["fields"][1]["type"].get<std::string>().c_str());
ASSERT_TRUE(coll_summary["fields"][1]["facet"].get<bool>());
ASSERT_TRUE(coll_summary["fields"][1]["optional"].get<bool>());
// default sorting field should not be declared optional
fields = {
field("title", field_types::STRING, false),
field("score", field_types::INT32, false, true),
};
auto create_op = collectionManager.create_collection("coll2", 4, fields, "score");
ASSERT_FALSE(create_op.ok());
ASSERT_STREQ("Default sorting field `score` cannot be an optional field.", create_op.error().c_str());
collectionManager.drop_collection("coll1");
}
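// Optional fields may be sent explicitly as null; such values are accepted but not indexed.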
TEST_F(CollectionTest, OptionalFieldCanBeNull) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false, true),
field("genres", field_types::STRING_ARRAY, false, true),
field("launch_year", field_types::INT32, false, true),
field("updated_at", field_types::INT64, false, true),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Beat it";
doc["artist"] = nullptr;
doc["genres"] = nullptr;
doc["launch_year"] = nullptr;
doc["updated_at"] = nullptr;
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
ASSERT_EQ(2, coll1->_get_index()->_get_search_index().at("title")->size);
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().at("artist")->size);
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().at("genres")->size);
auto results = coll1->search("beat",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
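
// Empty and whitespace-only strings (including string array elements) are accepted
// but skipped during indexing, so they create no index entries.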
TEST_F(CollectionTest, EmptyStringNotIndexed) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false, true),
field("genres", field_types::STRING_ARRAY, false, true),
field("launch_year", field_types::STRING, false, true),
field("labels", field_types::STRING_ARRAY, false, true),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Beat it";
doc["artist"] = "";
doc["launch_year"] = " ";
doc["genres"] = {""};
doc["labels"] = {"song", " ", ""};
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("beat",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(2, coll1->_get_index()->_get_search_index().at("title")->size);
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().at("artist")->size);
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().at("launch_year")->size);
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().at("genres")->size);
ASSERT_EQ(1, coll1->_get_index()->_get_search_index().at("labels")->size);
collectionManager.drop_collection("coll1");
}
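
// Wildcard (`*`) queries respect the per_page, page and limit_hits parameters;
// requesting more than 250 hits per page is rejected with a 422.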
TEST_F(CollectionTest, WildcardQueryReturnsResultsBasedOnPerPageParam) {
std::vector<std::string> facets;
spp::sparse_hash_set<std::string> empty;
nlohmann::json results = collection->search("*", query_fields, "", facets, sort_fields, {0}, 12, 1,
FREQUENCY, {false}, 1000, empty, empty, 10).get();
ASSERT_EQ(12, results["hits"].size());
ASSERT_EQ(25, results["found"].get<int>());
// should match collection size
results = collection->search("*", query_fields, "", facets, sort_fields, {0}, 100, 1,
FREQUENCY, {false}, 1000, empty, empty, 10).get();
ASSERT_EQ(25, results["hits"].size());
ASSERT_EQ(25, results["found"].get<int>());
    // cannot fetch more than the built-in limit of 250 hits per page
auto res_op = collection->search("*", query_fields, "", facets, sort_fields, {0}, 251, 1,
FREQUENCY, {false}, 1000, empty, empty, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ(422, res_op.code());
ASSERT_STREQ("Only upto 250 hits can be fetched per page.", res_op.error().c_str());
// when page number is 0, just fetch first page
results = collection->search("*", query_fields, "", facets, sort_fields, {0}, 10, 0,
FREQUENCY, {false}, 1000, empty, empty, 10).get();
ASSERT_EQ(10, results["hits"].size());
ASSERT_EQ(25, results["found"].get<int>());
// do pagination
results = collection->search("*", query_fields, "", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false}, 1000, empty, empty, 10).get();
ASSERT_EQ(10, results["hits"].size());
ASSERT_EQ(25, results["found"].get<int>());
results = collection->search("*", query_fields, "", facets, sort_fields, {0}, 10, 2,
FREQUENCY, {false}, 1000, empty, empty, 10).get();
ASSERT_EQ(10, results["hits"].size());
ASSERT_EQ(25, results["found"].get<int>());
results = collection->search("*", query_fields, "", facets, sort_fields, {0}, 10, 3,
FREQUENCY, {false}, 1000, empty, empty, 10).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(25, results["found"].get<int>());
// enforce limit_hits
auto limit_hits = 20;
results = collection->search("*", query_fields, "", facets, sort_fields, {0}, 10, 3,
FREQUENCY, {false}, 1000,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, limit_hits).get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(25, results["found"].get<int>());
results = collection->search("*", query_fields, "", facets, sort_fields, {0}, 15, 2,
FREQUENCY, {false}, 1000,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, limit_hits).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(25, results["found"].get<int>());
}
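
// remove_if_found() reports via its Option<bool> payload whether the document
// existed, instead of returning an error for a missing id.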
TEST_F(CollectionTest, RemoveIfFound) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
for(size_t i=0; i<10; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = "Title " + std::to_string(i);
doc["points"] = i;
coll1->add(doc.dump());
}
auto res = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1,
token_ordering::FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0).get();
ASSERT_EQ(10, res["found"].get<int>());
// removing found doc
Option<bool> found_op = coll1->remove_if_found(0);
ASSERT_TRUE(found_op.ok());
ASSERT_TRUE(found_op.get());
auto get_op = coll1->get("0");
ASSERT_FALSE(get_op.ok());
ASSERT_EQ(404, get_op.code());
// removing doc not found
found_op = coll1->remove_if_found(100);
ASSERT_TRUE(found_op.ok());
ASSERT_FALSE(found_op.get());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, CreateCollectionInvalidFieldType) {
std::vector<field> fields = {field("title", "blah", true),
field("points", "int", false)};
std::vector<sort_by> sort_fields = {sort_by("points", "DESC")};
collectionManager.drop_collection("coll1");
auto create_op = collectionManager.create_collection("coll1", 4, fields, "points");
ASSERT_FALSE(create_op.ok());
ASSERT_STREQ("Field `title` has an invalid data type `blah`, see docs for supported data types.",
create_op.error().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, MultiFieldRelevance) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Down There by the Train", "Dustin Kensrue"},
{"Down There by the Train", "Gord Downie"},
{"State Trooper", "Dustin Kensrue"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("Dustin Kensrue Down There by the Train",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
std::vector<size_t> expected_ids = {0, 1, 2};
for(size_t i=0; i<expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], std::stoi(results["hits"][i]["document"]["id"].get<std::string>()));
}
ASSERT_STREQ("<mark>Down</mark> <mark>There</mark> <mark>by</mark> <mark>the</mark> <mark>Train</mark>",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Down</mark> <mark>There</mark> <mark>by</mark> <mark>the</mark> <mark>Train</mark>",
results["hits"][1]["highlights"][0]["snippet"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Dustin</mark> <mark>Kensrue</mark>",
results["hits"][2]["highlights"][0]["snippet"].get<std::string>().c_str());
// remove documents, reindex in another order and search again
for(size_t i=0; i<expected_ids.size(); i++) {
coll1->remove_if_found(i, true);
}
records = {
{"State Trooper", "Dustin Kensrue"},
{"Down There by the Train", "Gord Downie"},
{"Down There by the Train", "Dustin Kensrue"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
results = coll1->search("Dustin Kensrue Down There by the Train",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
expected_ids = {2, 1, 0};
for(size_t i=0; i<expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], std::stoi(results["hits"][i]["document"]["id"].get<std::string>()));
}
// with exclude token syntax
results = coll1->search("-downie dustin kensrue down there by the train",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
expected_ids = {2, 0};
for(size_t i=0; i<expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], std::stoi(results["hits"][i]["document"]["id"].get<std::string>()));
}
collectionManager.drop_collection("coll1");
}
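
// A field that contains every query token outranks a weighted field that matches
// only some tokens; weights only decide the order when no single field covers all
// the query tokens.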
TEST_F(CollectionTest, MultiFieldRelevance2) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"A Daikon Freestyle", "Ghosts on a Trampoline"},
{"Leaving on a Jetplane", "Coby Grant"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("on a jetplane",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["num_tokens_dropped"]);
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][1]["text_match_info"]["num_tokens_dropped"]);
// changing weights to favor artist still favors title because it contains all tokens of the query
results = coll1->search("on a jetplane",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 4}).get();
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["num_tokens_dropped"]);
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][1]["text_match_info"]["num_tokens_dropped"]);
// use same weights
results = coll1->search("on a jetplane",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["num_tokens_dropped"]);
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][1]["text_match_info"]["num_tokens_dropped"]);
    // weights favoring artist take effect when no single field contains all the query tokens
results = coll1->search("on a helicopter",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 4}).get();
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][0]["text_match_info"]["num_tokens_dropped"]);
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][1]["text_match_info"]["num_tokens_dropped"]);
collectionManager.drop_collection("coll1");
}
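
// query_by_weights must contain one weight per query_by field; an empty list is
// allowed and falls back to the default weights.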
TEST_F(CollectionTest, FieldWeightsNotProper) {
// when weights are not given properly
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
auto results_op = coll1->search("on a jetplane",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1});
ASSERT_FALSE(results_op.ok());
ASSERT_STREQ("Number of weights in `query_by_weights` does not match number "
"of `query_by` fields.", results_op.error().c_str());
results_op = coll1->search("on a jetplane",
{"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {2, 1});
ASSERT_FALSE(results_op.ok());
ASSERT_STREQ("Number of weights in `query_by_weights` does not match number "
"of `query_by` fields.", results_op.error().c_str());
    // empty weights are fine (default weights will be used)
results_op = coll1->search("on a jetplane",
{"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {});
ASSERT_TRUE(results_op.ok());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, MultiFieldRelevance3) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Taylor Swift Karaoke: reputation", "Taylor Swift"},
{"Style", "Taylor Swift"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("style taylor swift",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["num_tokens_dropped"]);
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][1]["text_match_info"]["num_tokens_dropped"]);
results = coll1->search("swift",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["num_tokens_dropped"]);
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(0, results["hits"][1]["text_match_info"]["num_tokens_dropped"]);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, MultiFieldRelevance4) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Maddras Dreams", "Chennai King"},
{"Maddurai Express", "Maddura Maddy"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("maddras",
{"title", "artist"}, "", {}, {}, {2}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, MultiFieldRelevance5) {
Collection *coll1;
std::vector<field> fields = {field("company_name", field_types::STRING, false),
field("country", field_types::STRING, false),
field("field_a", field_types::STRING, false),
field("num_employees", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "num_employees").get();
}
std::vector<std::vector<std::string>> records = {
{"Stark Industries ™", "Canada", "Canadia", "5215"},
{"Canaida Corp", "United States", "Canadoo", "200"},
{"Acme Corp", "Mexico", "Canadoo", "300"}
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["company_name"] = records[i][0];
doc["country"] = records[i][1];
doc["field_a"] = records[i][2];
doc["num_employees"] = std::stoi(records[i][3]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("Canada",
{"company_name","country","field_a"}, "", {}, {}, {2}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1, 1}).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][2]["document"]["id"].get<std::string>().c_str());
results = coll1->search("Canada",
{"company_name","field_a","country"}, "", {}, {}, {2}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1, 1}).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(2, results["hits"][0]["highlights"].size());
ASSERT_EQ("field_a", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("<mark>Canadia</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("country", results["hits"][0]["highlights"][1]["field"].get<std::string>());
ASSERT_EQ("<mark>Canada</mark>", results["hits"][0]["highlights"][1]["snippet"].get<std::string>());
ASSERT_EQ(1, results["hits"][1]["highlights"].size());
ASSERT_EQ("field_a", results["hits"][1]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("<mark>Canadoo</mark>", results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ(2, results["hits"][2]["highlights"].size());
ASSERT_EQ("field_a", results["hits"][2]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("<mark>Canadoo</mark>", results["hits"][2]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("company_name", results["hits"][2]["highlights"][1]["field"].get<std::string>());
ASSERT_EQ("<mark>Canaida</mark> Corp", results["hits"][2]["highlights"][1]["snippet"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, MultiFieldRelevance6) {
    // when there is an exact match, the number of fields containing that exact match is not used as a ranking signal
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Taylor Swift", "Taylor Swift"},
{"Taylor Swift Song", "Taylor Swift"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("taylor swift",
{"title", "artist"}, "", {}, {}, {2}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
// when exact matches are disabled
results = coll1->search("taylor swift",
{"title", "artist"}, "", {}, {}, {2}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}, 100, false).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
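
// A document whose field exactly matches the query ranks above longer documents
// that merely contain all the query tokens.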
TEST_F(CollectionTest, ExactMatch) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Alpha", "DJ"},
{"Alpha Beta", "DJ"},
{"Alpha Beta Gamma", "DJ"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("alpha beta",
{"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY,
{true}, 10).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][2]["document"]["id"].get<std::string>().c_str());
results = coll1->search("alpha", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][2]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
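
// Highlights are reported per matched field; string array fields use the plural
// `snippets` key instead of `snippet`.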
TEST_F(CollectionTest, MultiFieldHighlighting) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("description", field_types::STRING, false),
field("categories", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Best Wireless Vehicle Charger",
"Easily replenish your cell phone with this wireless charger.",
"Cell Phones > Cell Phone Accessories > Car Chargers"},
{"Annie's Song",
"John Denver",
"Album > Compilation"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> categories;
StringUtils::split(records[i][2], categories, ">");
doc["id"] = std::to_string(i);
doc["name"] = records[i][0];
doc["description"] = records[i][1];
doc["categories"] = categories;
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("charger",
{"name","description","categories"}, "", {}, {}, {2}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1, 1}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(3, results["hits"][0]["highlights"].size());
ASSERT_EQ("name", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("Best Wireless Vehicle <mark>Charger</mark>",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("description", results["hits"][0]["highlights"][1]["field"].get<std::string>());
ASSERT_EQ("Easily replenish your cell phone with this wireless <mark>charger</mark>.",
results["hits"][0]["highlights"][1]["snippet"].get<std::string>());
ASSERT_EQ("categories", results["hits"][0]["highlights"][2]["field"].get<std::string>());
ASSERT_EQ("Car <mark>Charger</mark>s", results["hits"][0]["highlights"][2]["snippets"][0].get<std::string>());
results = coll1->search("John With Denver",
{"description"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("description", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("<mark>John</mark> <mark>Denver</mark>",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("Annies song John Denver",
{"name","description"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(2, results["hits"][0]["highlights"].size());
ASSERT_EQ("name", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("<mark>Annie's</mark> <mark>Song</mark>",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("description", results["hits"][0]["highlights"][1]["field"].get<std::string>());
ASSERT_EQ("<mark>John</mark> <mark>Denver</mark>",
results["hits"][0]["highlights"][1]["snippet"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, MultiFieldMatchRanking) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Style", "Taylor Swift"},
{"Blank Space", "Taylor Swift"},
{"Balance Overkill", "Taylor Swift"},
{"Cardigan", "Taylor Swift"},
{"Invisible String", "Taylor Swift"},
{"The Last Great American Dynasty", "Taylor Swift"},
{"Mirrorball", "Taylor Swift"},
{"Peace", "Taylor Swift"},
{"Betty", "Taylor Swift"},
{"Mad Woman", "Taylor Swift"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("taylor swift style",
{"artist", "title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(10, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("9", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("8", results["hits"][2]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, MultiFieldMatchRankingOnArray) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("strong_skills", field_types::STRING_ARRAY, false),
field("skills", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::vector<std::string>>> records = {
{{"John Snow"}, {"Golang", "Vue", "React"}, {"Docker", "Goa", "Elixir"}},
{{"Jack Dan"}, {"Golang", "Phoenix", "React"}, {"Docker", "Vue", "Kubernetes"}},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["name"] = records[i][0][0];
doc["strong_skills"] = records[i][1];
doc["skills"] = records[i][2];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("golang vue",
{"strong_skills", "skills"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, MultiFieldMatchRankingOnFieldOrder) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Toxic", "Britney Spears"},
{"Bad", "Michael Jackson"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("michael jackson toxic",
{"title", "artist"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 6}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
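
// For the prefix query "roti", documents containing the exact token rank ahead of
// documents that only contain prefix matches such as "Rotini".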
TEST_F(CollectionTest, PrefixRankedAfterExactMatch) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Rotini Puttanesca"},
{"Poulet Roti Tout Simple"},
{"Chapatis (Roti)"},
{"School Days Rotini Pasta Salad"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("roti", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("2", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("3", results["hits"][2]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
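
// Highlighting handles accented characters, dotted tokens like "T.r.a.i.n" and
// single-character tokens without corrupting the snippet.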
TEST_F(CollectionTest, HighlightWithAccentedCharacters) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Mise T.J. à jour Timy depuis PC"},
{"Down There by the T.r.a.i.n"},
{"State Trooper"},
{"The Google Nexus Q Is Baffling"},
};
for (size_t i = 0; i < records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("à jour", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("Mise T.J. <mark>à</mark> <mark>jour</mark> Timy depuis PC",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
ASSERT_EQ(2, results["hits"][0]["highlights"][0]["matched_tokens"].size());
ASSERT_STREQ("à", results["hits"][0]["highlights"][0]["matched_tokens"][0].get<std::string>().c_str());
ASSERT_STREQ("jour", results["hits"][0]["highlights"][0]["matched_tokens"][1].get<std::string>().c_str());
results = coll1->search("by train", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("Down There <mark>by</mark> the <mark>T.r.a.i.n</mark>",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
ASSERT_STREQ("Down There <mark>by</mark> the <mark>T.r.a.i.n</mark>",
results["hits"][0]["highlights"][0]["value"].get<std::string>().c_str());
results = coll1->search("state trooper", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("<mark>State</mark> <mark>Trooper</mark>",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
// test single character highlight
results = coll1->search("q", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("The Google Nexus <mark>Q</mark> Is Baffling",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, DISABLED_SearchingForRecordsWithSpecialChars) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("url", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Amazon Home", "https://amazon.com/"},
{"Google Home", "https://google.com///"},
{"Github Issue", "https://github.com/typesense/typesense/issues/241"},
{"Amazon Search", "https://www.amazon.com/s?k=phone&ref=nb_sb_noss_2"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["url"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("google",
{"title", "url"}, "", {}, {}, {2}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(2, results["hits"][0]["highlights"].size());
ASSERT_EQ("<mark>Google</mark> Home", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("https://<mark>google</mark>.com///", results["hits"][0]["highlights"][1]["snippet"].get<std::string>());
results = coll1->search("amazon.com",
{"title", "url"}, "", {}, {}, {2}, 10, 1, FREQUENCY).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_STREQ("3", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][2]["document"]["id"].get<std::string>().c_str());
results = coll1->search("typesense",
{"title", "url"}, "", {}, {}, {2}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("2", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("nb_sb_noss_2",
{"title", "url"}, "", {}, {}, {2}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("3", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("https://www.amazon.com/s?k=phone&ref=<mark>nb</mark>_<mark>sb</mark>_<mark>noss</mark>_<mark>2</mark>",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
collectionManager.drop_collection("coll1");
}
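
// num_typos can be given per query_by field or as a single value shared by all
// fields; mismatched per-field parameter counts are rejected with an error.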
TEST_F(CollectionTest, FieldSpecificNumTypos) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Taylor Swift Karaoke: reputation", "Taylor Swift"},
{"Taylor & Friends", "Adam Smith"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("tayylor",
{"title", "artist"}, "", {}, {}, {1, 1}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
results = coll1->search("tayylor",
{"title", "artist"}, "", {}, {}, {0, 1}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
    // must return an error when num_typos does not match the number of fields queried
auto res_op = coll1->search("tayylor",
{"title"}, "", {}, {}, {0, 1}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Number of weights in `query_by_weights` does not match number of `query_by` fields.", res_op.error());
// can use a single typo param for multiple fields
results = coll1->search("tayylor",
{"title", "artist"}, "", {}, {}, {1}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
// wildcard search with typos
results = coll1->search("*",
{}, "", {}, {}, {1}, 10, 1, FREQUENCY,
{true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionTest, BadHighlightingOnText) {
Collection *coll1;
std::vector<field> fields = {field("text", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["text"] = "include destruction of natural marine and estuarine\\nhabitats, loss of productive agricultural "
"land,\\nand soil erosion. 90 When interviewed, multiple\\nexperts stated that inappropriate land use "
"and\\nmanagement is a central factor contributing to\\nenvironmental degradation in the "
"Castries-Gros\\nIslet Corridor. 91 The construction is placing greater\\nstress on natural resources "
"and biodiversity, and\\nthe capacity to produce food and retain freshwater\\nhas been diminished. "
"92 Moreover, increased\\nwater consumption by the tourism sector, when\\ncompounded by climate "
"change, is increasing food\\nand water insecurity throughout Saint Lucia, as well\\nas suppressing "
"long-term growth prospects. 93";
doc["points"] = 0;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("natural saint lucia", {"text"}, "", {}, {}, {1}, 10, 1, FREQUENCY,
{true}, 10).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("food\\nand water insecurity throughout <mark>Saint</mark> <mark>Lucia</mark>, as well\\nas suppressing long-term",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
ASSERT_EQ(2, results["hits"][0]["highlights"][0]["matched_tokens"].size());
ASSERT_STREQ("Saint", results["hits"][0]["highlights"][0]["matched_tokens"][0].get<std::string>().c_str());
ASSERT_STREQ("Lucia", results["hits"][0]["highlights"][0]["matched_tokens"][1].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
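
// Prefix matching can be enabled or disabled per query_by field via the per-field
// prefix flags.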
TEST_F(CollectionTest, FieldLevelPrefixConfiguration) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Taylor Swift Karaoke: reputation", "Taylor Swift"},
{"Style", "Taylor Swift"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("taylo",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true, false}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("taylo",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY,
{true, true}, 10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
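
// parse_search_query() splits quoted phrases into q_phrases, keeps the remaining
// tokens in q_include_tokens, and routes `-` prefixed terms (including quoted
// phrases) into q_exclude_tokens.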
TEST_F(CollectionTest, QueryParsingForPhraseSearch) {
Collection* coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::string> q_include_tokens, q_unstemmed_tokens;
std::vector<std::vector<std::string>> q_exclude_tokens;
std::vector<std::vector<std::string>> q_phrases;
std::string q = R"(the "phrase search" query)";
/*coll1->parse_search_query(q, q_include_tokens, q_unstemmed_tokens, q_exclude_tokens, q_phrases, "en", false);
ASSERT_EQ(2, q_include_tokens.size());
ASSERT_EQ("the", q_include_tokens[0]);
ASSERT_EQ("query", q_include_tokens[1]);
ASSERT_EQ(1, q_phrases.size());
ASSERT_EQ(2, q_phrases[0].size());
ASSERT_EQ("phrase", q_phrases[0][0]);
ASSERT_EQ("search", q_phrases[0][1]);
*/
// quoted string has trailing padded space
q = R"("space padded " query)";
q_include_tokens.clear();
q_exclude_tokens.clear();
q_unstemmed_tokens.clear();
q_phrases.clear();
coll1->parse_search_query(q, q_include_tokens, q_unstemmed_tokens, q_exclude_tokens, q_phrases, "en", false);
ASSERT_EQ(1, q_include_tokens.size());
ASSERT_EQ("query", q_include_tokens[0]);
ASSERT_EQ(1, q_phrases.size());
ASSERT_EQ(2, q_phrases[0].size());
ASSERT_EQ("space", q_phrases[0][0]);
ASSERT_EQ("padded", q_phrases[0][1]);
// multiple quoted strings
q = R"("first phrase" "second phrase")";
q_include_tokens.clear();
q_exclude_tokens.clear();
q_phrases.clear();
coll1->parse_search_query(q, q_include_tokens, q_unstemmed_tokens, q_exclude_tokens, q_phrases, "en", false);
ASSERT_EQ(1, q_include_tokens.size());
ASSERT_EQ("*", q_include_tokens[0]);
ASSERT_EQ(2, q_phrases.size());
ASSERT_EQ(2, q_phrases[0].size());
ASSERT_EQ("first", q_phrases[0][0]);
ASSERT_EQ("phrase", q_phrases[0][1]);
ASSERT_EQ("second", q_phrases[1][0]);
ASSERT_EQ("phrase", q_phrases[1][1]);
// single quoted string
q = R"("hello")";
q_include_tokens.clear();
q_exclude_tokens.clear();
q_phrases.clear();
coll1->parse_search_query(q, q_include_tokens, q_unstemmed_tokens, q_exclude_tokens, q_phrases, "en", false);
ASSERT_EQ(1, q_include_tokens.size());
ASSERT_EQ("*", q_include_tokens[0]);
ASSERT_EQ(1, q_phrases.size());
ASSERT_EQ(1, q_phrases[0].size());
ASSERT_EQ("hello", q_phrases[0][0]);
// stray trailing quote
q = R"(hello")";
q_include_tokens.clear();
q_exclude_tokens.clear();
q_phrases.clear();
coll1->parse_search_query(q, q_include_tokens, q_unstemmed_tokens, q_exclude_tokens, q_phrases, "en", false);
ASSERT_EQ(1, q_include_tokens.size());
ASSERT_EQ("hello", q_include_tokens[0]);
ASSERT_EQ(0, q_phrases.size());
    // padded space on either side of the quote
q = R"("some query " here)";
q_include_tokens.clear();
q_exclude_tokens.clear();
q_phrases.clear();
coll1->parse_search_query(q, q_include_tokens, q_unstemmed_tokens, q_exclude_tokens, q_phrases, "en", false);
ASSERT_EQ(1, q_include_tokens.size());
ASSERT_EQ("here", q_include_tokens[0]);
ASSERT_EQ(1, q_phrases.size());
ASSERT_EQ(2, q_phrases[0].size());
ASSERT_EQ("some", q_phrases[0][0]);
ASSERT_EQ("query", q_phrases[0][1]);
// with exclude operator
q = R"(-"some phrase" here)";
q_include_tokens.clear();
q_exclude_tokens.clear();
q_phrases.clear();
coll1->parse_search_query(q, q_include_tokens, q_unstemmed_tokens, q_exclude_tokens, q_phrases, "en", false);
ASSERT_EQ(1, q_include_tokens.size());
ASSERT_EQ("here", q_include_tokens[0]);
ASSERT_EQ(0, q_phrases.size());
ASSERT_EQ(1, q_exclude_tokens.size());
ASSERT_EQ(2, q_exclude_tokens[0].size());
ASSERT_EQ("some", q_exclude_tokens[0][0]);
ASSERT_EQ("phrase", q_exclude_tokens[0][1]);
// with multiple exclude operators
q = R"(-"some phrase" here -token)";
q_include_tokens.clear();
q_exclude_tokens.clear();
q_phrases.clear();
coll1->parse_search_query(q, q_include_tokens, q_unstemmed_tokens, q_exclude_tokens, q_phrases, "en", false);
ASSERT_EQ(1, q_include_tokens.size());
ASSERT_EQ("here", q_include_tokens[0]);
ASSERT_EQ(0, q_phrases.size());
ASSERT_EQ(2, q_exclude_tokens.size());
ASSERT_EQ(2, q_exclude_tokens[0].size());
ASSERT_EQ("some", q_exclude_tokens[0][0]);
ASSERT_EQ("phrase", q_exclude_tokens[0][1]);
ASSERT_EQ(1, q_exclude_tokens[1].size());
ASSERT_EQ("token", q_exclude_tokens[1][0]);
collectionManager.drop_collection("coll1");
}
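
// query_by accepts wildcard patterns: `*` targets all string fields, while patterns
// like `user*` / `user.*` restrict the search to matching string fields; a pattern
// that matches no string field returns an error.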
TEST_F(CollectionTest, WildcardQueryBy) {
nlohmann::json schema = R"({
"name": "posts",
"enable_nested_fields": true,
"fields": [
{"name": "username", "type": "string", "facet": true},
{"name": "user.rank", "type": "int32", "facet": true},
{"name": "user.bio", "type": "string"},
{"name": "likes", "type": "int32"},
{"name": "content", "type": "object"}
],
"default_sorting_field": "likes"
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
std::vector<std::string> json_lines = {
R"({"id": "124","username": "user_a","user": {"rank": 100,"bio": "Hi! I'm user_a"},"likes": 5215,"content": {"title": "title 1","body": "body 1 user_a"}})",
R"({"id": "125","username": "user_b","user": {"rank": 50,"bio": "user_b here, nice to meet you!"},"likes": 5215,"content": {"title": "title 2","body": "body 2 user_b"}})"
};
for (auto const& json: json_lines){
auto add_op = coll->add(json);
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
// * matches username, user.bio, content.title, content.body
auto result = coll->search("user_a", {"*"}, "", {}, {}, {0}).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ("Hi! I'm <mark>user_a</mark>",
result["hits"][0]["highlight"]["user"]["bio"]["snippet"].get<std::string>());
ASSERT_EQ("<mark>user_a</mark>",
result["hits"][0]["highlight"]["username"]["snippet"].get<std::string>());
// ASSERT_EQ("body 1 <mark>user_a</mark>",
// result["hits"][0]["highlight"]["content"]["body"]["snippet"].get<std::string>());
// user* matches username and user.bio
result = coll->search("user_a", {"user*"}, "", {}, {}, {0}).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ("Hi! I'm <mark>user_a</mark>",
result["hits"][0]["highlight"]["user"]["bio"]["snippet"].get<std::string>());
ASSERT_EQ("<mark>user_a</mark>",
result["hits"][0]["highlight"]["username"]["snippet"].get<std::string>());
// user.* matches user.bio
result = coll->search("user_a", {"user.*"}, "", {}, {}, {0}).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ("Hi! I'm <mark>user_a</mark>",
result["hits"][0]["highlight"]["user"]["bio"]["snippet"].get<std::string>());
// user.rank cannot be queried
result = coll->search("100", {"user*"}, "", {}, {}, {0}).get();
ASSERT_EQ(0, result["found"].get<size_t>());
ASSERT_EQ(0, result["hits"].size());
// No matching field for query_by
auto error = coll->search("user_a", {"foo*"}, "", {}, {}, {0}).error();
ASSERT_EQ("No string or string array field found matching the pattern `foo*` in the schema.", error);
}
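
// highlight_fields also accepts wildcard patterns; only fields matching the pattern
// produce highlight entries, and a pattern that matches nothing yields an empty
// highlight object.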
TEST_F(CollectionTest, WildcardHighlightFields) {
nlohmann::json schema = R"({
"name": "posts",
"enable_nested_fields": true,
"fields": [
{"name": "user_name", "type": "string", "facet": true},
{"name": "user", "type": "object"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
auto add_op = coll->add(R"({"id": "124","user_name": "user_a","user": {"rank": 100,"phone": "+91 123123123"}})");
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
spp::sparse_hash_set<std::string> dummy_include_exclude;
std::string highlight_fields = "user*";
// user* matches user_name, user.rank and user.phone
auto result = coll->search("123", {"user"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "",
30, 4, "", Index::TYPO_TOKENS_THRESHOLD, "", "", {}, 3, "<mark>", "</mark>", {}, UINT32_MAX,
true, false, true, highlight_fields).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ(1, result["hits"][0]["highlight"].size());
ASSERT_EQ("+91 <mark>123</mark>123123", result["hits"][0]["highlight"]["user"]["phone"]["snippet"].get<std::string>());
highlight_fields = "user.*";
// user.* matches user.rank and user.phone
result = coll->search("+91", {"user"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "",
30, 4, "", Index::TYPO_TOKENS_THRESHOLD, "", "", {}, 3, "<mark>", "</mark>", {}, UINT32_MAX,
true, false, true, highlight_fields).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ(1, result["hits"][0]["highlight"].size());
ASSERT_EQ("+<mark>91</mark> 123123123",
result["hits"][0]["highlight"]["user"]["phone"]["snippet"].get<std::string>());
highlight_fields = "user*";
// user* matches user_name, user.rank and user.phone
result = coll->search("user_a", {"user_name"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "",
30, 4, "", Index::TYPO_TOKENS_THRESHOLD, "", "", {}, 3, "<mark>", "</mark>", {}, UINT32_MAX,
true, false, true, highlight_fields).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ(1, result["hits"][0]["highlight"].size());
ASSERT_EQ("<mark>user_a</mark>",
result["hits"][0]["highlight"]["user_name"]["snippet"].get<std::string>());
highlight_fields = "user.*";
// user.* matches user.rank and user.phone
result = coll->search("user_a", {"user_name"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "",
30, 4, "", Index::TYPO_TOKENS_THRESHOLD, "", "", {}, 3, "<mark>", "</mark>", {}, UINT32_MAX,
true, false, true, highlight_fields).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ(0, result["hits"][0]["highlight"].size());
highlight_fields = "foo*";
// No matching field for highlight_fields
result = coll->search("user_a", {"user_name"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "",
30, 4, "", Index::TYPO_TOKENS_THRESHOLD, "", "", {}, 3, "<mark>", "</mark>", {}, UINT32_MAX,
true, false, true, highlight_fields).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ(0, result["hits"][0]["highlight"].size());
}
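
// highlight_full_fields likewise accepts wildcards: matching fields get a full
// `value` in addition to the snippet, while non-matching fields get only the snippet.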
TEST_F(CollectionTest, WildcardHighlightFullFields) {
nlohmann::json schema = R"({
"name": "posts",
"enable_nested_fields": true,
"fields": [
{"name": "user_name", "type": "string", "facet": true},
{"name": "user.rank", "type": "int32", "facet": true},
{"name": "user.phone", "type": "string"},
{"name": "user.bio", "type": "string"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
auto json = R"({
"id": "124",
"user_name": "user_a",
"user": {
"rank": 100,
"phone": "+91 123123123"
}
})"_json;
std::string bio = "Once there was a middle-aged boy named User_a who was an avid swimmer."
"He had been swimming competitively for most of his life, and had even competed in several national competitions."
"However, despite his passion and talent for the sport, he had never quite managed to win that elusive gold medal."
"Determined to change that, User_a began training harder than ever before."
"He woke up early every morning to swim laps before work and spent his evenings at the pool as well."
"Despite the grueling schedule, he never once complained."
"Instead, he reminded himself of his goal: to become a national champion.";
json["user"]["bio"] = bio;
auto add_op = coll->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
spp::sparse_hash_set<std::string> dummy_include_exclude;
std::string highlight_full_fields = "user*";
// user* matches user_name, user.bio
auto result = coll->search("user_a", {"*"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "",
30, 4, highlight_full_fields).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ("a middle-aged boy named <mark>User_a</mark> who was an avid",
result["hits"][0]["highlight"]["user"]["bio"]["snippet"].get<std::string>());
std::string highlighted_value = "Once there was a middle-aged boy named <mark>User_a</mark> who was an avid swimmer."
"He had been swimming competitively for most of his life, and had even competed in several national competitions."
"However, despite his passion and talent for the sport, he had never quite managed to win that elusive gold medal."
"Determined to change that, <mark>User_a</mark> began training harder than ever before."
"He woke up early every morning to swim laps before work and spent his evenings at the pool as well."
"Despite the grueling schedule, he never once complained."
"Instead, he reminded himself of his goal: to become a national champion.";
ASSERT_EQ( highlighted_value, result["hits"][0]["highlight"]["user"]["bio"]["value"].get<std::string>());
ASSERT_EQ("<mark>user_a</mark>",
result["hits"][0]["highlight"]["user_name"]["value"].get<std::string>());
highlight_full_fields = "user.*";
// user.* matches user.bio
result = coll->search("user_a", {"*"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "",
30, 4, highlight_full_fields).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ(highlighted_value, result["hits"][0]["highlight"]["user"]["bio"]["value"].get<std::string>());
ASSERT_EQ(0, result["hits"][0]["highlight"]["user_name"].count("value"));
highlight_full_fields = "foo*";
    // No matching field for highlight_full_fields
result = coll->search("user_a", {"*"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "",
30, 4, highlight_full_fields).get();
ASSERT_EQ(0, result["hits"][0]["highlight"]["user"]["bio"].count("value"));
ASSERT_EQ(0, result["hits"][0]["highlight"]["user_name"].count("value"));
}
TEST_F(CollectionTest, SemanticSearchTest) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json object;
object["name"] = "apple";
auto add_op = coll->add(object.dump());
ASSERT_TRUE(add_op.ok());
ASSERT_EQ("apple", add_op.get()["name"]);
ASSERT_EQ(384, add_op.get()["embedding"].size());
spp::sparse_hash_set<std::string> dummy_include_exclude;
auto search_res_op = coll->search("apple", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "", 30, 4, "");
ASSERT_TRUE(search_res_op.ok());
auto search_res = search_res_op.get();
ASSERT_EQ(1, search_res["found"].get<size_t>());
ASSERT_EQ(1, search_res["hits"].size());
ASSERT_EQ("apple", search_res["hits"][0]["document"]["name"].get<std::string>());
ASSERT_EQ(384, search_res["hits"][0]["document"]["embedding"].size());
}
TEST_F(CollectionTest, InvalidSemanticSearch) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
LOG(INFO) << "op.error(): " << op.error();
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json object;
object["name"] = "apple";
auto add_op = coll->add(object.dump());
ASSERT_TRUE(add_op.ok());
ASSERT_EQ("apple", add_op.get()["name"]);
ASSERT_EQ(384, add_op.get()["embedding"].size());
spp::sparse_hash_set<std::string> dummy_include_exclude;
auto search_res_op = coll->search("apple", {"embedding", "embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "", 30, 4, "");
ASSERT_FALSE(search_res_op.ok());
}
TEST_F(CollectionTest, HybridSearch) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json object;
object["name"] = "apple";
auto add_op = coll->add(object.dump());
LOG(INFO) << "add_op.error(): " << add_op.error();
ASSERT_TRUE(add_op.ok());
ASSERT_EQ("apple", add_op.get()["name"]);
ASSERT_EQ(384, add_op.get()["embedding"].size());
spp::sparse_hash_set<std::string> dummy_include_exclude;
auto search_res_op = coll->search("apple", {"name","embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "", 30, 4, "");
ASSERT_TRUE(search_res_op.ok());
auto search_res = search_res_op.get();
ASSERT_EQ(1, search_res["found"].get<size_t>());
ASSERT_EQ(1, search_res["hits"].size());
ASSERT_EQ("apple", search_res["hits"][0]["document"]["name"].get<std::string>());
ASSERT_EQ(384, search_res["hits"][0]["document"]["embedding"].size());
}
// TEST_F(CollectionTest, EmbedFieldsTest) {
// nlohmann::json schema = R"({
// "name": "objects",
// "fields": [
// {"name": "name", "type": "string"},
// {"name": "embedding", "type":"float[]", "embed":{"from": ["name"]}
// ]
// })"_json;
// EmbedderManager::set_model_dir("/tmp/typesense_test/models");
//
// auto op = collectionManager.create_collection(schema);
// ASSERT_TRUE(op.ok());
// Collection* coll = op.get();
// nlohmann::json object = R"({
// "name": "apple"
// })"_json;
// auto embed_op = coll->embed_fields(object);
// ASSERT_TRUE(embed_op.ok());
// ASSERT_EQ("apple", object["name"]);
// ASSERT_EQ(384, object["embedding"].get<std::vector<float>>().size());
// }
TEST_F(CollectionTest, HybridSearchRankFusionTest) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json object;
object["name"] = "butter";
auto add_op = coll->add(object.dump());
ASSERT_TRUE(add_op.ok());
object["name"] = "butterball";
add_op = coll->add(object.dump());
ASSERT_TRUE(add_op.ok());
object["name"] = "butterfly";
add_op = coll->add(object.dump());
ASSERT_TRUE(add_op.ok());
spp::sparse_hash_set<std::string> dummy_include_exclude;
auto search_res_op = coll->search("butter", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "", 30, 4, "");
ASSERT_TRUE(search_res_op.ok());
auto search_res = search_res_op.get();
ASSERT_EQ(3, search_res["found"].get<size_t>());
ASSERT_EQ(3, search_res["hits"].size());
// Vector search order:
// 1. butter
// 2. butterball
// 3. butterfly
ASSERT_EQ("butter", search_res["hits"][0]["document"]["name"].get<std::string>());
ASSERT_EQ("butterball", search_res["hits"][1]["document"]["name"].get<std::string>());
ASSERT_EQ("butterfly", search_res["hits"][2]["document"]["name"].get<std::string>());
search_res_op = coll->search("butter", {"name"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "", 30, 4, "");
ASSERT_TRUE(search_res_op.ok());
search_res = search_res_op.get();
ASSERT_EQ(3, search_res["found"].get<size_t>());
ASSERT_EQ(3, search_res["hits"].size());
// Keyword search order:
// 1. butter
// 2. butterfly
// 3. butterball
ASSERT_EQ("butter", search_res["hits"][0]["document"]["name"].get<std::string>());
ASSERT_EQ("butterfly", search_res["hits"][1]["document"]["name"].get<std::string>());
ASSERT_EQ("butterball", search_res["hits"][2]["document"]["name"].get<std::string>());
search_res_op = coll->search("butter", {"name","embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "", 30, 4, "");
ASSERT_TRUE(search_res_op.ok());
search_res = search_res_op.get();
ASSERT_EQ(3, search_res["found"].get<size_t>());
ASSERT_EQ(3, search_res["hits"].size());
// Hybrid search with rank fusion order:
// 1. butter (1/1 * 0.7) + (1/1 * 0.3) = 1
// 2. butterfly (1/2 * 0.7) + (1/3 * 0.3) = 0.45
// 3. butterball (1/3 * 0.7) + (1/2 * 0.3) = 0.383
ASSERT_EQ("butter", search_res["hits"][0]["document"]["name"].get<std::string>());
ASSERT_EQ("butterfly", search_res["hits"][1]["document"]["name"].get<std::string>());
ASSERT_EQ("butterball", search_res["hits"][2]["document"]["name"].get<std::string>());
ASSERT_FLOAT_EQ((1.0/1.0 * 0.7) + (1.0/1.0 * 0.3), search_res["hits"][0]["hybrid_search_info"]["rank_fusion_score"].get<float>());
ASSERT_FLOAT_EQ((1.0/2.0 * 0.7) + (1.0/3.0 * 0.3), search_res["hits"][1]["hybrid_search_info"]["rank_fusion_score"].get<float>());
ASSERT_FLOAT_EQ((1.0/3.0 * 0.7) + (1.0/2.0 * 0.3), search_res["hits"][2]["hybrid_search_info"]["rank_fusion_score"].get<float>());
}
TEST_F(CollectionTest, WildcardSearchWithEmbeddingField) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
spp::sparse_hash_set<std::string> dummy_include_exclude;
auto search_res_op = coll->search("*", {"name","embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "", 30, 4, "");
ASSERT_TRUE(search_res_op.ok());
}
TEST_F(CollectionTest, CreateModelDirIfNotExists) {
system("mkdir -p /tmp/typesense_test/new_models_dir");
system("rm -rf /tmp/typesense_test/new_models_dir");
EmbedderManager::set_model_dir("/tmp/typesense_test/new_models_dir");
// check if model dir is created
ASSERT_TRUE(std::filesystem::exists("/tmp/typesense_test/new_models_dir"));
}
TEST_F(CollectionTest, EmbedStringArrayField) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "names", "type": "string[]"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["names"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json doc;
doc["names"].push_back("butter");
doc["names"].push_back("butterfly");
doc["names"].push_back("butterball");
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
TEST_F(CollectionTest, MissingFieldForEmbedding) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "names", "type": "string[]"},
{"name": "category", "type": "string", "optional": true},
{"name": "embedding", "type":"float[]", "embed":{"from": ["names", "category"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json doc;
doc["names"].push_back("butter");
doc["names"].push_back("butterfly");
doc["names"].push_back("butterball");
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
TEST_F(CollectionTest, WrongTypeInEmbedFrom) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "category", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": [1122], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Property `embed.from` must contain only field names as strings.", op.error());
}
TEST_F(CollectionTest, WrongTypeForEmbedding) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "category", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["category"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json doc;
doc["category"] = 1;
auto add_op = validator_t::validate_embed_fields(doc, coll->get_embedding_fields(), coll->get_schema(), true);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `category` has malformed data.", add_op.error());
}
TEST_F(CollectionTest, WrongTypeOfElementForEmbeddingInStringArray) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "category", "type": "string[]"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["category"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json doc;
doc["category"].push_back(33);
auto add_op = validator_t::validate_embed_fields(doc, coll->get_embedding_fields(), coll->get_schema(), true);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `category` has malformed data.", add_op.error());
}
TEST_F(CollectionTest, UpdateEmbeddingsForUpdatedDocument) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json doc;
doc["name"] = "butter";
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// get embedding field
// get id of the document
auto id = add_op.get()["id"];
// get embedding field from the document
auto embedding_field = add_op.get()["embedding"].get<std::vector<float>>();
ASSERT_EQ(384, embedding_field.size());
// update the document
nlohmann::json update_doc;
update_doc["name"] = "butterball";
std::string dirty_values;
auto update_op = coll->update_matching_filter("id:=" + id.get<std::string>(), update_doc.dump(), dirty_values);
ASSERT_TRUE(update_op.ok());
ASSERT_EQ(1, update_op.get()["num_updated"]);
// get the document again
auto get_op = coll->get(id);
ASSERT_TRUE(get_op.ok());
auto updated_embedding_field = get_op.get()["embedding"].get<std::vector<float>>();
// check if the embedding field is updated
ASSERT_NE(embedding_field, updated_embedding_field);
}
TEST_F(CollectionTest, CreateCollectionWithOpenAI) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "openai/text-embedding-ada-002"}}}
]
})"_json;
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
auto api_key = std::string(std::getenv("api_key"));
schema["fields"][1]["embed"]["model_config"]["api_key"] = api_key;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
// create one more collection
schema = R"({
"name": "objects2",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "openai/text-embedding-ada-002"}}}
]
})"_json;
schema["fields"][1]["embed"]["model_config"]["api_key"] = api_key;
op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
}
TEST_F(CollectionTest, CreateOpenAIEmbeddingField) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "openai/text-embedding-ada-002"}}}
]
})"_json;
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
auto api_key = std::string(std::getenv("api_key"));
schema["fields"][1]["embed"]["model_config"]["api_key"] = api_key;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
auto summary = op.get()->get_summary_json();
ASSERT_EQ("openai/text-embedding-ada-002", summary["fields"][1]["embed"]["model_config"]["model_name"]);
ASSERT_EQ(1536, summary["fields"][1]["num_dim"]);
nlohmann::json doc;
doc["name"] = "butter";
auto add_op = op.get()->add(doc.dump());
ASSERT_TRUE(add_op.ok());
ASSERT_EQ(1536, add_op.get()["embedding"].size());
}
TEST_F(CollectionTest, HideOpenAIApiKey) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "openai/text-embedding-ada-002"}}}
]
})"_json;
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
auto api_key = std::string(std::getenv("api_key"));
schema["fields"][1]["embed"]["model_config"]["api_key"] = api_key;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
auto summary = op.get()->get_summary_json();
    // hide api key with * after the first 5 characters
ASSERT_EQ(summary["fields"][1]["embed"]["model_config"]["api_key"].get<std::string>(), api_key.replace(5, api_key.size() - 5, api_key.size() - 5, '*'));
}
TEST_F(CollectionTest, PrefixSearchDisabledForOpenAI) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "openai/text-embedding-ada-002"}}}
]
})"_json;
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
auto api_key = std::string(std::getenv("api_key"));
schema["fields"][1]["embed"]["model_config"]["api_key"] = api_key;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
nlohmann::json doc;
doc["name"] = "butter";
auto add_op = op.get()->add(doc.dump());
ASSERT_TRUE(add_op.ok());
spp::sparse_hash_set<std::string> dummy_include_exclude;
auto search_res_op = op.get()->search("dummy", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "", 30, 4, "");
ASSERT_FALSE(search_res_op.ok());
ASSERT_EQ("Prefix search is not supported for remote embedders. Please set `prefix=false` as an additional search parameter to disable prefix searching.", search_res_op.error());
search_res_op = op.get()->search("dummy", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "", 30, 4, "");
ASSERT_TRUE(search_res_op.ok());
}
TEST_F(CollectionTest, MoreThanOneEmbeddingField) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "name2", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}},
{"name": "embedding2", "type":"float[]", "embed":{"from": ["name2"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
auto coll = op.get();
nlohmann::json doc;
doc["name"] = "butter";
doc["name2"] = "butterball";
auto add_op = validator_t::validate_embed_fields(doc, op.get()->get_embedding_fields(), op.get()->get_schema(), true);
ASSERT_TRUE(add_op.ok());
spp::sparse_hash_set<std::string> dummy_include_exclude;
auto search_res_op = coll->search("butter", {"name", "embedding", "embedding2"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10, "", 30, 4, "");
ASSERT_FALSE(search_res_op.ok());
ASSERT_EQ("Only one embedding field is allowed in the query.", search_res_op.error());
}
TEST_F(CollectionTest, EmbeddingFieldEmptyArrayInDocument) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "names", "type": "string[]"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["names"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
auto coll = op.get();
nlohmann::json doc;
doc["names"] = nlohmann::json::array();
// try adding
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
ASSERT_TRUE(add_op.get()["embedding"].is_null());
// try updating
auto id = add_op.get()["id"];
doc["names"].push_back("butter");
std::string dirty_values;
auto update_op = coll->update_matching_filter("id:=" + id.get<std::string>(), doc.dump(), dirty_values);
ASSERT_TRUE(update_op.ok());
ASSERT_EQ(1, update_op.get()["num_updated"]);
auto get_op = coll->get(id);
ASSERT_TRUE(get_op.ok());
ASSERT_FALSE(get_op.get()["embedding"].is_null());
ASSERT_EQ(384, get_op.get()["embedding"].size());
}
TEST_F(CollectionTest, CatchPartialResponseFromRemoteEmbedding) {
std::string partial_json = R"({
"results": [
{
"embedding": [
0.0,
0.0,
0.0
],
"text": "butter"
},
{
"embedding": [
0.0,
0.0,
0.0
],
"text": "butterball"
},
{
"embedding": [
0.0,
0.0)";
nlohmann::json req_body = R"({
"inputs": [
"butter",
"butterball",
"butterfly"
]
})"_json;
OpenAIEmbedder embedder("", "", 0, false, "");
auto res = embedder.get_error_json(req_body, 200, partial_json);
ASSERT_EQ(res["response"]["error"], "Malformed response from OpenAI API.");
ASSERT_EQ(res["request"]["body"], req_body);
}
| 228,234 | C++ | .cpp | 4,075 | 46.74773 | 224 | 0.568033 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,715 | collection_infix_search_test.cpp | typesense_typesense/test/collection_infix_search_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionInfixSearchTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_infix";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionInfixSearchTest, InfixBasics) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("points", field_types::INT32, false),
field("non_infix", field_types::STRING, true)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "GH100037IN8900X";
doc["points"] = 100;
doc["non_infix"] = "foobar";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto response = coll1->search("bar",
{"non_infix"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always});
ASSERT_FALSE(response.ok());
ASSERT_EQ("Could not find `non_infix` in the infix index."
" Make sure to enable infix search by specifying `infix: true` in the schema.", response.error());
auto results = coll1->search("100037",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("title", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("<mark>GH100037IN8900X</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("<mark>GH100037IN8900X</mark>", results["hits"][0]["highlights"][0]["value"].get<std::string>());
// verify off behavior
results = coll1->search("100037",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
ASSERT_EQ(0, results["hits"].size());
// when fallback is used, only the prefix result is returned
doc["id"] = "1";
doc["title"] = "100037SG7120X";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
results = coll1->search("100037",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {fallback}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// always behavior: both prefix and infix matches are returned but ranked below prefix match
results = coll1->search("100037",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_TRUE(results["hits"][0]["text_match"].get<size_t>() > results["hits"][1]["text_match"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionInfixSearchTest, InfixOnArray) {
std::vector<field> fields = {field("model_numbers", field_types::STRING_ARRAY, false, false, true, "", -1, 1),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["model_numbers"] = {"GH100037IN8900X", "GH100047IN8900X", "GH100057IN8900X"};
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("47in",
{"model_numbers"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "model_numbers", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("model_numbers", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("<mark>GH100047IN8900X</mark>", results["hits"][0]["highlights"][0]["snippets"][0].get<std::string>());
ASSERT_EQ("<mark>GH100047IN8900X</mark>", results["hits"][0]["highlights"][0]["values"][0].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionInfixSearchTest, InfixNoMatchButRegularHighlight) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "White Bread";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("brown bread",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "model_numbers", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("White <mark>Bread</mark>", results["hits"][0]["highlight"]["title"]["snippet"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionInfixSearchTest, InfixWithFiltering) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "GH100037IN8900X";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "XH100037IN8900X";
doc2["points"] = 200;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("37in8",
{"title"}, "points: 200", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, "", "", {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// filtering + exclusion via curation
nlohmann::json doc3;
doc3["id"] = "2";
doc3["title"] = "RH100037IN8900X";
doc3["points"] = 300;
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
results = coll1->search("37IN8", {"title"}, "points:>= 200", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, "", "2", {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
auto schema_json =
R"({
"name": "Foods",
"fields": [
{"name": "title", "type": "string", "infix": true},
{"name": "summary", "type": "string", "infix": true},
{"name": "rating", "type": "int32"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"title": "Spicy Chicken Tacos",
"summary": "These are tacos made with spicy chicken fillings.",
"rating": 2
})"_json,
R"({
"title": "Salad With Taco Toppings",
"summary": "Healthy salad with taco seasoning topping.",
"rating": 3
})"_json,
R"({
"title": "Beef Street Tacos",
"summary": "Just like eating in Mexico!",
"rating": 1
})"_json,
R"({
"title": "Bean Burritos",
"summary": "Home made beans wrapped in a tortilla.",
"rating": 3
})"_json,
R"({
"title": "Cheese Enchiladas",
"summary": "Fresh cheese tortilla wrapped and baked.",
"rating": 2
})"_json,
R"({
"title": "Green Sauce Tacoquitos",
"summary": "Deep fried tacos covered in green sauce.",
"rating": 5
})"_json,
R"({
"title": "Susan's SuperTacosSupereme",
"summary": "The famous chef Susan Pancakey's taco supreme.",
"rating": 1
})"_json,
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
std::map<std::string, std::string> req_params = {
{"collection", "Foods"},
{"q", "taco"},
{"query_by", "title,summary"},
{"infix", "always,always"},
{"filter_by", "rating:>=2 && rating:<=4"},
};
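    // within rating [2,4], only docs 0 and 1 contain "taco" in their title/summary, so exactly two hits are expected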
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
nlohmann::json result = nlohmann::json::parse(json_res);
ASSERT_EQ(2, result["found"].get<size_t>());
ASSERT_EQ(2, result["hits"].size());
ASSERT_EQ("1", result["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(3, result["hits"][0]["document"]["rating"].get<std::int32_t>());
ASSERT_EQ("Salad With Taco Toppings", result["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ("0", result["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(2, result["hits"][1]["document"]["rating"].get<std::int32_t>());
ASSERT_EQ("Spicy Chicken Tacos", result["hits"][1]["document"]["title"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionInfixSearchTest, RespectPrefixAndSuffixLimits) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "GH100037IN8900X";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "X100037SG89007120X";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
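    // the trailing args after {always} in the calls below appear to cap how many extra characters may precede/follow the infix match (a max-extra-prefix / max-extra-suffix limit, inferred from this test)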
// check extra prefixes
auto results = coll1->search("100037",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}, 1).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("100037",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}, 2).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
// check extra suffixes
results = coll1->search("8900",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}, INT16_MAX, 2).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("8900",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}, INT16_MAX, 5).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionInfixSearchTest, InfixSpecificField) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("description", field_types::STRING, false, false, true, "", -1, 1),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "GH100037IN8900X";
doc["description"] = "foobar";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "foobar";
doc["description"] = "GH100037IN8900X";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("100037",
{"title", "description"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always, off}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("100037",
{"title", "description"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off, always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// highlight infix match only on infix-searched field
doc["id"] = "2";
doc["title"] = "fuzzbuzz HYU16736GY6372";
doc["description"] = "HYU16736GY6372";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
results = coll1->search("16736",
{"title", "description"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off, always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("2", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("description", results["hits"][0]["highlights"][0]["field"].get<std::string>());
ASSERT_EQ("<mark>HYU16736GY6372</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_FALSE(results["hits"][0]["highlights"][0].contains("value"));
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionInfixSearchTest, InfixOneOfManyFields) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "content", "type": "object"},
{"name": "data.title", "type": "string"},
{"name": "data.idClient", "type": "string"},
{"name": "data.jobNumber", "type": "string", "infix": true}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc = R"(
{
"data": {
"idFS": "xx",
"jobNumber": "XX_XX-EG00907",
"idClient": "862323",
"title": "my title"
},
"content": {
"task": "my task",
"description": "my description",
"status": "my status"
}
})"_json;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("EG00907",
{"data.title", "content", "data.idClient", "data.jobNumber"}, "", {}, {}, {0},
3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off, off, off, always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
}
TEST_F(CollectionInfixSearchTest, InfixDeleteAndUpdate) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "GH100037IN8900X";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("100037",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
coll1->remove("0");
for(size_t i = 0; i < coll1->_get_index()->_get_infix_index().at("title").size(); i++) {
ASSERT_EQ(0, coll1->_get_index()->_get_infix_index().at("title").at(i)->size());
}
results = coll1->search("100037",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
ASSERT_EQ(0, results["hits"].size());
// add the document again and then update it
ASSERT_TRUE(coll1->add(doc.dump()).ok());
results = coll1->search("100037",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
doc["title"] = "YHD3342D78912";
ASSERT_TRUE(coll1->add(doc.dump(), UPSERT).ok());
results = coll1->search("342D78",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("100037",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
    ASSERT_EQ(0, results["hits"].size());
    // the per-field infix index appears to be sharded into hash buckets (4 here); the updated title should live only in the bucket chosen by hash(title) % 4
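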
std::string key = "yhd3342d78912";
auto strhash = StringUtils::hash_wy(key.c_str(), key.size());
const auto& infix_sets = coll1->_get_index()->_get_infix_index().at("title");
ASSERT_EQ(1, infix_sets[strhash % 4]->size());
for(size_t i = 0; i < infix_sets.size(); i++) {
if(i != strhash % 4) {
ASSERT_EQ(0, infix_sets[i]->size());
}
}
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionInfixSearchTest, MultiFieldInfixSearch) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("mpn", field_types::STRING, false, false, true, "", -1, 1),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "100037 Shoe";
doc["mpn"] = "HYDGHSGAH";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "Running Shoe";
doc["mpn"] = "GHX100037IN";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("100037",
{"title", "mpn"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionInfixSearchTest, DeleteDocWithInfixIndex) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("mpn", field_types::STRING, false, false, true, "", -1, 1),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Running Shoe";
doc["mpn"] = "HYDGHSGAH";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "Running Band";
doc["mpn"] = "GHX100037IN";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("nni",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
// drop one document
coll1->remove("0");
// search again
results = coll1->search("nni",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
| 30,831 | C++ | .cpp | 542 | 43.774908 | 122 | 0.494795 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,716 | id_list_test.cpp | typesense_typesense/test/id_list_test.cpp |
#include <gtest/gtest.h>
#include <id_list.h>
#include "logger.h"
TEST(IdListTest, IdListIteratorTest) {
id_list_t id_list(2);
for(size_t i = 0; i < 10; i++) {
id_list.upsert(i*2);
}
auto iter = id_list.new_iterator();
for(size_t i = 0; i < 10; i++) {
iter.skip_to(i*2);
ASSERT_EQ(i*2, iter.id());
ASSERT_TRUE(iter.valid());
}
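    // 19 is greater than every stored id (0..18), so skipping to it invalidates the iterator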
iter.skip_to(19);
ASSERT_FALSE(iter.valid());
auto iter2 = id_list.new_iterator();
size_t count = 0;
while(iter2.valid()) {
iter2.next();
count++;
}
ASSERT_EQ(10, count);
ASSERT_FALSE(iter2.valid());
}
TEST(IdListTest, IdListIntersectionTest) {
id_list_t id_list(2);
size_t res_len = 10*1000;
for (size_t i = 0; i < 1000; i++) {
id_list.upsert(i * 2);
}
// large res_ids + small id_list
uint32_t* res_ids = new uint32_t[res_len];
    for(size_t i = 0; i < res_len; i++) {  // start at 0 so res_ids[0] is initialized before sorting
res_ids[i] = (rand() % (res_len*10));
}
std::sort(res_ids, res_ids + res_len);
auto count = id_list.intersect_count(res_ids, res_len, false, 0);
ASSERT_NE(0, count);
delete [] res_ids;
}
| 1,161 | C++ | .cpp | 41 | 22.95122 | 69 | 0.561767 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,717 | collection_sorting_test.cpp | typesense_typesense/test/collection_sorting_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionSortingTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_sorting";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionSortingTest, SortingOrder) {
Collection *coll_mul_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::vector<field> fields = {field("title", field_types::STRING, false),
field("starring", field_types::STRING, false),
field("points", field_types::INT32, false),
field("cast", field_types::STRING_ARRAY, false)};
coll_mul_fields = collectionManager.get_collection("coll_mul_fields").get();
if(coll_mul_fields == nullptr) {
coll_mul_fields = collectionManager.create_collection("coll_mul_fields", 4, fields, "points").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_mul_fields->add(json_line);
}
infile.close();
query_fields = {"title"};
std::vector<std::string> facets;
sort_fields = { sort_by("points", "ASC") };
nlohmann::json results = coll_mul_fields->search("the", query_fields, "", facets, sort_fields, {0}, 15, 1, FREQUENCY, {false}).get();
ASSERT_EQ(10, results["hits"].size());
std::vector<std::string> ids = {"17", "13", "10", "4", "0", "1", "8", "6", "16", "11"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// limiting results to just 5, "ASC" keyword must be case insensitive
sort_fields = { sort_by("points", "asc") };
results = coll_mul_fields->search("the", query_fields, "", facets, sort_fields, {0}, 5, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ids = {"17", "13", "10", "4", "0"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// desc
sort_fields = { sort_by("points", "dEsc") };
results = coll_mul_fields->search("the", query_fields, "", facets, sort_fields, {0}, 15, 1, FREQUENCY, {false}).get();
ASSERT_EQ(10, results["hits"].size());
ids = {"11", "16", "6", "8", "1", "0", "10", "4", "13", "17"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// With empty list of sort_by fields:
// should be ordered desc on the default sorting field, since the match score will be the same for all records.
sort_fields = { };
results = coll_mul_fields->search("of", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ids = {"11", "12", "5", "4", "17"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
collectionManager.drop_collection("coll_mul_fields");
}
TEST_F(CollectionSortingTest, DefaultSortingFieldValidations) {
// Default sorting field must be a numerical field
std::vector<field> fields = {field("name", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, true),
field("age", field_types::INT32, false),
field("in_stock", field_types::BOOL, false),
field("average", field_types::INT32, false) };
std::vector<sort_by> sort_fields = { sort_by("age", "DESC"), sort_by("average", "DESC") };
Option<Collection*> collection_op = collectionManager.create_collection("sample_collection", 4, fields, "name");
ASSERT_FALSE(collection_op.ok());
ASSERT_EQ("Default sorting field `name` is not a sortable type.", collection_op.error());
collectionManager.drop_collection("sample_collection");
// Default sorting field must exist as a field in schema
sort_fields = { sort_by("age", "DESC"), sort_by("average", "DESC") };
collection_op = collectionManager.create_collection("sample_collection", 4, fields, "NOT-DEFINED");
ASSERT_FALSE(collection_op.ok());
ASSERT_EQ("Default sorting field is defined as `NOT-DEFINED` but is not found in the schema.", collection_op.error());
collectionManager.drop_collection("sample_collection");
// must be able to use boolean field as default sorting field
collection_op = collectionManager.create_collection("sample_collection", 4, fields, "in_stock");
ASSERT_TRUE(collection_op.ok());
auto coll = collection_op.get();
nlohmann::json doc;
doc["name"] = "Example";
doc["tags"] = {"example"};
doc["age"] = 100;
doc["in_stock"] = true;
doc["average"] = 45;
ASSERT_TRUE(coll->add(doc.dump()).ok());
}
TEST_F(CollectionSortingTest, NoDefaultSortingField) {
Collection *coll1;
std::ifstream infile(std::string(ROOT_DIR)+"test/documents.jsonl");
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields).get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll1->add(json_line);
}
infile.close();
// without a default sorting field, matches should be sorted by (text_match, seq_id)
auto results = coll1->search("rocket", {"title"}, "", {}, {}, {1}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ(24, results["out_of"]);
std::vector<std::string> ids = {"16", "15", "7", "0"};
for(size_t i=0; i < results["hits"].size(); i++) {
ASSERT_EQ(ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
// try removing a document and doing wildcard (tests the seq_id array used for wildcard searches)
auto remove_op = coll1->remove("0");
ASSERT_TRUE(remove_op.ok());
results = coll1->search("*", {}, "", {}, {}, {1}, 30, 1, FREQUENCY, {false}).get();
ASSERT_EQ(23, results["found"].get<size_t>());
ASSERT_EQ(23, results["hits"].size());
ASSERT_EQ(23, results["out_of"]);
for(size_t i=23; i >= 1; i--) {
std::string doc_id = (i == 4) ? "foo" : std::to_string(i);
ASSERT_EQ(doc_id, results["hits"][23 - i]["document"]["id"].get<std::string>());
}
}
TEST_F(CollectionSortingTest, FrequencyOrderedTokensWithoutDefaultSortingField) {
// when no default sorting field is provided, tokens must be ordered on frequency
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields).get();
}
    // since only the top 4 tokens are fetched for prefix search, "enyzme" should not show up in the results
std::vector<std::string> tokens = {
"enter", "elephant", "enamel", "ercot", "enyzme", "energy",
"epoch", "epyc", "express", "everest", "end"
};
for(size_t i = 0; i < tokens.size(); i++) {
size_t num_repeat = tokens.size() - i;
std::string title = tokens[i];
for(size_t j = 0; j < num_repeat; j++) {
nlohmann::json doc;
doc["title"] = title;
doc["points"] = num_repeat;
coll1->add(doc.dump());
}
}
// with the default max_candidates of 4
auto results = coll1->search("e", {"title"}, "", {}, {}, {0}, 100, 1, NOT_SET, {true}).get();
// [11 + 10 + 9 + 8] + 7 + 6 + 5 + 4 + 3 + 2
ASSERT_EQ(38, results["found"].get<size_t>());
// ensure that no result contains "enyzme", since it is not among the top 4 most frequent tokens
bool found_enyzme = false;
for(auto& res: results["hits"].items()) {
if(res.value()["document"]["title"] == "enyzme") {
found_enyzme = true;
}
}
// 2 candidates
results = coll1->search("e", {"title"}, "", {}, {}, {0}, 100, 1, NOT_SET, {true},
0, spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(),
10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
off, 2).get();
// [11 + 10] + 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2
ASSERT_EQ(21, results["found"].get<size_t>());
ASSERT_FALSE(found_enyzme);
}
TEST_F(CollectionSortingTest, TokenOrderingOnFloatValue) {
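// when the default sorting field is a float, prefix token ordering should follow that field's value: docs with the highest "points" surface first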
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::FLOAT, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::string> tokens = {
"enter", "elephant", "enamel", "ercot", "enyzme", "energy",
"epoch", "epyc", "express", "everest", "end"
};
for(size_t i = 0; i < tokens.size(); i++) {
std::string title = tokens[i];
float fpoint = (0.01 * i);
nlohmann::json doc;
doc["title"] = title;
doc["points"] = fpoint;
coll1->add(doc.dump());
}
auto results = coll1->search("e", {"title"}, "", {}, {}, {0}, 3, 1, MAX_SCORE, {true}).get();
ASSERT_EQ("10", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("9", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("8", results["hits"][2]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSortingTest, Int64AsDefaultSortingField) {
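// large int64 values should work as the default sorting field and sort correctly in both ASC and DESC order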
Collection *coll_mul_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::vector<field> fields = {field("title", field_types::STRING, false),
field("starring", field_types::STRING, false),
field("points", field_types::INT64, false),
field("cast", field_types::STRING_ARRAY, false)};
coll_mul_fields = collectionManager.get_collection("coll_mul_fields").get();
if(coll_mul_fields == nullptr) {
coll_mul_fields = collectionManager.create_collection("coll_mul_fields", 4, fields, "points").get();
}
auto doc_str1 = "{\"title\": \"foo\", \"starring\": \"bar\", \"points\": 343234324234233234, \"cast\": [\"baz\"] }";
const Option<nlohmann::json> & add_op = coll_mul_fields->add(doc_str1);
ASSERT_TRUE(add_op.ok());
auto doc_str2 = "{\"title\": \"foo\", \"starring\": \"bar\", \"points\": 343234324234233232, \"cast\": [\"baz\"] }";
auto doc_str3 = "{\"title\": \"foo\", \"starring\": \"bar\", \"points\": 343234324234233235, \"cast\": [\"baz\"] }";
auto doc_str4 = "{\"title\": \"foo\", \"starring\": \"bar\", \"points\": 343234324234233231, \"cast\": [\"baz\"] }";
coll_mul_fields->add(doc_str2);
coll_mul_fields->add(doc_str3);
coll_mul_fields->add(doc_str4);
query_fields = {"title"};
std::vector<std::string> facets;
sort_fields = { sort_by("points", "ASC") };
nlohmann::json results = coll_mul_fields->search("foo", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
std::vector<std::string> ids = {"3", "1", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// DESC
sort_fields = { sort_by("points", "desc") };
results = coll_mul_fields->search("foo", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"2", "0", "1", "3"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
}
TEST_F(CollectionSortingTest, SortOnFloatFields) {
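// sort on two float fields, with the second field breaking ties; exercises DESC/DESC, ASC/ASC and ASC/DESC combinations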
Collection *coll_float_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/float_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("score", field_types::FLOAT, false),
field("average", field_types::FLOAT, false)
};
std::vector<sort_by> sort_fields_desc = { sort_by("score", "DESC"), sort_by("average", "DESC") };
coll_float_fields = collectionManager.get_collection("coll_float_fields").get();
if(coll_float_fields == nullptr) {
coll_float_fields = collectionManager.create_collection("coll_float_fields", 4, fields, "score").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_float_fields->add(json_line);
}
infile.close();
query_fields = {"title"};
std::vector<std::string> facets;
nlohmann::json results = coll_float_fields->search("Jeremy", query_fields, "", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(7, results["hits"].size());
std::vector<std::string> ids = {"2", "0", "3", "1", "5", "4", "6"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
std::vector<sort_by> sort_fields_asc = { sort_by("score", "ASC"), sort_by("average", "ASC") };
results = coll_float_fields->search("Jeremy", query_fields, "", facets, sort_fields_asc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(7, results["hits"].size());
ids = {"6", "4", "5", "1", "3", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
EXPECT_STREQ(id.c_str(), result_id.c_str());
}
// second field sorted in descending order
std::vector<sort_by> sort_fields_asc_desc = { sort_by("score", "ASC"), sort_by("average", "DESC") };
results = coll_float_fields->search("Jeremy", query_fields, "", facets, sort_fields_asc_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(7, results["hits"].size());
ids = {"5", "4", "6", "1", "3", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
EXPECT_STREQ(id.c_str(), result_id.c_str());
}
collectionManager.drop_collection("coll_float_fields");
}
TEST_F(CollectionSortingTest, ThreeSortFieldsLimit) {
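// specifying more than 3 sort_by fields in a single search should be rejected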
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),
field("average", field_types::INT32, false),
field("min", field_types::INT32, false),
field("max", field_types::INT32, false),
};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "100";
doc1["title"] = "The quick brown fox";
doc1["points"] = 25;
doc1["average"] = 25;
doc1["min"] = 25;
doc1["max"] = 25;
coll1->add(doc1.dump());
std::vector<sort_by> sort_fields_desc = {
sort_by("points", "DESC"),
sort_by("average", "DESC"),
sort_by("max", "DESC"),
sort_by("min", "DESC"),
};
query_fields = {"title"};
auto res_op = coll1->search("the", query_fields, "", {}, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Only upto 3 sort_by fields can be specified.", res_op.error().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, ThreeSortFieldsTextMatchLast) {
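// the text match score can be specified explicitly as the last of the three sort fields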
Collection *coll1;
std::vector<field> fields = { field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("popularity", field_types::INT32, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Coby Grant", "100"}, // text_match: 33684577
{"Coby Prant", "84642"}, // text_match: 129377
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][0];
doc["popularity"] = std::stoi(records[i][1]);
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = { sort_by("popularity", "DESC"), sort_by("points", "DESC"), sort_by(sort_field_const::text_match, "DESC") };
auto res = coll1->search("grant",
{"title","artist"}, "", {}, sort_fields, {1}, 10, 1, FREQUENCY, {false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(2, res["found"].get<size_t>());
ASSERT_STREQ("1", res["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", res["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, SingleFieldTextMatchScoreDefault) {
// when queried with a single field, _text_match score should be used implicitly as the second sorting field
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Alppha Beta"},
{"Alpha Beta"},
{"Alphas Beta"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = { sort_by("points", "DESC") };
auto results = coll1->search("alpha",
{"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][2]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, NegativeInt64Value) {
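// a negative int64 value must be indexed correctly so that a ">=" filter on a large positive value excludes it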
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT64, false),
};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "100";
doc1["title"] = "The quick brown fox";
doc1["points"] = -2678400;
coll1->add(doc1.dump());
std::vector<sort_by> sort_fields_desc = {
sort_by("points", "DESC")
};
query_fields = {"title"};
auto res = coll1->search("*", query_fields, "points:>=1577836800", {}, sort_fields_desc, {0}, 10, 1, FREQUENCY,
{false}).get();
ASSERT_EQ(0, res["found"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, GeoPointSorting) {
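// sort by distance from a given geopoint (ASC and DESC), verify the reported geo_distance_meters, and reject malformed geo sort fields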
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Palais Garnier", "48.872576479306765, 2.332291112241466"},
{"Sacre Coeur", "48.888286721920934, 2.342340862419206"},
{"Arc de Triomphe", "48.87538726829884, 2.296113163780903"},
{"Place de la Concorde", "48.86536119187326, 2.321850747347093"},
{"Louvre Musuem", "48.86065813197502, 2.3381285349616725"},
{"Les Invalides", "48.856648379569904, 2.3118555692631357"},
{"Eiffel Tower", "48.85821022164442, 2.294239067890161"},
{"Notre-Dame de Paris", "48.852455825574495, 2.35071182406452"},
{"Musee Grevin", "48.872370541246816, 2.3431536410008906"},
{"Pantheon", "48.84620987789056, 2.345152755563131"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// pick a large radius covering all points, with a point close to Pantheon
std::vector<sort_by> geo_sort_fields = {
sort_by("loc(48.84442912268208, 2.3490714964332353)", "ASC")
};
auto results = coll1->search("*",
{}, "loc: (48.84442912268208, 2.3490714964332353, 20km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(10, results["found"].get<size_t>());
std::vector<std::string> expected_ids = {
"9", "7", "4", "5", "3", "8", "0", "6", "1", "2"
};
for(size_t i=0; i < expected_ids.size(); i++) {
ASSERT_STREQ(expected_ids[i].c_str(), results["hits"][i]["document"]["id"].get<std::string>().c_str());
}
ASSERT_EQ(348, results["hits"][0]["geo_distance_meters"]["loc"].get<int>());
ASSERT_EQ(900, results["hits"][1]["geo_distance_meters"]["loc"].get<int>());
ASSERT_EQ(1973, results["hits"][2]["geo_distance_meters"]["loc"].get<int>());
// desc, without filter
geo_sort_fields = {
sort_by("loc(48.84442912268208, 2.3490714964332353)", "DESC")
};
results = coll1->search("*",
{}, "",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(10, results["found"].get<size_t>());
for(size_t i=0; i < expected_ids.size(); i++) {
ASSERT_STREQ(expected_ids[expected_ids.size() - 1 - i].c_str(), results["hits"][i]["document"]["id"].get<std::string>().c_str());
}
// with bad sort field formats
std::vector<sort_by> bad_geo_sort_fields = {
sort_by("loc(,2.3490714964332353)", "ASC")
};
auto res_op = coll1->search("*",
{}, "",
{}, bad_geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Bad syntax for sorting field `loc`", res_op.error().c_str());
bad_geo_sort_fields = {
sort_by("loc(x, y)", "ASC")
};
res_op = coll1->search("*",
{}, "",
{}, bad_geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Bad syntax for sorting field `loc`", res_op.error().c_str());
bad_geo_sort_fields = {
sort_by("loc(", "ASC")
};
res_op = coll1->search("*",
{}, "",
{}, bad_geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Could not find a field named `loc(` in the schema for sorting.", res_op.error().c_str());
bad_geo_sort_fields = {
sort_by("loc)", "ASC")
};
res_op = coll1->search("*",
{}, "",
{}, bad_geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Could not find a field named `loc)` in the schema for sorting.", res_op.error().c_str());
bad_geo_sort_fields = {
sort_by("l()", "ASC")
};
res_op = coll1->search("*",
{}, "",
{}, bad_geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_STREQ("Could not find a field named `l` in the schema for sorting.", res_op.error().c_str());
// should not allow creation of collection with geo field as default_sorting_field
nlohmann::json schema = R"({
"name": "coll_geo",
"fields": [
{"name": "title", "type": "string"},
{"name": "location", "type": "geopoint" }
],
"default_sorting_field": "location"
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Default sorting field cannot be of type geopoint.", op.error());
schema = R"({
"name": "coll_geo",
"fields": [
{"name": "title", "type": "string"},
{"name": "location", "type": "geopoint[]" }
],
"default_sorting_field": "location"
})"_json;
op = collectionManager.create_collection(schema);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Default sorting field cannot be of type geopoint.", op.error());
collectionManager.drop_collection("coll1");
collectionManager.drop_collection("coll_geo");
}
TEST_F(CollectionSortingTest, GeoPointSortingWithExcludeRadius) {
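// exclude_radius: documents within the given radius are treated as equidistant and are tie-broken by the next sort field (points)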
Collection* coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Tibetan Colony", "32.24678, 77.19239"},
{"Civil Hospital", "32.23959, 77.18763"},
{"Johnson Lodge", "32.24751, 77.18814"},
{"Lion King Rock", "32.24493, 77.17038"},
{"Jai Durga Handloom", "32.25749, 77.17583"},
{"Panduropa", "32.26059, 77.21798"},
};
for (size_t i = 0; i < records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> geo_sort_fields = {
sort_by("loc(32.24348, 77.1893, exclude_radius: 1km)", "ASC"),
sort_by("points", "DESC"),
};
auto results = coll1->search("*",
{}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(6, results["found"].get<size_t>());
std::vector<std::string> expected_ids = {
"2", "1", "0", "3", "4", "5"
};
for (size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_STREQ(expected_ids[i].c_str(), results["hits"][i]["document"]["id"].get<std::string>().c_str());
}
// without exclusion filter
geo_sort_fields = {
sort_by("loc(32.24348, 77.1893)", "ASC"),
sort_by("points", "DESC"),
};
results = coll1->search("*",
{}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(6, results["found"].get<size_t>());
expected_ids = {
"1", "2", "0", "3", "4", "5"
};
for (size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_STREQ(expected_ids[i].c_str(), results["hits"][i]["document"]["id"].get<std::string>().c_str());
}
geo_sort_fields = { sort_by("loc(32.24348, 77.1893, precision: 2mi)", "ASC") };
auto res_op = coll1->search("*", {}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_TRUE(res_op.ok());
// bad vertex -- Edge 0 is degenerate (duplicate vertex)
geo_sort_fields = { sort_by("loc(28.7040592, 77.10249019999999)", "ASC") };
res_op = coll1->search("*", {}, "loc: (28.7040592, 77.10249019999999, 28.7040592, "
"77.10249019999999, 28.7040592, 77.10249019999999, 28.7040592, 77.10249019999999)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Polygon is invalid: Edge 0 is degenerate (duplicate vertex)", res_op.error());
// badly formatted exclusion filter
geo_sort_fields = { sort_by("loc(32.24348, 77.1893, exclude_radius 1 km)", "ASC") };
res_op = coll1->search("*", {}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Bad syntax for sorting field `loc`", res_op.error());
geo_sort_fields = { sort_by("loc(32.24348, 77.1893, exclude_radius: 1 meter)", "ASC") };
res_op = coll1->search("*", {}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Sort field's parameter unit must be either `km` or `mi`.", res_op.error());
geo_sort_fields = { sort_by("loc(32.24348, 77.1893, exclude_radius: -10 km)", "ASC") };
res_op = coll1->search("*", {}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Sort field's parameter must be a positive number.", res_op.error());
geo_sort_fields = { sort_by("loc(32.24348, 77.1893, exclude_radius: 10 km 20 mi)", "ASC") };
res_op = coll1->search("*", {}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Bad syntax for sorting field `loc`", res_op.error());
geo_sort_fields = { sort_by("loc(32.24348, 77.1893, exclude_radius: 1k)", "ASC") };
res_op = coll1->search("*", {}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Sort field's parameter unit must be either `km` or `mi`.", res_op.error());
geo_sort_fields = { sort_by("loc(32.24348, 77.1893, exclude_radius: 5)", "ASC") };
res_op = coll1->search("*", {}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Bad syntax for sorting field `loc`", res_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, GeoPointSortingWithPrecision) {
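// precision: geo distances are bucketed, so documents falling into the same bucket are tie-broken by the next sort field (points)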
Collection* coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Tibetan Colony", "32.24678, 77.19239"},
{"Civil Hospital", "32.23959, 77.18763"},
{"Johnson Lodge", "32.24751, 77.18814"},
{"Lion King Rock", "32.24493, 77.17038"},
{"Jai Durga Handloom", "32.25749, 77.17583"},
{"Panduropa", "32.26059, 77.21798"},
{"Police Station", "32.23743, 77.18639"},
{"Panduropa Post", "32.26263, 77.2196"},
};
for (size_t i = 0; i < records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> geo_sort_fields = {
sort_by("loc(32.24348, 77.1893, precision: 0.9 km)", "ASC"),
sort_by("points", "DESC"),
};
auto results = coll1->search("*",
{}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(8, results["found"].get<size_t>());
std::vector<std::string> expected_ids = {
"6", "2", "1", "0", "3", "4", "7", "5"
};
std::vector<float> geo_distance_meters = {726,461,460,467,1786,2007,3556,3299};
for (size_t i = 0; i < expected_ids.size(); i++) {
auto const& hit = results["hits"][i];
ASSERT_EQ(expected_ids[i], hit["document"]["id"]);
ASSERT_FLOAT_EQ(geo_distance_meters[i], hit["geo_distance_meters"]["loc"]);
}
// badly formatted precision
geo_sort_fields = { sort_by("loc(32.24348, 77.1893, precision 1 km)", "ASC") };
auto res_op = coll1->search("*", {}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Bad syntax for sorting field `loc`", res_op.error());
geo_sort_fields = { sort_by("loc(32.24348, 77.1893, precision: 1 meter)", "ASC") };
res_op = coll1->search("*", {}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Sort field's parameter unit must be either `km` or `mi`.", res_op.error());
geo_sort_fields = { sort_by("loc(32.24348, 77.1893, precision: -10 km)", "ASC") };
res_op = coll1->search("*", {}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Sort field's parameter must be a positive number.", res_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, GeoPointAsOptionalField) {
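// a document missing an optional geopoint field should simply be left out of geo-filtered results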
Collection* coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false, true),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Tibetan Colony", "32.24678, 77.19239"},
{"Civil Hospital", "32.23959, 77.18763"},
{"Johnson Lodge", "32.24751, 77.18814"},
{"Lion King Rock", "32.24493, 77.17038"},
{"Jai Durga Handloom", "32.25749, 77.17583"},
{"Panduropa", "32.26059, 77.21798"},
{"Police Station", "32.23743, 77.18639"},
{"Panduropa Post", "32.26263, 77.2196"},
};
for (size_t i = 0; i < records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
if(i != 2) {
doc["loc"] = {lat, lng};
}
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> geo_sort_fields = {
sort_by("loc(32.24348, 77.1893, precision: 0.9 km)", "ASC"),
sort_by("points", "DESC"),
};
auto results = coll1->search("*",
{}, "loc: (32.24348, 77.1893, 20 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(7, results["found"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, GeoPointArraySorting) {
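// for geopoint array fields, distance sorting should use each document's nearest point (per the expected ordering below)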
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT_ARRAY, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::vector<std::string>>> records = {
{ {"Alpha Inc", "Ennore", "13.22112, 80.30511"},
{"Alpha Inc", "Velachery", "12.98973, 80.23095"}
},
{
{"Veera Inc", "Thiruvallur", "13.12752, 79.90136"},
},
{
{"B1 Inc", "Bengaluru", "12.98246, 77.5847"},
{"B1 Inc", "Hosur", "12.74147, 77.82915"},
{"B1 Inc", "Vellore", "12.91866, 79.13075"},
},
{
{"M Inc", "Nashik", "20.11282, 73.79458"},
{"M Inc", "Pune", "18.56309, 73.855"},
}
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0][0];
doc["points"] = i;
std::vector<std::vector<double>> lat_lngs;
for(size_t k = 0; k < records[i].size(); k++) {
std::vector<std::string> lat_lng_str;
StringUtils::split(records[i][k][2], lat_lng_str, ", ");
std::vector<double> lat_lng = {
std::stod(lat_lng_str[0]),
std::stod(lat_lng_str[1])
};
lat_lngs.push_back(lat_lng);
}
doc["loc"] = lat_lngs;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
std::vector<sort_by> geo_sort_fields = {
sort_by("loc(13.12631, 80.20252)", "ASC"),
sort_by("points", "DESC"),
};
// pick a location close to Chennai
auto results = coll1->search("*",
{}, "loc: (13.12631, 80.20252, 100 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
// pick a large radius covering all points
geo_sort_fields = {
sort_by("loc(13.03388, 79.25868)", "ASC"),
sort_by("points", "DESC"),
};
results = coll1->search("*",
{}, "loc: (13.03388, 79.25868, 1000 km)",
{}, geo_sort_fields, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_STREQ("2", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("3", results["hits"][3]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, SortByTitle) {
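// a string field marked as sortable can be sorted lexicographically and used as the default sorting field; non-sortable string fields cannot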
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", true),
field("artist", field_types::STRING, true),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
auto create_op = collectionManager.create_collection("coll1", 2, fields, "title");
ASSERT_TRUE(create_op.ok());
coll1 = create_op.get();
}
std::vector<std::vector<std::string>> records = {
{"aaa", "ABCD"},
{"a", "ABCD"},
{"abcd", "ABCD"},
{"abdde", "ABCD"},
{"b", "ABCD"},
{"bab", "ABCD"},
{"baa", "ABCD"},
{"bcma", "ABCD"},
{"cdma", "ABCD"},
{"cc", "ABCD"},
{"c", "ABCD"},
{"cxya", "ABCD"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = {
sort_by("title", "ASC")
};
std::vector<std::string> expected_order = {
"a",
"aaa",
"abcd",
"abdde",
"b",
"baa",
"bab",
"bcma",
"c",
"cc",
"cdma",
"cxya"
};
auto results = coll1->search("*", {}, "", {}, sort_fields, {0}, 20, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(12, results["found"].get<size_t>());
for(size_t i = 0; i < results["hits"].size(); i++) {
ASSERT_EQ(expected_order[i], results["hits"][i]["document"]["title"].get<std::string>());
}
// descending order
sort_fields = {
sort_by("title", "DESC")
};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 20, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(12, results["found"].get<size_t>());
for(size_t i = 0; i < results["hits"].size(); i++) {
ASSERT_EQ(expected_order[expected_order.size() - i - 1], results["hits"][i]["document"]["title"].get<std::string>());
}
// sorting on a string field that was not marked as sortable should be rejected
sort_fields = {
sort_by("artist", "DESC")
};
auto res_op = coll1->search("*", {}, "", {}, sort_fields, {0}, 20, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `artist` in the schema for sorting.", res_op.error());
// don't allow non-sort string field to be used as default sorting field
fields = {field("title", field_types::STRING, false, false, true, "", false),
field("artist", field_types::STRING, true),
field("points", field_types::INT32, false),};
auto create_op = collectionManager.create_collection("coll2", 2, fields, "title");
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Default sorting field `title` is not a sortable type.", create_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, SortByIntegerAndString) {
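// integer and string sort fields can be combined, with the string field acting as the tie-breaker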
Collection* coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", true),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto create_op = collectionManager.create_collection("coll1", 2, fields, "title");
ASSERT_TRUE(create_op.ok());
coll1 = create_op.get();
}
std::vector<std::vector<std::string>> records = {
{"abdde", "2"},
{"b", "2"},
{"b", "1"},
{"a", "1"},
{"c", "1"},
{"dd", "4"},
{"bab", "3"},
{"baa", "3"},
{"bcma", "3"},
{"cdma", "3"},
{"c", "5"},
{"x", "6"},
};
for (size_t i = 0; i < records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = std::stoi(records[i][1]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = {
sort_by("points", "ASC"),
sort_by("title", "ASC"),
};
auto results = coll1->search("*", {}, "", {}, sort_fields, {0}, 20, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ("a", results["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ("b", results["hits"][1]["document"]["title"].get<std::string>());
ASSERT_EQ("c", results["hits"][2]["document"]["title"].get<std::string>());
ASSERT_EQ("abdde", results["hits"][3]["document"]["title"].get<std::string>());
ASSERT_EQ("b", results["hits"][4]["document"]["title"].get<std::string>());
ASSERT_EQ("baa", results["hits"][5]["document"]["title"].get<std::string>());
sort_fields = {
sort_by("_text_match", "DESC"),
sort_by("points", "ASC"),
sort_by("title", "ASC"),
};
results = coll1->search("b", {"title"}, "", {}, sort_fields, {0}, 20, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ("b", results["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ("b", results["hits"][1]["document"]["title"].get<std::string>());
ASSERT_EQ("baa", results["hits"][2]["document"]["title"].get<std::string>());
ASSERT_EQ("bab", results["hits"][3]["document"]["title"].get<std::string>());
ASSERT_EQ("bcma", results["hits"][4]["document"]["title"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, SortByStringEmptyValuesConfigFirstField) {
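// the missing_values parameter (first/last/normal) controls where documents with an empty string value land when the string sort is the primary field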
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", true),
field("points1", field_types::INT32, false),
field("points2", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points1").get();
}
std::vector<std::string> tokens = {
"alpha", "beta", "", "gamma"
};
for(size_t i = 0; i < tokens.size(); i++) {
std::string title = tokens[i];
nlohmann::json doc;
doc["title"] = title;
doc["points1"] = 100;
doc["points2"] = 100;
coll1->add(doc.dump());
}
// ascending
std::vector<sort_by> sort_fields = {
sort_by("title(missing_values: first)", "ASC"),
};
auto results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {
sort_by("title(missing_values: last)", "ASC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// descending
sort_fields = {
sort_by("title(missing_values: first)", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {
sort_by("title(missing_values: last)", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// without an explicit arg, missing values are treated as having the largest value (same as SQL)
sort_fields = {
sort_by("title", "asc"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
sort_fields = {
sort_by("title", "desc"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
// natural order
sort_fields = {
sort_by("title(missing_values: normal)", "asc"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
sort_fields = {
sort_by("title(missing_values: normal)", "desc"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
// bad syntax
sort_fields = {
sort_by("title(foo: bar)", "desc"),
};
auto res_op = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Bad syntax for sorting field `title`", res_op.error());
sort_fields = {
sort_by("title(missing_values: bar)", "desc"),
};
res_op = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Bad syntax for sorting field `title`", res_op.error());
}
TEST_F(CollectionSortingTest, SortByStringEmptyValuesConfigSecondField) {
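// same as above, but the missing_values-configured string sort is the second sort field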
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", true),
field("points1", field_types::INT32, false),
field("points2", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points1").get();
}
std::vector<std::string> tokens = {
"alpha", "beta", "", "gamma"
};
for(size_t i = 0; i < tokens.size(); i++) {
std::string title = tokens[i];
nlohmann::json doc;
doc["title"] = title;
doc["points1"] = 100;
doc["points2"] = 100;
coll1->add(doc.dump());
}
// ascending
std::vector<sort_by> sort_fields = {
sort_by("points1", "ASC"),
sort_by("title(missing_values: first)", "ASC"),
};
auto results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {
sort_by("points1", "ASC"),
sort_by("title(missing_values: last)", "ASC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// descending
sort_fields = {
sort_by("points1", "ASC"),
sort_by("title(missing_values: first)", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {
sort_by("points1", "ASC"),
sort_by("title(missing_values: last)", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// without an explicit arg, missing values are treated as having the largest value (same as SQL)
sort_fields = {
sort_by("points1", "ASC"),
sort_by("title", "ASC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
sort_fields = {
sort_by("points1", "ASC"),
sort_by("title", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSortingTest, SortByStringEmptyValuesConfigThirdField) {
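// same as above, but the missing_values-configured string sort is the third sort field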
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", true),
field("points1", field_types::INT32, false),
field("points2", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points1").get();
}
std::vector<std::string> tokens = {
"alpha", "beta", "", "gamma"
};
for(size_t i = 0; i < tokens.size(); i++) {
std::string title = tokens[i];
nlohmann::json doc;
doc["title"] = title;
doc["points1"] = 100;
doc["points2"] = 100;
coll1->add(doc.dump());
}
// ascending
std::vector<sort_by> sort_fields = {
sort_by("points1", "ASC"),
sort_by("points2", "ASC"),
sort_by("title(missing_values: first)", "ASC"),
};
auto results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {
sort_by("points1", "ASC"),
sort_by("points2", "ASC"),
sort_by("title(missing_values: last)", "ASC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// descending
sort_fields = {
sort_by("points1", "ASC"),
sort_by("points2", "ASC"),
sort_by("title(missing_values: first)", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {
sort_by("points1", "ASC"),
sort_by("points2", "ASC"),
sort_by("title(missing_values: last)", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// without an explicit arg, missing values are treated as having the largest value (same as SQL)
sort_fields = {
sort_by("points1", "ASC"),
sort_by("points2", "ASC"),
sort_by("title", "ASC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
sort_fields = {
sort_by("points1", "ASC"),
sort_by("points2", "ASC"),
sort_by("title", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSortingTest, SortByStringAccentedChars) {
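// accented characters should collate next to their unaccented counterparts when sorting ("Ä" with "A", "Ü" with "U")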
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", true),
field("artist", field_types::STRING, true),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
auto create_op = collectionManager.create_collection("coll1", 2, fields, "title");
ASSERT_TRUE(create_op.ok());
coll1 = create_op.get();
}
std::vector<std::vector<std::string>> records = {
{"The unbearable lightness of being", "ABCD"},
{"A brief history of time", "ABCD"},
{"Über den Wolken", "ABCD"},
{"Ändere deine Coding Gewohnheiten", "ABCD"},
{"Zodiac", "ABCD"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = {
sort_by("title", "ASC")
};
std::vector<std::string> expected_order = {
"A brief history of time",
"Ändere deine Coding Gewohnheiten",
"The unbearable lightness of being",
"Über den Wolken",
"Zodiac",
};
auto results = coll1->search("*", {}, "", {}, sort_fields, {0}, 20, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["found"].get<size_t>());
for(size_t i = 0; i < results["hits"].size(); i++) {
ASSERT_EQ(expected_order[i], results["hits"][i]["document"]["title"].get<std::string>());
}
// descending order
sort_fields = {
sort_by("title", "DESC")
};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 20, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["found"].get<size_t>());
for(size_t i = 0; i < results["hits"].size(); i++) {
ASSERT_EQ(expected_order[expected_order.size() - i - 1], results["hits"][i]["document"]["title"].get<std::string>());
}
}
TEST_F(CollectionSortingTest, TextMatchBucketRanking) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Mark Antony";
doc1["description"] = "Counsellor";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Marks Spencer";
doc2["description"] = "Sales Expert";
doc2["points"] = 200;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
sort_fields = {
sort_by("_text_match(buckets: 10)", "DESC"),
sort_by("points", "DESC"),
};
auto results = coll1->search("mark", {"title"},
"", {}, sort_fields, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3}, 1000, true).get();
// when there are more buckets than results, no bucketing will happen
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
// bucketing by 1 collapses all matches into a single text match bucket, so ordering falls back to points DESC (raw scores still differ, as asserted below)
sort_fields = {
sort_by("_text_match(buckets: 1)", "DESC"),
sort_by("points", "DESC"),
};
results = coll1->search("mark", {"title"},
"", {}, sort_fields, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
size_t score1 = std::stoul(results["hits"][0]["text_match_info"]["score"].get<std::string>());
size_t score2 = std::stoul(results["hits"][1]["text_match_info"]["score"].get<std::string>());
ASSERT_TRUE(score1 < score2);
// bucketing by 0 produces original text match
sort_fields = {
sort_by("_text_match(buckets: 0)", "DESC"),
sort_by("points", "DESC"),
};
results = coll1->search("mark", {"title"},
"", {}, sort_fields, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
// don't allow bad parameter name
sort_fields[0] = sort_by("_text_match(foobar: 0)", "DESC");
auto res_op = coll1->search("mark", {"title"},
"", {}, sort_fields, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3}, 1000, true);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Invalid sorting parameter passed for _text_match.", res_op.error());
// handle bad syntax
sort_fields[0] = sort_by("_text_match(foobar:", "DESC");
res_op = coll1->search("mark", {"title"},
"", {}, sort_fields, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3}, 1000, true);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `_text_match(foobar:` in the schema for sorting.", res_op.error());
// handle bad value
sort_fields[0] = sort_by("_text_match(buckets: x)", "DESC");
res_op = coll1->search("mark", {"title"},
"", {}, sort_fields, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3}, 1000, true);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Invalid value passed for _text_match `buckets` configuration.", res_op.error());
// handle negative value
sort_fields[0] = sort_by("_text_match(buckets: -1)", "DESC");
res_op = coll1->search("mark", {"title"},
"", {}, sort_fields, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3}, 1000, true);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Invalid value passed for _text_match `buckets` configuration.", res_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, TextMatchMoreDocsThanBuckets) {
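// 4 matching docs split across 2 buckets: documents within the same bucket are tie-broken by points DESC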
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<std::vector<std::string>> records = {
{"Mark Antony"},
{"Marks Spencer"},
{"Marking Rhine"},
{"Markolm Spane"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
sort_fields = {
sort_by("_text_match(buckets: 2)", "DESC"),
sort_by("points", "DESC"),
};
auto results = coll1->search("mark", {"title"},
"", {}, sort_fields, {0}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 1000, true).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][3]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, RepeatingTokenRanking) {
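// a query with a repeated token should rank the exact two-word title highest; the longer titles containing the same repetition score equally below it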
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Mong Mong";
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Mong Spencer";
doc2["points"] = 200;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["title"] = "Mong Mong Spencer";
doc3["points"] = 300;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["title"] = "Spencer Mong Mong";
doc4["points"] = 400;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
sort_fields = {
sort_by("_text_match", "DESC"),
sort_by("points", "DESC"),
};
auto results = coll1->search("mong mong", {"title"},
"", {}, sort_fields, {2}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3}, 1000, true).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("3", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][3]["document"]["id"].get<std::string>());
ASSERT_EQ(1157451471575842841, results["hits"][0]["text_match"].get<size_t>());
ASSERT_EQ(1157451471575318553, results["hits"][1]["text_match"].get<size_t>());
ASSERT_EQ(1157451471575318553, results["hits"][2]["text_match"].get<size_t>());
ASSERT_EQ(1157451471575318553, results["hits"][3]["text_match"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, SortingDoesNotHaveTextMatchComponent) {
// the text_match field should not be present in the response when text match is not part of the sort criteria
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Test Title";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
sort_fields = {
sort_by("points", "DESC"),
sort_by("points", "DESC"),
sort_by("points", "DESC"),
};
auto results = coll1->search("test", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(0, results["hits"][0].count("text_match"));
results = coll1->search("*", {}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(0, results["hits"][0].count("text_match"));
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, IntegerFloatAndBoolShouldDefaultSortTrue) {
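// int32, int64, float and bool fields should be sortable by default, without an explicit "sort": true in the schema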
std::string coll_schema = R"(
{
"name": "coll1",
"fields": [
{"name": "title", "type": "string", "infix": true },
{"name": "points", "type": "int32" },
{"name": "timestamp", "type": "int64" },
{"name": "max", "type": "float" },
{"name": "is_valid", "type": "bool" }
]
}
)";
nlohmann::json schema = nlohmann::json::parse(coll_schema);
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Right on";
doc1["points"] = 100;
doc1["timestamp"] = 7273272372732;
doc1["max"] = 97.6;
doc1["is_valid"] = true;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto res_op = coll1->search("*", {"title"}, "", {}, {sort_by("points", "DESC")}, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_TRUE(res_op.ok());
res_op = coll1->search("*", {"title"}, "", {}, {sort_by("timestamp", "DESC")}, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_TRUE(res_op.ok());
res_op = coll1->search("*", {"title"}, "", {}, {sort_by("max", "DESC")}, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_TRUE(res_op.ok());
res_op = coll1->search("*", {"title"}, "", {}, {sort_by("is_valid", "DESC")}, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_TRUE(res_op.ok());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, DisallowSortingOnNonIndexedIntegerField) {
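// a field declared with "index": false must not be usable for sorting, with or without a missing_values parameter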
std::string coll_schema = R"(
{
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "points", "type": "int32", "index": false, "optional": true }
]
}
)";
nlohmann::json schema = nlohmann::json::parse(coll_schema);
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Right on";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto res_op = coll1->search("*", {"title"}, "", {}, {sort_by("points", "DESC")}, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `points` in the schema for sorting.", res_op.error());
res_op = coll1->search("*", {"title"}, "", {}, {sort_by("points(missing_values: first)", "DESC")}, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `points` in the schema for sorting.", res_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, WildcardSearchSequenceIdSort) {
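// the built-in _seq_id field can be used as a sort field for wildcard searches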
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "category", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["category"] = "Shoes";
for(size_t i = 0; i < 30; i++) {
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = {
sort_by("_seq_id", "DESC"),
};
auto res = coll1->search("*", {"category"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(10, res["hits"].size());
ASSERT_EQ(30, res["found"].get<size_t>());
}
TEST_F(CollectionSortingTest, DefaultSortingFieldStringNotIndexed) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "category", "type": "string", "sort": true, "index": false}
],
"default_sorting_field": "category"
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["category"] = "Shoes";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
std::vector<sort_by> sort_fields = {};
auto res_op = coll1->search("*", {}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Default sorting field not found in the schema or it has been marked as a "
"non-indexed field.", res_op.error());
}
TEST_F(CollectionSortingTest, SortingFieldNotIndexed) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "category", "type": "int32", "sort": true, "index": false}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["category"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
std::vector<sort_by> sort_fields = {
sort_by("category", "DESC"),
};
auto res_op = coll1->search("*", {}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `category` in the schema for sorting.", res_op.error());
}
TEST_F(CollectionSortingTest, OptionalFilteringViaSortingWildcard) {
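// "optional filtering": each eval clause in sort_by assigns its score to documents matching that filter, boosting them without excluding the rest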
std::string coll_schema = R"(
{
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "brand", "type": "string", "infix": true },
{"name": "points", "type": "int32" }
]
}
)";
nlohmann::json schema = nlohmann::json::parse(coll_schema);
Collection* coll1 = collectionManager.create_collection(schema).get();
for(size_t i = 0; i < 5; i++) {
nlohmann::json doc;
doc["title"] = "Title " + std::to_string(i);
doc["points"] = i;
doc["brand"] = (i == 0 || i == 3) ? "Nike" : "Adidas";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
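    // eval-based optional filtering: docs matching "brand:nike" are ranked above the rest,
    // and points (the next sort field) breaks ties within each group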
std::vector<sort_by> sort_fields = {
sort_by({"brand:nike"}, {1}, "DESC"),
sort_by("points", "DESC"),
};
auto results = coll1->search("*", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
std::vector<std::string> expected_ids = {"3", "0", "4", "2", "1"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
// compound query
sort_fields = {
sort_by({"brand:nike && points:0"}, {1}, "DESC"),
sort_by("points", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
expected_ids = {"0", "4", "3", "2", "1"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
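    // multiple eval expressions with scores: nike matches score 2, points:1 scores 1, the rest 0,
    // producing three buckets that are each ordered by points desc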
sort_fields = {
sort_by({"brand:nike", "points:1"}, {2, 1}, "DESC"),
sort_by("points", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
expected_ids = {"3", "0", "1", "4", "2"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
// when no results are found for eval query
sort_fields = {
sort_by({"brand:foobar"}, {1}, "DESC"),
sort_by("points", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
expected_ids = {"4", "3", "2", "1", "0"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
nlohmann::json doc = R"(
{
"title": "title5",
"brand": "puma",
"points": 5
}
)"_json;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
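    // nike scores 3, adidas scores 2; the new puma doc matches neither expression,
    // so it scores 0 and ends up last despite having the highest points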
sort_fields = {
sort_by({"brand:nike", "brand:adidas"}, {3, 2}, "DESC"),
sort_by("points", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(6, results["hits"].size());
expected_ids = {"3", "0", "4", "2", "1", "5"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
// Score associated with the first match is assigned to the document.
sort_fields = {
sort_by({"brand:nike", "brand:adidas", "points: 1"}, {3, 2, 5}, "DESC"),
sort_by("points", "DESC"),
};
results = coll1->search("*", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(6, results["hits"].size());
expected_ids = {"3", "0", "4", "2", "1", "5"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
// bad syntax for eval query
sort_fields = {
sort_by({"brandnike || points:0"}, {1}, "DESC"),
sort_by("points", "DESC"),
};
auto res_op = coll1->search("*", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error parsing eval expression in sort_by clause.", res_op.error());
// when eval condition is empty
std::map<std::string, std::string> req_params = {
{"collection", "coll1"},
{"q", "*"},
{"query_by", "title"},
{"sort_by", "_eval():desc"},
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("The eval expression in sort_by is empty.", search_op.error());
req_params = {
{"collection", "coll1"},
{"q", "a"},
{"query_by", "brand"},
{"sort_by", "_eval(brand:puma):desc, _text_match:desc"},
{"infix", "always"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
results = nlohmann::json::parse(json_res);
ASSERT_EQ(4, results["hits"].size()); // 3 Adidas 1 Puma documents
// Because of `_eval`, Puma document will be on top even when having a lower text match score than Adidas documents.
expected_ids = {"5", "4", "2", "1"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
// more bad syntax!
sort_fields = {
sort_by(")", "DESC"),
sort_by("points", "DESC"),
};
res_op = coll1->search("*", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `)` in the schema for sorting.", res_op.error());
// don't allow multiple sorting eval expressions
sort_fields = {
sort_by({"brand: nike || points:0"}, {1}, "DESC"),
sort_by({"brand: nike || points:0"}, {1}, "DESC"),
};
res_op = coll1->search("*", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Only one sorting eval expression is allowed.", res_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSortingTest, OptionalFilteringViaSortingSearch) {
std::string coll_schema = R"(
{
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "brand", "type": "string" },
{"name": "points", "type": "int32" }
]
}
)";
nlohmann::json schema = nlohmann::json::parse(coll_schema);
Collection* coll1 = collectionManager.create_collection(schema).get();
for(size_t i = 0; i < 5; i++) {
nlohmann::json doc;
doc["title"] = "Title " + std::to_string(i);
doc["points"] = i;
doc["brand"] = (i == 0 || i == 3) ? "Nike" : "Adidas";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = {
sort_by({"brand:nike"}, {1}, "DESC"),
sort_by("points", "DESC"),
};
auto results = coll1->search("title", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
std::vector<std::string> expected_ids = {"3", "0", "4", "2", "1"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
// compound query
sort_fields = {
sort_by({"brand:nike && points:0"}, {1}, "DESC"),
sort_by("points", "DESC"),
};
results = coll1->search("title", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
std::map<std::string, std::string> req_params = {
{"collection", "coll1"},
{"q", "title"},
{"query_by", "title"},
{"sort_by", "_eval(brand:[nike, adidas] && points:0):desc, points:DESC"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
results = nlohmann::json::parse(json_res);
ASSERT_EQ(5, results["hits"].size());
expected_ids = {"0", "4", "3", "2", "1"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
// when no results are found for eval query
sort_fields = {
sort_by({"brand:foobar"}, {1}, "DESC"),
sort_by("points", "DESC"),
};
results = coll1->search("title", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
expected_ids = {"4", "3", "2", "1", "0"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
// bad syntax for eval query
sort_fields = {
sort_by({"brandnike || points:0"}, {1}, "DESC"),
sort_by("points", "DESC"),
};
auto res_op = coll1->search("title", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error parsing eval expression in sort_by clause.", res_op.error());
// more bad syntax!
sort_fields = {
sort_by(")", "DESC"),
sort_by("points", "DESC"),
};
res_op = coll1->search("title", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `)` in the schema for sorting.", res_op.error());
collectionManager.drop_collection("coll1");
schema =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string", "infix": true},
{"name": "product_description", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_description"], "model_config": {"model_name": "ts/e5-small"}}},
{"name": "rating", "type": "int32"},
{"name": "stocks", "type": "object"},
{"name": "stocks.*", "type": "auto", "optional": true}
],
"enable_nested_fields": true
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair.",
"rating": "2",
"stocks": {
"26": {
"rec": true
}
}
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients.",
"rating": "4",
"stocks": {
"26": {
"rec": false
}
}
})"_json,
R"({
"product_id": "product_c",
"product_name": "comb",
"product_description": "Experience the natural elegance and gentle care of our handcrafted wooden combs – because your hair deserves the best.",
"rating": "3",
"stocks": {}
})"_json,
R"({
"product_id": "product_d",
"product_name": "hair oil",
"product_description": "Revitalize your hair with our nourishing hair oil – nature's secret to lustrous, healthy locks.",
"rating": "1",
"stocks": {
"26": {
"rec": false
}
}
})"_json
};
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "Products"},
{"q", "*"},
{"sort_by", "_eval([(stocks.26.rec:true):3, (stocks.26.rec:false):2]):desc"},
{"include_fields", "product_id, product_name, stocks"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("stocks"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"].count("26"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"]["26"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"]["26"].count("rec"));
ASSERT_TRUE(res_obj["hits"][0]["document"]["stocks"]["26"]["rec"]);
ASSERT_EQ("product_d", res_obj["hits"][1]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("stocks"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["stocks"].size());
ASSERT_EQ(1, res_obj["hits"][1]["document"]["stocks"].count("26"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["stocks"]["26"].size());
ASSERT_EQ(1, res_obj["hits"][1]["document"]["stocks"]["26"].count("rec"));
ASSERT_FALSE(res_obj["hits"][1]["document"]["stocks"]["26"]["rec"]);
ASSERT_EQ("product_b", res_obj["hits"][2]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][2]["document"].count("stocks"));
ASSERT_EQ(1, res_obj["hits"][2]["document"]["stocks"].size());
ASSERT_EQ(1, res_obj["hits"][2]["document"]["stocks"].count("26"));
ASSERT_EQ(1, res_obj["hits"][2]["document"]["stocks"]["26"].size());
ASSERT_EQ(1, res_obj["hits"][2]["document"]["stocks"]["26"].count("rec"));
ASSERT_FALSE(res_obj["hits"][2]["document"]["stocks"]["26"]["rec"]);
ASSERT_EQ("product_c", res_obj["hits"][3]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][3]["document"].count("stocks"));
ASSERT_EQ(0, res_obj["hits"][3]["document"]["stocks"].size());
}
TEST_F(CollectionSortingTest, DisallowIdAsDefaultSortingField) {
std::string coll_schema = R"(
{
"name": "coll1",
"default_sorting_field": "id",
"fields": [
{"name": "id", "type": "string" }
]
}
)";
nlohmann::json schema = nlohmann::json::parse(coll_schema);
auto coll_op = collectionManager.create_collection(schema);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("Invalid `default_sorting_field` value: cannot be `id`.", coll_op.error());
}
TEST_F(CollectionSortingTest, OptionalFilteringViaSortingSecondThirdParams) {
std::string coll_schema = R"(
{
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "brand", "type": "string" },
{"name": "points", "type": "int32" },
{"name": "val", "type": "int32" }
]
}
)";
nlohmann::json schema = nlohmann::json::parse(coll_schema);
Collection* coll1 = collectionManager.create_collection(schema).get();
for(size_t i = 0; i < 5; i++) {
nlohmann::json doc;
doc["title"] = "Title " + std::to_string(i);
doc["val"] = 0;
doc["points"] = i;
doc["brand"] = (i == 0 || i == 3) ? "Nike" : "Adidas";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
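    // every doc has val = 0, so the first sort field ties and the eval expression
    // in the second position determines the order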
std::vector<sort_by> sort_fields = {
sort_by("val", "DESC"),
sort_by({"brand:nike"}, {1}, "DESC"),
sort_by("points", "DESC"),
};
auto results = coll1->search("title", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
std::vector<std::string> expected_ids = {"3", "0", "4", "2", "1"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
// eval expression as 3rd sorting argument
sort_fields = {
sort_by("val", "DESC"),
sort_by("val", "DESC"),
sort_by({"brand:nike"}, {1}, "DESC"),
};
results = coll1->search("title", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
sort_fields = {
sort_by("val", "DESC"),
sort_by({"brand:adidas", "brand:nike"}, {2, 1}, "DESC"),
sort_by("points", "DESC"),
};
expected_ids = {"4", "2", "1","3", "0"};
results = coll1->search("title", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
sort_fields = {
sort_by("val", "DESC"),
sort_by("val", "DESC"),
sort_by({"brand:adidas", "brand:nike"}, {2, 1}, "DESC"),
};
results = coll1->search("title", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(5, results["hits"].size());
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
}
TEST_F(CollectionSortingTest, AscendingVectorDistance) {
std::string coll_schema = R"(
{
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "points", "type": "float[]", "num_dim": 2}
]
}
)";
nlohmann::json schema = nlohmann::json::parse(coll_schema);
Collection* coll1 = collectionManager.create_collection(schema).get();
std::vector<std::vector<float>> points = {
{3.0, 4.0},
{9.0, 21.0},
{8.0, 15.0},
{1.0, 1.0},
{5.0, 7.0}
};
for(size_t i = 0; i < points.size(); i++) {
nlohmann::json doc;
doc["title"] = "Title " + std::to_string(i);
doc["points"] = points[i];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
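    // the vector query (last search argument) is points:([8.0, 15.0]); ascending
    // _vector_distance lists the closest docs first, so doc 2 ([8.0, 15.0]) comes on top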
std::vector<sort_by> sort_fields = {
sort_by("_vector_distance", "asc"),
};
auto results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "points:([8.0, 15.0])").get();
ASSERT_EQ(5, results["hits"].size());
std::vector<std::string> expected_ids = {"2", "1", "4", "0", "3"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
}
TEST_F(CollectionSortingTest, DescendingVectorDistance) {
std::string coll_schema = R"(
{
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "points", "type": "float[]", "num_dim": 2}
]
}
)";
nlohmann::json schema = nlohmann::json::parse(coll_schema);
Collection* coll1 = collectionManager.create_collection(schema).get();
std::vector<std::vector<float>> points = {
{3.0, 4.0},
{9.0, 21.0},
{8.0, 15.0},
{1.0, 1.0},
{5.0, 7.0}
};
for(size_t i = 0; i < points.size(); i++) {
nlohmann::json doc;
doc["title"] = "Title " + std::to_string(i);
doc["points"] = points[i];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
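    // same setup as the ascending test, but descending _vector_distance lists the
    // docs farthest from [8.0, 15.0] first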
std::vector<sort_by> sort_fields = {
sort_by("_vector_distance", "DESC"),
};
auto results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "points:([8.0, 15.0])").get();
ASSERT_EQ(5, results["hits"].size());
std::vector<std::string> expected_ids = {"3", "0", "4", "1", "2"};
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], results["hits"][i]["document"]["id"].get<std::string>());
}
}
TEST_F(CollectionSortingTest, InvalidVectorDistanceSorting) {
std::string coll_schema = R"(
{
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "points", "type": "float[]", "num_dim": 2}
]
}
)";
nlohmann::json schema = nlohmann::json::parse(coll_schema);
Collection* coll1 = collectionManager.create_collection(schema).get();
std::vector<std::vector<float>> points = {
{1.0, 1.0},
{2.0, 2.0},
{3.0, 3.0},
{4.0, 4.0},
{5.0, 5.0},
};
for(size_t i = 0; i < points.size(); i++) {
nlohmann::json doc;
doc["title"] = "Title " + std::to_string(i);
doc["points"] = points[i];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = {
sort_by("_vector_distance", "desc"),
};
auto results = coll1->search("title", {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(results.ok());
ASSERT_EQ("sort_by vector_distance is only supported for vector queries, semantic search and hybrid search.", results.error());
}
TEST_F(CollectionSortingTest, TestSortByVectorQuery) {
std::string coll_schema = R"(
{
"name": "coll1",
"fields": [
{"name": "name", "type": "string" },
{"name": "points", "type": "float[]", "num_dim": 2}
]
}
)";
nlohmann::json schema = nlohmann::json::parse(coll_schema);
auto create_coll = collectionManager.create_collection(schema);
ASSERT_TRUE(create_coll.ok());
auto coll = create_coll.get();
std::vector<std::vector<float>> points = {
{7.0, 8.0},
{8.0, 15.0},
{5.0, 12.0},
};
for(size_t i = 0; i < points.size(); i++) {
nlohmann::json doc;
doc["name"] = "Title " + std::to_string(i);
doc["points"] = points[i];
ASSERT_TRUE(coll->add(doc.dump()).ok());
}
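    // with no sort_by and an empty vector query, the default ranking applies (ids 2, 1, 0 here)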
std::vector<sort_by> sort_fields = {};
auto results = coll->search("title", {"name"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "").get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"]);
ASSERT_EQ("1", results["hits"][1]["document"]["id"]);
ASSERT_EQ("0", results["hits"][2]["document"]["id"]);
sort_fields = {
sort_by("_vector_query(points:([5.0, 5.0]))", "asc"),
};
results = coll->search("title", {"name"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "").get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"]);
ASSERT_EQ("1", results["hits"][1]["document"]["id"]);
ASSERT_EQ("2", results["hits"][2]["document"]["id"]);
sort_fields = {
sort_by("_vector_query(points:([5.0, 5.0]))", "desc"),
};
results = coll->search("title", {"name"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "").get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"]);
ASSERT_EQ("1", results["hits"][1]["document"]["id"]);
ASSERT_EQ("0", results["hits"][2]["document"]["id"]);
}
TEST_F(CollectionSortingTest, TestVectorQueryQsSorting) {
auto schema_json =
R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "buttercup"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "butter"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll->search("butter", {"name"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"]);
ASSERT_EQ("0", results["hits"][1]["document"]["id"]);
sort_fields = {
sort_by("_vector_query(embedding:([], queries: [powerpuff girls, cartoon]))", "asc"),
};
results = coll->search("butter", {"name"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"]);
ASSERT_EQ("1", results["hits"][1]["document"]["id"]);
}
TEST_F(CollectionSortingTest, TestVectorQueryDistanceThresholdSorting) {
auto schema_json = R"({
"name": "products",
"fields":[
{
"name": "product_name",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"product_name"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto coll_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(coll_op.ok());
auto coll = coll_op.get();
std::vector<std::string> products = {"Mobile Phone", "Cell Phone", "Telephone"};
nlohmann::json doc;
for (auto product: products) {
doc["product_name"] = product;
ASSERT_TRUE(coll->add(doc.dump()).ok());
}
    // sort by text match first, then by ascending vector distance with distance_threshold 0.3
std::map<std::string, std::string> req_params = {
{"collection", "products"},
{"q", "phone"},
{"query_by", "product_name"},
{"sort_by", "_text_match:desc,_vector_query(embedding:([],distance_threshold:0.3)):asc"},
{"exclude_fields", "embedding"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
auto res = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ("Mobile Phone", res["hits"][0]["document"]["product_name"]);
ASSERT_EQ(0.07853113859891891, res["hits"][0]["vector_distance"].get<float>());
ASSERT_EQ("Cell Phone", res["hits"][1]["document"]["product_name"]);
ASSERT_EQ(0.08472149819135666, res["hits"][1]["vector_distance"].get<float>());
}
TEST_F(CollectionSortingTest, TestSortByRandomOrder) {
auto schema_json = R"({
"name": "digital_products",
"fields":[
{
"name": "product_name","type": "string"
}]
})"_json;
auto coll_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(coll_op.ok());
auto coll = coll_op.get();
std::vector<std::string> products = {"Samsung Smartphone", "Vivo SmartPhone", "Oneplus Smartphone", "Pixel Smartphone", "Moto Smartphone"};
nlohmann::json doc;
for (auto product: products) {
doc["product_name"] = product;
ASSERT_TRUE(coll->add(doc.dump()).ok());
}
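    // _rand(seed) yields a deterministic pseudo-random order: the same seed always
    // produces the same ordering, which is why exact ids can be asserted below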
sort_fields = {
sort_by("_rand(5)", "asc"),
};
auto results = coll->search("smartphone", {"product_name"}, "", {}, sort_fields, {0}).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"]);
ASSERT_EQ("4", results["hits"][1]["document"]["id"]);
ASSERT_EQ("0", results["hits"][2]["document"]["id"]);
ASSERT_EQ("3", results["hits"][3]["document"]["id"]);
ASSERT_EQ("2", results["hits"][4]["document"]["id"]);
sort_fields = {
sort_by("_rand(8)", "asc"),
};
results = coll->search("smartphone", {"product_name"}, "", {}, sort_fields, {0}).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"]);
ASSERT_EQ("3", results["hits"][1]["document"]["id"]);
ASSERT_EQ("4", results["hits"][2]["document"]["id"]);
ASSERT_EQ("0", results["hits"][3]["document"]["id"]);
ASSERT_EQ("2", results["hits"][4]["document"]["id"]);
    //without a seed value, the current time is used as the seed
sort_fields = {
sort_by("_rand()", "asc"),
};
results = coll->search("smartphone", {"product_name"}, "", {}, sort_fields, {0}).get();
ASSERT_EQ(5, results["hits"].size());
sort_fields = {
sort_by("_rand", "asc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0}).get();
ASSERT_EQ(5, results["hits"].size());
    //_rand should also work as a tie breaker after another sort param
sort_fields = {
sort_by("_text_match", "desc"),
sort_by("_rand(5)", "asc")
};
results = coll->search("smartphone", {"product_name"}, "", {}, sort_fields, {0}).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"]);
ASSERT_EQ("4", results["hits"][1]["document"]["id"]);
ASSERT_EQ("0", results["hits"][2]["document"]["id"]);
ASSERT_EQ("3", results["hits"][3]["document"]["id"]);
ASSERT_EQ("2", results["hits"][4]["document"]["id"]);
sort_fields = {
sort_by("_text_match", "desc"),
sort_by("_rand(8)", "asc")
};
results = coll->search("smartphone", {"product_name"}, "", {}, sort_fields, {0}).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"]);
ASSERT_EQ("3", results["hits"][1]["document"]["id"]);
ASSERT_EQ("4", results["hits"][2]["document"]["id"]);
ASSERT_EQ("0", results["hits"][3]["document"]["id"]);
ASSERT_EQ("2", results["hits"][4]["document"]["id"]);
//negative seed value is not allowed
sort_fields = {
sort_by("_rand(-1)", "asc"),
};
auto results_op = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("Only positive integer seed value is allowed.", results_op.error());
sort_fields = {
sort_by("_rand(sadkjkj)", "asc"),
};
results_op = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("Only positive integer seed value is allowed.", results_op.error());
//typos
sort_fields = {
sort_by("rand()", "asc"),
};
results_op = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("Could not find a field named `rand` in the schema for sorting.", results_op.error());
sort_fields = {
sort_by("_random()", "asc"),
};
results_op = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("Could not find a field named `_random` in the schema for sorting.", results_op.error());
}
TEST_F(CollectionSortingTest, DiffFunctionSort) {
auto schema_json = R"({
"name": "products",
"fields":[
        {"name": "name","type": "string"},
        {"name": "timestamp","type": "int64"}
        ]
})"_json;
auto coll_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(coll_op.ok());
auto coll = coll_op.get();
std::vector<std::string> products = {"Samsung Smartphone", "Vivo SmartPhone", "Oneplus Smartphone", "Pixel Smartphone", "Moto Smartphone"};
nlohmann::json doc;
for (auto i = 0; i < products.size(); ++i) {
doc["name"] = products[i];
doc["timestamp"] = 1728383250 + i * 1000;
ASSERT_TRUE(coll->add(doc.dump()).ok());
}
    //use 1728383250 + 3000 (doc 3's timestamp) as the origin; func: diff ranks by the absolute difference from it
sort_fields = {
sort_by("timestamp(origin: 1728386250, func: diff)", "asc"),
};
auto results = coll->search("*", {}, "", {}, sort_fields, {0}).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"]);
ASSERT_EQ(1728386250, results["hits"][0]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("4", results["hits"][1]["document"]["id"]);
ASSERT_EQ(1728387250, results["hits"][1]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("2", results["hits"][2]["document"]["id"]);
ASSERT_EQ(1728385250, results["hits"][2]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("1", results["hits"][3]["document"]["id"]);
ASSERT_EQ(1728384250, results["hits"][3]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("0", results["hits"][4]["document"]["id"]);
ASSERT_EQ(1728383250, results["hits"][4]["document"]["timestamp"].get<size_t>());
//desc sort
sort_fields = {
sort_by("timestamp(func:diff, origin: 1728386250)", "desc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0}).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"]);
ASSERT_EQ(1728383250, results["hits"][0]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"]);
ASSERT_EQ(1728384250, results["hits"][1]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("4", results["hits"][2]["document"]["id"]);
ASSERT_EQ(1728387250, results["hits"][2]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("2", results["hits"][3]["document"]["id"]);
ASSERT_EQ(1728385250, results["hits"][3]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("3", results["hits"][4]["document"]["id"]);
ASSERT_EQ(1728386250, results["hits"][4]["document"]["timestamp"].get<size_t>());
}
TEST_F(CollectionSortingTest, DecayFunctionsValidation) {
auto schema_json = R"({
"name": "products",
"fields":[
        {"name": "name","type": "string"},
        {"name": "timestamp","type": "int64"}
        ]
})"_json;
auto coll_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(coll_op.ok());
auto coll = coll_op.get();
std::vector<std::string> products = {"Samsung Smartphone", "Vivo SmartPhone", "Oneplus Smartphone", "Pixel Smartphone", "Moto Smartphone"};
nlohmann::json doc;
for (auto i = 0; i < products.size(); ++i) {
doc["name"] = products[i];
doc["timestamp"] = 1728383250 + i * 1000;
ASSERT_TRUE(coll->add(doc.dump()).ok());
}
//non integer scale value
sort_fields = {
sort_by("timestamp(origin: 1728386250, scale: 100.4, func: linear)", "asc"),
};
auto results = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("sort_by: scale param should be non-zero integer.", results.error());
//non integer origin value
sort_fields = {
sort_by("timestamp(origin: 1728386250.5, scale: 100, func: linear)", "asc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("sort_by: origin param should be integer.", results.error());
//non integer offset value
sort_fields = {
sort_by("timestamp(origin: 1728386250, scale: 100, func: linear, offset: -2.5)", "asc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("sort_by: offset param should be integer.", results.error());
//0 scale value
sort_fields = {
sort_by("timestamp(origin: 1728386250, scale: 0, func: linear, offset: -2)", "asc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("sort_by: scale param should be non-zero integer.", results.error());
//missing scale param
sort_fields = {
sort_by("timestamp(origin: 1728386250, func: linear, offset: -2)", "asc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("Bad syntax. origin and scale are mandatory params for decay function linear", results.error());
//missing origin param
sort_fields = {
sort_by("timestamp(scale: 100, func: linear, offset: -2)", "asc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("Bad syntax. origin and scale are mandatory params for decay function linear", results.error());
//decay value should be between 0.0 to 1.0
sort_fields = {
sort_by("timestamp(origin: 1728386250, func: linear, scale: -10, decay: 1.4)", "asc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("sort_by: decay param should be float in range [0.0, 1.0].", results.error());
//only gauss, linear, diff, and exp keys are supported for decay functions
sort_fields = {
sort_by("timestamp(origin: 1728386250, func: expo, scale: -10, decay: 0.4)", "asc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("Bad syntax. Not a valid decay function key `expo`.", results.error());
//missing func
sort_fields = {
sort_by("timestamp(origin: 1728386250)", "asc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_EQ("Bad syntax. Missing param `func`.", results.error());
//correct params
sort_fields = {
sort_by("timestamp(origin: 1728386250, func: exp, scale: -10, decay: 0.4)", "asc"),
};
results = coll->search("*", {}, "", {}, sort_fields, {0});
ASSERT_TRUE(results.ok());
}
TEST_F(CollectionSortingTest, DecayFunctionsTest) {
auto schema_json = R"({
"name": "products",
"fields":[
{"name": "product_name","type": "string"},
{"name": "timestamp","type": "int64"}
]
})"_json;
auto coll_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(coll_op.ok());
auto coll = coll_op.get();
std::vector<std::string> products = {"Samsung Smartphone", "Vivo SmartPhone", "Oneplus Smartphone", "Pixel Smartphone", "Moto Smartphone"};
nlohmann::json doc;
for (auto i = 0; i < products.size(); ++i) {
doc["product_name"] = products[i];
doc["timestamp"] = 1728383250 + i * 1000;
ASSERT_TRUE(coll->add(doc.dump()).ok());
}
sort_fields = {
sort_by("timestamp(origin: 1728385250, func: gauss, scale: 1000, decay: 0.5)", "desc"),
};
auto results = coll->search("smartphone", {"product_name"}, "", {}, sort_fields, {0}).get();
ASSERT_EQ(5, results["hits"].size());
    //scores follow a gaussian curve centred on the origin: they halve (decay: 0.5) at scale (1000) away from it
ASSERT_EQ("2", results["hits"][0]["document"]["id"]);
ASSERT_EQ(1728385250, results["hits"][0]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("3", results["hits"][1]["document"]["id"]);
ASSERT_EQ(1728386250, results["hits"][1]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"]);
ASSERT_EQ(1728384250, results["hits"][2]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("4", results["hits"][3]["document"]["id"]);
ASSERT_EQ(1728387250, results["hits"][3]["document"]["timestamp"].get<size_t>());
ASSERT_EQ("0", results["hits"][4]["document"]["id"]);
ASSERT_EQ(1728383250, results["hits"][4]["document"]["timestamp"].get<size_t>());
}
// ===== typesense_typesense/test/analytics_manager_test.cpp =====
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <collection_manager.h>
#include <analytics_manager.h>
#include "collection.h"
#include "core_api.h"
class AnalyticsManagerTest : public ::testing::Test {
protected:
Store *store;
Store *analytic_store;
CollectionManager& collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::string state_dir_path, analytics_dir_path;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
AnalyticsManager& analyticsManager = AnalyticsManager::get_instance();
uint32_t analytics_minute_rate_limit = 5;
void setupCollection() {
state_dir_path = "/tmp/typesense_test/analytics_manager_test";
analytics_dir_path = "/tmp/typesense-test/analytics";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
system("mkdir -p /tmp/typesense_test/models");
store = new Store(state_dir_path);
LOG(INFO) << "Truncating and creating: " << analytics_dir_path;
system(("rm -rf "+ analytics_dir_path +" && mkdir -p "+analytics_dir_path).c_str());
analytic_store = new Store(analytics_dir_path, 24*60*60, 1024, true, FOURWEEKS_SECS);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
analyticsManager.init(store, analytic_store, analytics_minute_rate_limit);
analyticsManager.resetToggleRateLimit(false);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
delete analytic_store;
analyticsManager.stop();
}
};
TEST_F(AnalyticsManagerTest, AddSuggestion) {
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json doc;
doc["title"] = "Cool trousers";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
// create a collection to store suggestions
nlohmann::json suggestions_schema = R"({
"name": "top_queries",
"fields": [
{"name": "q", "type": "string" },
{"name": "count", "type": "int32" }
]
})"_json;
Collection* suggestions_coll = collectionManager.create_collection(suggestions_schema).get();
nlohmann::json analytics_rule = R"({
"name": "top_search_queries",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
std::string q = "coo";
analyticsManager.add_suggestion("titles", q, "cool", true, "1");
auto popularQueries = analyticsManager.get_popular_queries();
auto userQueries = popularQueries["top_queries"]->get_user_prefix_queries()["1"];
ASSERT_EQ(1, userQueries.size());
ASSERT_EQ("coo", userQueries[0].query); // expanded query is NOT stored since it's not enabled
// add another query which is more popular
q = "buzzfoo";
analyticsManager.add_suggestion("titles", q, q, true, "1");
analyticsManager.add_suggestion("titles", q, q, true, "2");
analyticsManager.add_suggestion("titles", q, q, true, "3");
popularQueries = analyticsManager.get_popular_queries();
userQueries = popularQueries["top_queries"]->get_user_prefix_queries()["1"];
ASSERT_EQ(2, userQueries.size());
ASSERT_EQ("coo", userQueries[0].query);
ASSERT_EQ("buzzfoo", userQueries[1].query);
ASSERT_TRUE(analyticsManager.remove_rule("top_search_queries").ok());
}
TEST_F(AnalyticsManagerTest, AddSuggestionWithExpandedQuery) {
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json doc;
doc["title"] = "Cool trousers";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
// create a collection to store suggestions
nlohmann::json suggestions_schema = R"({
"name": "top_queries",
"fields": [
{"name": "q", "type": "string" },
{"name": "count", "type": "int32" }
]
})"_json;
Collection* suggestions_coll = collectionManager.create_collection(suggestions_schema).get();
nlohmann::json analytics_rule = R"({
"name": "top_search_queries",
"type": "popular_queries",
"params": {
"limit": 100,
"expand_query": true,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
analyticsManager.add_suggestion("titles", "c", "cool", true, "1");
auto popularQueries = analyticsManager.get_popular_queries();
auto userQueries = popularQueries["top_queries"]->get_user_prefix_queries()["1"];
ASSERT_EQ(1, userQueries.size());
ASSERT_EQ("cool", userQueries[0].query);
ASSERT_TRUE(analyticsManager.remove_rule("top_search_queries").ok());
}
TEST_F(AnalyticsManagerTest, GetAndDeleteSuggestions) {
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json doc;
doc["title"] = "Cool trousers";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
nlohmann::json analytics_rule = R"({
"name": "top_search_queries",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
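    // a second rule pointing at the same destination collection must be rejected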
analytics_rule = R"({
"name": "top_search_queries2",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("There's already another configuration for this destination collection.", create_op.error());
analytics_rule = R"({
"name": "top_search_queries3",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": [241, 2353]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Source collections value should be a string.", create_op.error());
analytics_rule = R"({
"name": "top_search_queries2",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "top_queries2"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
auto rules = analyticsManager.list_rules().get()["rules"];
ASSERT_EQ(2, rules.size());
ASSERT_TRUE(analyticsManager.get_rule("top_search_queries").ok());
ASSERT_TRUE(analyticsManager.get_rule("top_search_queries2").ok());
auto missing_rule_op = analyticsManager.get_rule("top_search_queriesX");
ASSERT_FALSE(missing_rule_op.ok());
ASSERT_EQ(404, missing_rule_op.code());
ASSERT_EQ("Rule not found.", missing_rule_op.error());
// upsert rule that already exists
analytics_rule = R"({
"name": "top_search_queries2",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "top_queriesUpdated"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
auto existing_rule = analyticsManager.get_rule("top_search_queries2").get();
ASSERT_EQ("top_queriesUpdated", existing_rule["params"]["destination"]["collection"].get<std::string>());
// reject when upsert is not enabled
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("There's already another configuration with the name `top_search_queries2`.", create_op.error());
// try deleting both rules
ASSERT_TRUE(analyticsManager.remove_rule("top_search_queries").ok());
ASSERT_TRUE(analyticsManager.remove_rule("top_search_queries2").ok());
missing_rule_op = analyticsManager.get_rule("top_search_queries");
ASSERT_FALSE(missing_rule_op.ok());
missing_rule_op = analyticsManager.get_rule("top_search_queries2");
ASSERT_FALSE(missing_rule_op.ok());
}
TEST_F(AnalyticsManagerTest, EventsValidation) {
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json titles1_schema = R"({
"name": "titles_1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles1_coll = collectionManager.create_collection(titles1_schema).get();
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
auto analytics_rule = R"({
"name": "product_events",
"type": "log",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "click", "name": "AP"}, {"type": "visit", "name": "VP"}]
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
//wrong type
nlohmann::json event1 = R"({
"type": "query_click",
"name": "AP",
"data": {
"q": "technology",
"collection": "titles",
"doc_id": "21",
"user_id": "13"
}
})"_json;
req->body = event1.dump();
ASSERT_FALSE(post_create_event(req, res));
ASSERT_EQ("{\"message\": \"event_type query_click not found.\"}", res->body);
//missing name
event1 = R"({
"type": "click",
"data": {
"collection": "titles",
"doc_id": "21",
"user_id": "13"
}
})"_json;
req->body = event1.dump();
ASSERT_FALSE(post_create_event(req, res));
ASSERT_EQ("{\"message\": \"key `name` not found.\"}", res->body);
//should be string type
nlohmann::json event3 = R"({
"type": "conversion",
"name": "AP",
"data": {
"q": "technology",
"doc_id": 21,
"user_id": "13"
}
})"_json;
req->body = event3.dump();
ASSERT_FALSE(post_create_event(req, res));
ASSERT_EQ("{\"message\": \"event should have 'doc_id' as string value.\"}", res->body);
event3 = R"({
"type": "conversion",
"name": "AP",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": 12
}
})"_json;
req->body = event3.dump();
ASSERT_FALSE(post_create_event(req, res));
ASSERT_EQ("{\"message\": \"event should have 'user_id' as string value.\"}", res->body);
event3 = R"({
"type": "conversion",
"name": "AP",
"data": {
"q": 1245,
"doc_id": "21",
"user_id": "13"
}
})"_json;
req->body = event3.dump();
ASSERT_FALSE(post_create_event(req, res));
ASSERT_EQ("{\"message\": \"'q' should be a string value.\"}", res->body);
//event name should be unique
analytics_rule = R"({
"name": "product_click_events2",
"type": "log",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "click", "name": "AP"}]
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Events must contain a unique name.", create_op.error());
//wrong event name
nlohmann::json event4 = R"({
"type": "visit",
"name": "AB",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": "11"
}
})"_json;
req->body = event4.dump();
ASSERT_FALSE(post_create_event(req, res));
//correct params
nlohmann::json event5 = R"({
"type": "click",
"name": "AP",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": "13"
}
})"_json;
req->body = event5.dump();
ASSERT_TRUE(post_create_event(req, res));
nlohmann::json event6 = R"({
"type": "visit",
"name": "VP",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": "11"
}
})"_json;
req->body = event6.dump();
ASSERT_TRUE(post_create_event(req, res));
//wrong event type
nlohmann::json event7 = R"({
"type": "conversion",
"name": "VP",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": "11"
}
})"_json;
req->body = event7.dump();
ASSERT_FALSE(post_create_event(req, res));
//custom event
analytics_rule = R"({
"name": "product_events2",
"type": "log",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "custom", "name": "CP"}]
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
nlohmann::json event8 = R"({
"type": "custom",
"name": "CP",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": "11",
"label1": "foo",
"label2": "bar",
"info": "xyz"
}
})"_json;
req->body = event8.dump();
ASSERT_TRUE(post_create_event(req, res));
//deleting rule should delete events associated with it
req->params["name"] = "product_events2";
ASSERT_TRUE(del_analytics_rules(req, res));
analytics_rule = R"({
"name": "product_events2",
"type": "log",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "custom", "name": "CP"}]
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
    // a log based event can be created with only doc_id and user_id
event5 = R"({
"type": "click",
"name": "AP",
"data": {
"doc_id": "21",
"user_id": "123"
}
})"_json;
req->body = event5.dump();
ASSERT_TRUE(post_create_event(req, res));
//search events validation
nlohmann::json suggestions_schema = R"({
"name": "top_queries",
"fields": [
{"name": "q", "type": "string" },
{"name": "count", "type": "int32" }
]
})"_json;
Collection* suggestions_coll = collectionManager.create_collection(suggestions_schema).get();
analytics_rule = R"({
"name": "popular_searches",
"type": "popular_queries",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "search", "name": "PS1"}]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
analytics_rule = R"({
"name": "nohits_searches",
"type": "nohits_queries",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "search", "name": "NH1"}]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
//missing query param
auto event9 = R"({
"type": "search",
"name": "NH1",
"data": {
"user_id": "11"
}
})"_json;
req->body = event9.dump();
ASSERT_FALSE(post_create_event(req, res));
ASSERT_EQ("{\"message\": \"search event json data fields should contain `q` as string value.\"}", res->body);
event9 = R"({
"type": "search",
"name": "NH1",
"data": {
"q": "11"
}
})"_json;
req->body = event9.dump();
ASSERT_FALSE(post_create_event(req, res));
ASSERT_EQ("{\"message\": \"search event json data fields should contain `user_id` as string value.\"}", res->body);
//correct params
event9 = R"({
"type": "search",
"name": "NH1",
"data": {
"q": "tech",
"user_id": "11"
}
})"_json;
req->body = event9.dump();
ASSERT_TRUE(post_create_event(req, res));
    //for log events, the source collections list is not optional
req->params["name"] = "product_events2";
ASSERT_TRUE(del_analytics_rules(req, res));
analytics_rule = R"({
"name": "product_events2",
"type": "log",
"params": {
"source": {
"events": [{"type": "custom", "name": "CP"}]
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Must contain a valid list of source collections.", create_op.error());
//try adding removed events
ASSERT_TRUE(analyticsManager.remove_rule("product_events").ok());
analytics_rule = R"({
"name": "product_events",
"type": "log",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "click", "name": "AP"}, {"type": "visit", "name": "VP"}]
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
analytics_rule = R"({
"name": "product_events2",
"type": "log",
"params": {
"source": {
"collections": ["titles", "titles_1"],
"events": [{"type": "click", "name": "CP"}]
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
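    // with multiple source collections, the event payload must also name the collection it belongs to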
event9 = R"({
"type": "click",
"name": "CP",
"data": {
"doc_id": "12",
"user_id": "11"
}
})"_json;
req->body = event9.dump();
ASSERT_FALSE(post_create_event(req, res));
ASSERT_EQ("{\"message\": \"Multiple source collections. 'collection' should be specified\"}", res->body);
event9 = R"({
"type": "click",
"name": "CP",
"data": {
"doc_id": "12",
"user_id": "11",
"collection": "titles"
}
})"_json;
req->body = event9.dump();
ASSERT_TRUE(post_create_event(req, res));
}
TEST_F(AnalyticsManagerTest, EventsPersist) {
//remove all rules first
analyticsManager.remove_all_rules();
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection *titles_coll = collectionManager.create_collection(titles_schema).get();
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
auto analytics_rule = R"({
"name": "product_click_events",
"type": "log",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "click", "name": "APC"}]
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
nlohmann::json event = R"({
"type": "click",
"name": "APC",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": "13"
}
})"_json;
req->body = event.dump();
ASSERT_TRUE(post_create_event(req, res));
//get events
nlohmann::json payload = nlohmann::json::array();
nlohmann::json event_data;
auto collection_events_map = analyticsManager.get_log_events();
for (auto &events_collection_it: collection_events_map) {
const auto& collection = events_collection_it.first;
for(const auto& event: events_collection_it.second) {
event.to_json(event_data, collection);
payload.push_back(event_data);
}
}
//manually trigger write to db
ASSERT_TRUE(analyticsManager.write_to_db(payload));
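    // read back the last 5 events for user "13"; "*" appears to act as a wildcard over event names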
std::vector<std::string> values;
analyticsManager.get_last_N_events("13", "*", 5, values);
ASSERT_EQ(1, values.size());
auto parsed_json = nlohmann::json::parse(values[0]);
ASSERT_EQ("APC", parsed_json["name"]);
ASSERT_EQ("titles", parsed_json["collection"]);
ASSERT_EQ("13", parsed_json["user_id"]);
ASSERT_EQ("21", parsed_json["doc_id"]);
ASSERT_EQ("technology", parsed_json["query"]);
event = R"({
"type": "click",
"name": "APC",
"data": {
"q": "technology",
"doc_id": "12",
"user_id": "13"
}
})"_json;
req->body = event.dump();
ASSERT_TRUE(post_create_event(req, res));
//get events
payload.clear();
collection_events_map = analyticsManager.get_log_events();
for (auto &events_collection_it: collection_events_map) {
const auto& collection = events_collection_it.first;
for(const auto& event: events_collection_it.second) {
event.to_json(event_data, collection);
payload.push_back(event_data);
}
}
//manually trigger write to db
ASSERT_TRUE(analyticsManager.write_to_db(payload));
values.clear();
analyticsManager.get_last_N_events("13", "*", 5, values);
ASSERT_EQ(2, values.size());
parsed_json = nlohmann::json::parse(values[0]);
//events will be fetched in LIFO order
ASSERT_EQ("APC", parsed_json["name"]);
ASSERT_EQ("titles", parsed_json["collection"]);
ASSERT_EQ("13", parsed_json["user_id"]);
ASSERT_EQ("12", parsed_json["doc_id"]);
ASSERT_EQ("technology", parsed_json["query"]);
parsed_json = nlohmann::json::parse(values[1]);
ASSERT_EQ("APC", parsed_json["name"]);
ASSERT_EQ("titles", parsed_json["collection"]);
ASSERT_EQ("13", parsed_json["user_id"]);
ASSERT_EQ("21", parsed_json["doc_id"]);
ASSERT_EQ("technology", parsed_json["query"]);
}
TEST_F(AnalyticsManagerTest, EventsRateLimitTest) {
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
auto analytics_rule = R"({
"name": "product_events2",
"type": "log",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "click", "name": "AB"}]
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
nlohmann::json event1 = R"({
"type": "click",
"name": "AB",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": "13"
}
})"_json;
//reset the LRU cache to test the rate limit
analyticsManager.resetToggleRateLimit(true);
for(auto i = 0; i < 5; ++i) {
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
}
//as rate limit is 5, adding one more event above that should trigger rate limit
ASSERT_FALSE(post_create_event(req, res));
ASSERT_EQ("{\"message\": \"event rate limit reached.\"}", res->body);
analyticsManager.resetToggleRateLimit(false);
//try with different limit
//restart analytics manager as fresh
analyticsManager.dispose();
analyticsManager.stop();
analytics_minute_rate_limit = 20;
analyticsManager.init(store, analytic_store, analytics_minute_rate_limit);
analytics_rule = R"({
"name": "product_events2",
"type": "log",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "click", "name": "AB"}]
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
event1 = R"({
"type": "click",
"name": "AB",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": "13"
}
})"_json;
//reset the LRU cache to test the rate limit
analyticsManager.resetToggleRateLimit(true);
for(auto i = 0; i < 20; ++i) {
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
}
//as rate limit is 20, adding one more event above that should trigger rate limit
ASSERT_FALSE(post_create_event(req, res));
ASSERT_EQ("{\"message\": \"event rate limit reached.\"}", res->body);
analyticsManager.resetToggleRateLimit(false);
}
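// Verifies that queries yielding no hits are tracked per user under a nohits_queries rule
// and cleared when the rule is removed.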
TEST_F(AnalyticsManagerTest, NoresultsQueries) {
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json doc;
doc["title"] = "Cool trousers";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
nlohmann::json suggestions_schema = R"({
"name": "top_queries",
"fields": [
{"name": "q", "type": "string" },
{"name": "count", "type": "int32" }
]
})"_json;
Collection* suggestions_coll = collectionManager.create_collection(suggestions_schema).get();
nlohmann::json analytics_rule = R"({
"name": "search_queries",
"type": "nohits_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
std::string q = "foobar";
analyticsManager.add_nohits_query("titles", q, true, "1");
auto noresults_queries = analyticsManager.get_nohits_queries();
auto userQueries = noresults_queries["top_queries"]->get_user_prefix_queries()["1"];
ASSERT_EQ(1, userQueries.size());
ASSERT_EQ("foobar", userQueries[0].query);
//try deleting nohits_queries rule
ASSERT_TRUE(analyticsManager.remove_rule("search_queries").ok());
noresults_queries = analyticsManager.get_nohits_queries();
ASSERT_EQ(0, noresults_queries.size());
}
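// Verifies that queries exceeding the internal length limit are dropped from aggregation
// while shorter queries are recorded, for both nohits and popular query rules.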
TEST_F(AnalyticsManagerTest, QueryLengthTruncation) {
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json doc;
doc["title"] = "Cool trousers";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
nlohmann::json suggestions_schema = R"({
"name": "queries",
"fields": [
{"name": "q", "type": "string" },
{"name": "count", "type": "int32" }
]
})"_json;
Collection* suggestions_coll = collectionManager.create_collection(suggestions_schema).get();
nlohmann::json analytics_rule = R"({
"name": "search_queries",
"type": "nohits_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "queries"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
std::string q1 = StringUtils::randstring(1050);
std::string q2 = StringUtils::randstring(1000);
analyticsManager.add_nohits_query("titles", q1, true, "1");
analyticsManager.add_nohits_query("titles", q2, true, "2");
auto noresults_queries = analyticsManager.get_nohits_queries();
auto userQueries = noresults_queries["queries"]->get_user_prefix_queries()["1"];
ASSERT_EQ(0, userQueries.size());
userQueries = noresults_queries["queries"]->get_user_prefix_queries()["2"];
ASSERT_EQ(1, userQueries.size());
ASSERT_EQ(q2, userQueries[0].query);
// delete nohits_queries rule
ASSERT_TRUE(analyticsManager.remove_rule("search_queries").ok());
noresults_queries = analyticsManager.get_nohits_queries();
ASSERT_EQ(0, noresults_queries.size());
// add popularity rule
analytics_rule = R"({
"name": "top_search_queries",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "queries"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
analyticsManager.add_suggestion("titles", q1, "cool", true, "1");
analyticsManager.add_suggestion("titles", q2, "cool", true, "2");
auto popular_queries = analyticsManager.get_popular_queries();
userQueries = popular_queries["queries"]->get_user_prefix_queries()["1"];
ASSERT_EQ(0, userQueries.size());
userQueries = popular_queries["queries"]->get_user_prefix_queries()["2"];
ASSERT_EQ(1, userQueries.size());
ASSERT_EQ(q2, userQueries[0].query);
}
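// Verifies that popular_queries and nohits_queries rules can be created, listed and deleted.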
TEST_F(AnalyticsManagerTest, SuggestionConfigRule) {
//clear all rules first
analyticsManager.remove_all_rules();
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
// create a collection to store suggestions
nlohmann::json suggestions_schema = R"({
"name": "top_queries",
"fields": [
{"name": "q", "type": "string" },
{"name": "count", "type": "int32" }
]
})"_json;
Collection* suggestions_coll = collectionManager.create_collection(suggestions_schema).get();
//add popular queries rule
nlohmann::json analytics_rule = R"({
"name": "top_search_queries",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
//add nohits rule
analytics_rule = R"({
"name": "search_queries",
"type": "nohits_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
auto rules = analyticsManager.list_rules().get()["rules"];
ASSERT_EQ(2, rules.size());
ASSERT_EQ("search_queries", rules[0]["name"]);
ASSERT_EQ("nohits_queries", rules[0]["type"]);
ASSERT_EQ("top_search_queries", rules[1]["name"]);
ASSERT_EQ("popular_queries", rules[1]["type"]);
//try deleting rules
ASSERT_TRUE(analyticsManager.remove_rule("search_queries").ok());
ASSERT_TRUE(analyticsManager.remove_rule("top_search_queries").ok());
rules = analyticsManager.list_rules().get()["rules"];
ASSERT_EQ(0, rules.size());
}
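// Verifies that click and conversion events increment per-document counters according to
// their weights, and that the aggregated scores can be flushed into the destination
// collection's counter field via document import.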
TEST_F(AnalyticsManagerTest, PopularityScore) {
nlohmann::json products_schema = R"({
"name": "products",
"fields": [
{"name": "title", "type": "string"},
{"name": "popularity", "type": "int32"}
]
})"_json;
Collection* products_coll = collectionManager.create_collection(products_schema).get();
nlohmann::json doc;
doc["popularity"] = 0;
doc["id"] = "0";
doc["title"] = "Cool trousers";
ASSERT_TRUE(products_coll->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "Funky trousers";
ASSERT_TRUE(products_coll->add(doc.dump()).ok());
doc["id"] = "2";
doc["title"] = "Casual shorts";
ASSERT_TRUE(products_coll->add(doc.dump()).ok());
doc["id"] = "3";
doc["title"] = "Trendy shorts";
ASSERT_TRUE(products_coll->add(doc.dump()).ok());
doc["id"] = "4";
doc["title"] = "Formal pants";
ASSERT_TRUE(products_coll->add(doc.dump()).ok());
auto analytics_rule = R"({
"name": "product_popularity",
"type": "counter",
"params": {
"source": {
"collections": ["products"],
"events": [
{"type": "click", "weight": 1, "name": "CLK1", "log_to_store": true},
{"type": "conversion", "weight": 5, "name": "CNV1", "log_to_store": true}
]
},
"destination": {
"collection": "products",
"counter_field": "popularity"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
nlohmann::json event1 = R"({
"type": "conversion",
"name": "CNV1",
"data": {
"q": "trousers",
"doc_id": "1",
"user_id": "13"
}
})"_json;
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
nlohmann::json event2 = R"({
"type": "click",
"name": "CLK1",
"data": {
"q": "shorts",
"doc_id": "3",
"user_id": "11"
}
})"_json;
req->body = event2.dump();
ASSERT_TRUE(post_create_event(req, res));
ASSERT_TRUE(post_create_event(req, res));
auto popular_clicks = analyticsManager.get_popular_clicks();
ASSERT_EQ(1, popular_clicks.size());
ASSERT_EQ("popularity", popular_clicks["products"].counter_field);
ASSERT_EQ(2, popular_clicks["products"].docid_counts.size());
ASSERT_EQ(5, popular_clicks["products"].docid_counts["1"]);
ASSERT_EQ(2, popular_clicks["products"].docid_counts["3"]);
nlohmann::json event3 = R"({
"type": "click",
"name": "CLK1",
"data": {
"q": "shorts",
"doc_id": "1",
"user_id": "11"
}
})"_json;
req->body = event3.dump();
ASSERT_TRUE(post_create_event(req, res));
nlohmann::json event4 = R"({
"type": "conversion",
"name": "CNV1",
"data": {
"q": "shorts",
"doc_id": "3",
"user_id": "11"
}
})"_json;
req->body = event4.dump();
ASSERT_TRUE(post_create_event(req, res));
popular_clicks = analyticsManager.get_popular_clicks();
ASSERT_EQ(1, popular_clicks.size());
ASSERT_EQ("popularity", popular_clicks["products"].counter_field);
ASSERT_EQ(2, popular_clicks["products"].docid_counts.size());
ASSERT_EQ(7, popular_clicks["products"].docid_counts["3"]);
ASSERT_EQ(6, popular_clicks["products"].docid_counts["1"]);
//trigger persistence event manually
for(auto& popular_clicks_it : popular_clicks) {
std::string docs;
req->params["collection"] = popular_clicks_it.first;
req->params["action"] = "update";
popular_clicks_it.second.serialize_as_docs(docs);
req->body = docs;
post_import_documents(req, res);
}
sort_fields = {sort_by("popularity", "DESC")};
auto results = products_coll->search("*", {}, "", {},
sort_fields, {0}, 10, 1, FREQUENCY,{false},
Index::DROP_TOKENS_THRESHOLD,spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"]);
ASSERT_EQ(7, results["hits"][0]["document"]["popularity"]);
ASSERT_EQ("Trendy shorts", results["hits"][0]["document"]["title"]);
ASSERT_EQ("1", results["hits"][1]["document"]["id"]);
ASSERT_EQ(6, results["hits"][1]["document"]["popularity"]);
ASSERT_EQ("Funky trousers", results["hits"][1]["document"]["title"]);
//after persisting, it should be possible to add new events
analyticsManager.persist_popular_events(nullptr, 0);
nlohmann::json event5 = R"({
"type": "conversion",
"name": "CNV1",
"data": {
"q": "shorts",
"doc_id": "3",
"user_id": "11"
}
})"_json;
req->body = event5.dump();
ASSERT_TRUE(post_create_event(req, res));
popular_clicks = analyticsManager.get_popular_clicks();
ASSERT_EQ(1, popular_clicks.size());
ASSERT_EQ("popularity", popular_clicks["products"].counter_field);
ASSERT_EQ(1, popular_clicks["products"].docid_counts.size());
}
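// Exercises validation of counter rules (missing destination collection, missing counter
// field, bad or missing event definitions, missing weights) and checks that valid counter
// and log rules coexist correctly.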
TEST_F(AnalyticsManagerTest, PopularityScoreValidation) {
//restart analytics manager as fresh
analyticsManager.dispose();
analyticsManager.stop();
analyticsManager.init(store, analytic_store, analytics_minute_rate_limit);
nlohmann::json products_schema = R"({
"name": "books",
"fields": [
{"name": "title", "type": "string"},
{"name": "popularity", "type": "int32"}
]
})"_json;
Collection *products_coll = collectionManager.create_collection(products_schema).get();
nlohmann::json doc;
doc["popularity"] = 0;
doc["id"] = "0";
doc["title"] = "Cool trousers";
ASSERT_TRUE(products_coll->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "Funky trousers";
ASSERT_TRUE(products_coll->add(doc.dump()).ok());
nlohmann::json analytics_rule = R"({
"name": "books_popularity",
"type": "counter",
"params": {
"source": {
"collections": ["books"],
"events": [{"type": "click", "weight": 1, "name": "CLK2"}, {"type": "conversion", "weight": 5, "name": "CNV2"} ]
},
"destination": {
"collection": "popular_books",
"counter_field": "popularity"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Collection `popular_books` not found.", create_op.error());
analytics_rule = R"({
"name": "books_popularity",
"type": "counter",
"params": {
"source": {
"collections": ["books"],
"events": [{"type": "click", "weight": 1, "name": "CLK3"}, {"type": "conversion", "weight": 5, "name": "CNV3"} ]
},
"destination": {
"collection": "books",
"counter_field": "popularity_score"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("counter_field `popularity_score` not found in destination collection.", create_op.error());
analytics_rule = R"({
"name": "books_popularity",
"type": "popular_click",
"params": {
"source": {
"collections": ["books"],
"events": [{"type": "query_click", "weight": 1}, {"type": "query_purchase", "weight": 5} ]
},
"destination": {
"collection": "books",
"counter_field": "popularity_score"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Invalid type.", create_op.error());
analytics_rule = R"({
"name": "books_popularity",
"type": "counter",
"params": {
"source": {
"collections": ["books"]
},
"destination": {
"collection": "books",
"counter_field": "popularity_score"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Bad or missing events.", create_op.error());
analytics_rule = R"({
"name": "books_popularity",
"type": "counter",
"params": {
"source": {
"collections": ["books"],
"events": []
},
"destination": {
"collection": "books",
"counter_field": "popularity_score"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Bad or missing events.", create_op.error());
analytics_rule = R"({
"name": "books_popularity",
"type": "counter",
"params": {
"source": {
"collections": ["books"],
"events": "query_click"
},
"destination": {
"collection": "books",
"counter_field": "popularity_score"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Bad or missing events.", create_op.error());
analytics_rule = R"({
"name": "books_popularity",
"type": "counter",
"params": {
"source": {
"collections": ["books"],
"events": [{"type": "click", "weight": 1}, {"type": "conversion", "weight": 5} ]
},
"destination": {
"collection": "books",
"counter_field": "popularity"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Events must contain a unique name.", create_op.error());
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
analytics_rule = R"({
"name": "books_popularity",
"type": "counter",
"params": {
"source": {
"collections": ["books"],
"events": [{"type": "click", "name" : "CLK4"}, {"type": "conversion", "name": "CNV4", "log_to_store" : true} ]
},
"destination": {
"collection": "books",
"counter_field": "popularity"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Counter events must contain a weight value.", create_op.error());
//correct params
analytics_rule = R"({
"name": "books_popularity2",
"type": "counter",
"params": {
"source": {
"collections": ["books"],
"events": [{"type": "click", "weight": 1, "name" : "CLK4"}, {"type": "conversion", "weight": 5, "name": "CNV4", "log_to_store" : true} ]
},
"destination": {
"collection": "books",
"counter_field": "popularity"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
auto rule_op = analyticsManager.get_rule("books_popularity2");
ASSERT_TRUE(rule_op.ok());
auto rule = rule_op.get();
ASSERT_EQ(analytics_rule["params"]["source"]["events"], rule["params"]["source"]["events"]);
ASSERT_EQ(analytics_rule["params"]["destination"]["counter_field"], rule["params"]["destination"]["counter_field"]);
nlohmann::json event = R"({
"type": "conversion",
"name": "CNV4",
"data": {
"q": "shorts",
"doc_id": "1",
"user_id": "11"
}
})"_json;
req->body = event.dump();
ASSERT_TRUE(post_create_event(req, res));
auto popular_clicks = analyticsManager.get_popular_clicks();
ASSERT_EQ("popularity", popular_clicks["books"].counter_field);
ASSERT_EQ(1, popular_clicks["books"].docid_counts.size());
ASSERT_EQ(5, popular_clicks["books"].docid_counts["1"]);
//trigger persistence event manually
for (auto &popular_clicks_it: popular_clicks) {
std::string docs;
req->params["collection"] = popular_clicks_it.first;
req->params["action"] = "update";
popular_clicks_it.second.serialize_as_docs(docs);
req->body = docs;
post_import_documents(req, res);
}
sort_fields = {sort_by("popularity", "DESC")};
auto results = products_coll->search("*", {}, "", {},
sort_fields, {0}, 10, 1, FREQUENCY, {false},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"]);
ASSERT_EQ(5, results["hits"][0]["document"]["popularity"]);
ASSERT_EQ("Funky trousers", results["hits"][0]["document"]["title"]);
ASSERT_EQ("0", results["hits"][1]["document"]["id"]);
ASSERT_EQ(0, results["hits"][1]["document"]["popularity"]);
ASSERT_EQ("Cool trousers", results["hits"][1]["document"]["title"]);
nlohmann::json payload = nlohmann::json::array();
nlohmann::json event_data;
auto collection_events_map = analyticsManager.get_log_events();
for (auto &events_collection_it: collection_events_map) {
const auto& collection = events_collection_it.first;
for(const auto& event: events_collection_it.second) {
event.to_json(event_data, collection);
payload.push_back(event_data);
}
}
//manually trigger write to db
ASSERT_TRUE(analyticsManager.write_to_db(payload));
std::vector<std::string> values;
analyticsManager.get_last_N_events("11", "*", 5, values);
ASSERT_EQ(1, values.size());
auto parsed_json = nlohmann::json::parse(values[0]);
ASSERT_EQ("CNV4", parsed_json["name"]);
ASSERT_EQ("books", parsed_json["collection"]);
ASSERT_EQ("11", parsed_json["user_id"]);
ASSERT_EQ("1", parsed_json["doc_id"]);
ASSERT_EQ("shorts", parsed_json["query"]);
//now add click event rule
analytics_rule = R"({
"name": "click_event_rule",
"type": "log",
"params": {
"source": {
"collections": ["books"],
"events": [{"type": "click", "name": "APC2"}]
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
event = R"({
"type": "click",
"name": "APC2",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": "13"
}
})"_json;
req->body = event.dump();
ASSERT_TRUE(post_create_event(req, res));
//normal click event should not increment popularity score
popular_clicks = analyticsManager.get_popular_clicks();
ASSERT_EQ("popularity", popular_clicks["books"].counter_field);
ASSERT_EQ(1, popular_clicks["books"].docid_counts.size());
ASSERT_EQ(5, popular_clicks["books"].docid_counts["1"]);
//add another counter event
event = R"({
"type": "conversion",
"name": "CNV4",
"data": {
"q": "shorts",
"doc_id": "1",
"user_id": "11"
}
})"_json;
req->body = event.dump();
ASSERT_TRUE(post_create_event(req, res));
popular_clicks = analyticsManager.get_popular_clicks();
ASSERT_EQ(1, popular_clicks.size());
ASSERT_EQ("popularity", popular_clicks["books"].counter_field);
ASSERT_EQ(1, popular_clicks["books"].docid_counts.size());
ASSERT_EQ(10, popular_clicks["books"].docid_counts["1"]);
payload.clear();
event_data.clear();
collection_events_map = analyticsManager.get_log_events();
for (auto &events_collection_it: collection_events_map) {
const auto& collection = events_collection_it.first;
for(const auto& event: events_collection_it.second) {
event.to_json(event_data, collection);
payload.push_back(event_data);
}
}
//manually trigger write to db
ASSERT_TRUE(analyticsManager.write_to_db(payload));
values.clear();
analyticsManager.get_last_N_events("11", "*", 5, values);
ASSERT_EQ(2, values.size());
parsed_json = nlohmann::json::parse(values[0]);
ASSERT_EQ("CNV4", parsed_json["name"]);
ASSERT_EQ("books", parsed_json["collection"]);
ASSERT_EQ("11", parsed_json["user_id"]);
ASSERT_EQ("1", parsed_json["doc_id"]);
ASSERT_EQ("shorts", parsed_json["query"]);
parsed_json = nlohmann::json::parse(values[1]);
ASSERT_EQ("CNV4", parsed_json["name"]);
ASSERT_EQ("books", parsed_json["collection"]);
ASSERT_EQ("11", parsed_json["user_id"]);
ASSERT_EQ("1", parsed_json["doc_id"]);
ASSERT_EQ("shorts", parsed_json["query"]);
values.clear();
analyticsManager.get_last_N_events("13", "*", 5, values);
ASSERT_EQ(1, values.size());
parsed_json = nlohmann::json::parse(values[0]);
ASSERT_EQ("APC2", parsed_json["name"]);
ASSERT_EQ("books", parsed_json["collection"]);
ASSERT_EQ("13", parsed_json["user_id"]);
ASSERT_EQ("21", parsed_json["doc_id"]);
ASSERT_EQ("technology", parsed_json["query"]);
analyticsManager.dispose();
analyticsManager.stop();
analyticsManager.init(store, analytic_store, analytics_minute_rate_limit);
analytics_rule = R"({
"name": "books_popularity3",
"type": "counter",
"params": {
"source": {
"collections": ["books"],
"events": [{"type": "conversion", "weight": 5, "name": "CNV4"} ]
},
"destination": {
"collection": "books",
"counter_field": "popularity"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
event = R"({
"type": "conversion",
"name": "CNV4",
"data": {
"q": "shorts",
"doc_id": "1",
"user_id": "11"
}
})"_json;
req->body = event.dump();
ASSERT_TRUE(post_create_event(req, res));
popular_clicks = analyticsManager.get_popular_clicks();
ASSERT_EQ(1, popular_clicks.size());
ASSERT_EQ("popularity", popular_clicks["books"].counter_field);
ASSERT_EQ(1, popular_clicks["books"].docid_counts.size());
ASSERT_EQ(5, popular_clicks["books"].docid_counts["1"]);
}
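// Verifies that events written to the analytics store expire once the store's TTL elapses
// and compaction runs.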
TEST_F(AnalyticsManagerTest, AnalyticsStoreTTL) {
analyticsManager.dispose();
analyticsManager.stop();
delete analytic_store;
//recreate the analytics store with a four-week TTL
LOG(INFO) << "Truncating and creating: " << analytics_dir_path;
system(("rm -rf "+ analytics_dir_path +" && mkdir -p "+analytics_dir_path).c_str());
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
analytic_store = new Store(analytics_dir_path, 24*60*60, 1024, true, FOURWEEKS_SECS);
analyticsManager.init(store, analytic_store, analytics_minute_rate_limit);
auto analytics_rule = R"({
"name": "product_events2",
"type": "log",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "click", "name": "AB"}]
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
nlohmann::json event1 = R"({
"type": "click",
"name": "AB",
"data": {
"q": "technology",
"doc_id": "21",
"user_id": "13"
}
})"_json;
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
//get events
nlohmann::json payload = nlohmann::json::array();
nlohmann::json event_data;
auto collection_events_map = analyticsManager.get_log_events();
for (auto &events_collection_it: collection_events_map) {
const auto& collection = events_collection_it.first;
for(const auto& event: events_collection_it.second) {
event.to_json(event_data, collection);
}
}
payload.push_back(event_data);
//manually trigger write to db
ASSERT_TRUE(analyticsManager.write_to_db(payload));
//try fetching from db
const std::string prefix_start = "13%";
const std::string prefix_end = "13`";
std::vector<std::string> events;
analytic_store->scan_fill(prefix_start, prefix_end, events);
ASSERT_EQ(1, events.size());
ASSERT_EQ(events[0].c_str(), event_data.dump());
//now set TTL to 1s and open analytics db
events.clear();
delete analytic_store;
analytic_store = new Store(analytics_dir_path, 24*60*60, 1024, true, 1);
sleep(2);
analytic_store->compact_all();
analytic_store->scan_fill(prefix_start, prefix_end, events);
ASSERT_EQ(0, events.size());
}
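// Verifies get_last_N_events: events are returned newest-first, filtered by user id and
// optionally by event name, including user ids containing underscores.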
TEST_F(AnalyticsManagerTest, AnalyticsStoreGetLastN) {
analyticsManager.dispose();
analyticsManager.stop();
delete analytic_store;
//recreate the analytics store with a four-week TTL
LOG(INFO) << "Truncating and creating: " << analytics_dir_path;
system(("rm -rf "+ analytics_dir_path +" && mkdir -p "+analytics_dir_path).c_str());
analytic_store = new Store(analytics_dir_path, 24*60*60, 1024, true, FOURWEEKS_SECS);
analyticsManager.init(store, analytic_store, analytics_minute_rate_limit);
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
auto analytics_rule = R"({
"name": "product_events2",
"type": "log",
"params": {
"source": {
"collections": ["titles"],
"events": [{"type": "click", "name": "AB"}, {"type": "visit", "name": "AV"}]
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
nlohmann::json event1;
event1["type"] = "click";
event1["name"] = "AB";
event1["data"]["q"] = "technology";
event1["data"]["user_id"] = "13";
for(auto i = 0; i < 10; i++) {
event1["data"]["doc_id"] = std::to_string(i);
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
}
//add more user events
for(auto i = 0; i < 7; i++) {
event1["data"]["user_id"] = "14";
event1["data"]["doc_id"] = std::to_string(i);
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
}
for(auto i = 0; i < 5; i++) {
event1["data"]["user_id"] = "15";
event1["data"]["doc_id"] = std::to_string(i);
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
}
//get events
nlohmann::json payload = nlohmann::json::array();
nlohmann::json event_data;
auto collection_events_map = analyticsManager.get_log_events();
for (auto &events_collection_it: collection_events_map) {
const auto& collection = events_collection_it.first;
for(const auto& event: events_collection_it.second) {
event.to_json(event_data, collection);
payload.push_back(event_data);
}
}
//manually trigger write to db
ASSERT_TRUE(analyticsManager.write_to_db(payload));
//basic test
std::vector<std::string> values;
analyticsManager.get_last_N_events("13", "*", 5, values);
ASSERT_EQ(5, values.size());
nlohmann::json parsed_json;
uint32_t start_index = 9;
for(auto i = 0; i < 5; i++) {
parsed_json = nlohmann::json::parse(values[i]);
ASSERT_EQ(std::to_string(start_index - i), parsed_json["doc_id"]);
}
//fetch events for middle user
values.clear();
analyticsManager.get_last_N_events("14", "*", 5, values);
ASSERT_EQ(5, values.size());
start_index = 6;
for(auto i = 0; i < 5; i++) {
parsed_json = nlohmann::json::parse(values[i]);
ASSERT_EQ(std::to_string(start_index - i), parsed_json["doc_id"]);
}
//fetch more events than stored in db
values.clear();
analyticsManager.get_last_N_events("15", "*", 8, values);
ASSERT_EQ(5, values.size());
start_index = 4;
for(auto i = 0; i < 5; i++) {
parsed_json = nlohmann::json::parse(values[i]);
ASSERT_EQ(std::to_string(start_index - i), parsed_json["doc_id"]);
}
//fetch events for non-existing user
values.clear();
analyticsManager.get_last_N_events("16", "*", 8, values);
ASSERT_EQ(0, values.size());
//fetch events of a specific type for a given user
//first add events of a different type (visits)
event1["name"] = "AV";
event1["type"] = "visit";
event1["data"]["user_id"] = "14";
for(auto i = 0; i < 5; i++) {
event1["data"]["doc_id"] = std::to_string(i);
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
}
payload.clear();
event_data.clear();
collection_events_map = analyticsManager.get_log_events();
for (auto &events_collection_it: collection_events_map) {
const auto& collection = events_collection_it.first;
for(const auto& event: events_collection_it.second) {
event.to_json(event_data, collection);
payload.push_back(event_data);
}
}
//manually trigger write to db
ASSERT_TRUE(analyticsManager.write_to_db(payload));
//get last 5 visit events for user_id 14
values.clear();
analyticsManager.get_last_N_events("14", "AV", 5, values);
ASSERT_EQ(5, values.size());
for(int i = 0; i < 5; ++i) {
parsed_json = nlohmann::json::parse(values[i]);
ASSERT_EQ("AV", parsed_json["name"]);
ASSERT_EQ(std::to_string(4-i), parsed_json["doc_id"]);
}
//get last 5 click events for user_id 14
values.clear();
analyticsManager.get_last_N_events("14", "AB", 5, values);
ASSERT_EQ(5, values.size());
for(int i = 0; i < 5; ++i) {
parsed_json = nlohmann::json::parse(values[i]);
ASSERT_EQ("AB", parsed_json["name"]);
ASSERT_EQ(std::to_string(6-i), parsed_json["doc_id"]);
}
event1["name"] = "AB";
event1["type"] = "click";
event1["data"]["user_id"] = "14";
for(auto i = 7; i < 10; i++) {
event1["data"]["doc_id"] = std::to_string(i);
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
}
payload.clear();
event_data.clear();
collection_events_map = analyticsManager.get_log_events();
for (auto &events_collection_it: collection_events_map) {
const auto& collection = events_collection_it.first;
for(const auto& event: events_collection_it.second) {
event.to_json(event_data, collection);
payload.push_back(event_data);
}
}
ASSERT_TRUE(analyticsManager.write_to_db(payload));
values.clear();
analyticsManager.get_last_N_events("14", "AB", 10, values);
ASSERT_EQ(10, values.size());
for(int i = 0; i < 10; ++i) {
parsed_json = nlohmann::json::parse(values[i]);
ASSERT_EQ("AB", parsed_json["name"]);
ASSERT_EQ(std::to_string(9-i), parsed_json["doc_id"]);
}
//try adding a user_id containing an underscore
event1["name"] = "AB";
event1["type"] = "click";
event1["data"]["user_id"] = "14_U1";
for(auto i = 0; i < 5; i++) {
event1["data"]["doc_id"] = std::to_string(i);
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
}
payload.clear();
event_data.clear();
collection_events_map = analyticsManager.get_log_events();
for (auto &events_collection_it: collection_events_map) {
const auto& collection = events_collection_it.first;
for(const auto& event: events_collection_it.second) {
event.to_json(event_data, collection);
payload.push_back(event_data);
}
}
ASSERT_TRUE(analyticsManager.write_to_db(payload));
values.clear();
analyticsManager.get_last_N_events("14_U1", "AB", 10, values);
ASSERT_EQ(5, values.size());
for(int i = 0; i < 5; ++i) {
parsed_json = nlohmann::json::parse(values[i]);
ASSERT_EQ("AB", parsed_json["name"]);
ASSERT_EQ(std::to_string(4-i), parsed_json["doc_id"]);
}
}
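// (Disabled) Intended to verify that counter rules work when the destination collection is
// referenced through an alias.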
TEST_F(AnalyticsManagerTest, DISABLED_AnalyticsWithAliases) {
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"},
{"name": "popularity", "type" : "int32"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
//create alias
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
nlohmann::json alias_json = R"({
"collection_name": "titles"
})"_json;
req->params["alias"] = "coll1";
req->body = alias_json.dump();
ASSERT_TRUE(put_upsert_alias(req, res));
auto analytics_rule = R"({
"name": "popular_titles",
"type": "counter",
"params": {
"source": {
"events": [{"type": "click", "weight": 1, "name": "CLK1"}, {"type": "conversion", "weight": 5, "name": "CNV1"} ]
},
"destination": {
"collection": "coll1",
"counter_field": "popularity"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, true, true);
ASSERT_TRUE(create_op.ok());
nlohmann::json event1;
event1["type"] = "click";
event1["name"] = "CLK1";
event1["data"]["q"] = "technology";
event1["data"]["user_id"] = "13";
event1["data"]["doc_id"] = "1";
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
}
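// Verifies that search events posted via add_event feed popular_queries and nohits_queries
// aggregation, and that event names must be unique across rules.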
TEST_F(AnalyticsManagerTest, AddSuggestionByEvent) {
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json doc;
doc["title"] = "Cool trousers";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
// create a collection to store suggestions
nlohmann::json suggestions_schema = R"({
"name": "top_queries",
"fields": [
{"name": "q", "type": "string" },
{"name": "count", "type": "int32" }
]
})"_json;
Collection* suggestions_coll = collectionManager.create_collection(suggestions_schema).get();
nlohmann::json analytics_rule = R"({
"name": "top_search_queries",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"],
"events": [{"type": "search", "name": "coll_search"}]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
nlohmann::json event_data;
event_data["q"] = "coo";
event_data["user_id"] = "1";
analyticsManager.add_event("127.0.0.1", "search", "coll_search", event_data);
auto popularQueries = analyticsManager.get_popular_queries();
auto localCounts = popularQueries["top_queries"]->get_local_counts();
ASSERT_EQ(1, localCounts.size());
ASSERT_EQ(1, localCounts.count("coo"));
ASSERT_EQ(1, localCounts["coo"]);
// add another query which is more popular
event_data["q"] = "buzzfoo";
analyticsManager.add_event("127.0.0.1", "search", "coll_search", event_data);
event_data["user_id"] = "2";
analyticsManager.add_event("127.0.0.1", "search", "coll_search", event_data);
event_data["user_id"] = "3";
analyticsManager.add_event("127.0.0.1", "search", "coll_search", event_data);
popularQueries = analyticsManager.get_popular_queries();
localCounts = popularQueries["top_queries"]->get_local_counts();
ASSERT_EQ(2, localCounts.size());
ASSERT_EQ(1, localCounts.count("coo"));
ASSERT_EQ(1, localCounts["coo"]);
ASSERT_EQ(1, localCounts.count("buzzfoo"));
ASSERT_EQ(3, localCounts["buzzfoo"]);
//try with a nohits analytics rule
analytics_rule = R"({
"name": "noresults_queries",
"type": "nohits_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"],
"events": [{"type": "search", "name": "nohits_search"}]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
event_data["q"] = "foobar";
analyticsManager.add_event("127.0.0.1", "search", "nohits_search", event_data);
auto noresults_queries = analyticsManager.get_nohits_queries();
localCounts = noresults_queries["top_queries"]->get_local_counts();
ASSERT_EQ(1, localCounts.size());
ASSERT_EQ(1, localCounts.count("foobar"));
ASSERT_EQ(1, localCounts["foobar"]);
//try creating event with same name
suggestions_schema = R"({
"name": "top_queries2",
"fields": [
{"name": "q", "type": "string" },
{"name": "count", "type": "int32" }
]
})"_json;
suggestions_coll = collectionManager.create_collection(suggestions_schema).get();
analytics_rule = R"({
"name": "noresults_queries2",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["titles"],
"events": [{"type": "search", "name": "nohits_search"}]
},
"destination": {
"collection": "top_queries2"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("Events must contain a unique name.", create_op.error());
}
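// Verifies that with enable_auto_aggregation set to false, only explicitly sent search
// events are aggregated, not regular searches.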
TEST_F(AnalyticsManagerTest, EventsOnlySearchTest) {
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json doc;
doc["title"] = "Cool trousers";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
// create a collection to store suggestions
nlohmann::json suggestions_schema = R"({
"name": "top_queries",
"fields": [
{"name": "q", "type": "string" },
{"name": "count", "type": "int32" }
]
})"_json;
Collection* suggestions_coll = collectionManager.create_collection(suggestions_schema).get();
//setting enable_auto_aggregation to false means queries are aggregated only via explicitly sent events
nlohmann::json analytics_rule = R"({
"name": "top_search_queries",
"type": "popular_queries",
"params": {
"limit": 100,
"enable_auto_aggregation": false,
"source": {
"collections": ["titles"],
"events": [{"type": "search", "name": "coll_search"}]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
auto create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
std::string q = "coo";
analyticsManager.add_suggestion("titles", q, "cool", true, "1");
auto popularQueries = analyticsManager.get_popular_queries();
auto userQueries = popularQueries["top_queries"]->get_user_prefix_queries();
ASSERT_EQ(0, userQueries.size());
//try sending via events api
nlohmann::json event_data;
event_data["q"] = "coo";
event_data["user_id"] = "1";
analyticsManager.add_event("127.0.0.1", "search", "coll_search", event_data);
popularQueries = analyticsManager.get_popular_queries();
auto localCounts = popularQueries["top_queries"]->get_local_counts();
ASSERT_EQ(1, localCounts.size());
ASSERT_EQ(1, localCounts.count("coo"));
ASSERT_EQ(1, localCounts["coo"]);
//try with a nohits analytics rule
analytics_rule = R"({
"name": "noresults_queries",
"type": "nohits_queries",
"params": {
"limit": 100,
"enable_auto_aggregation": false,
"source": {
"collections": ["titles"],
"events": [{"type": "search", "name": "nohits_search"}]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
create_op = analyticsManager.create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
q = "foobar";
analyticsManager.add_nohits_query("titles", q, true, "1");
auto noresults_queries = analyticsManager.get_nohits_queries();
userQueries = noresults_queries["top_queries"]->get_user_prefix_queries();
ASSERT_EQ(0, userQueries.size());
//send events for same
event_data["q"] = "foobar";
analyticsManager.add_event("127.0.0.1", "search", "nohits_search", event_data);
noresults_queries = analyticsManager.get_nohits_queries();
localCounts = noresults_queries["top_queries"]->get_local_counts();
ASSERT_EQ(1, localCounts.size());
ASSERT_EQ(1, localCounts.count("foobar"));
ASSERT_EQ(1, localCounts["foobar"]);
}
| 73,766 | C++ | .cpp | 1,947 | 29.486389 | 154 | 0.560757 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

3,719 | stopwords_manager_test.cpp | typesense_typesense/test/stopwords_manager_test.cpp |
#include <gtest/gtest.h>
#include "collection.h"
#include <vector>
#include <collection_manager.h>
#include <core_api.h>
#include "stopwords_manager.h"
class StopwordsManagerTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
StopwordsManager& stopwordsManager = StopwordsManager::get_instance();
std::atomic<bool> quit = false;
virtual void SetUp() {
std::string state_dir_path = "/tmp/typesense_test/stopwords_manager";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
stopwordsManager.init(store);
}
virtual void TearDown() {
delete store;
}
};
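// Verifies that multiple stopword sets can be upserted and retrieved, with multi-word
// entries tokenized into individual stopwords.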
TEST_F(StopwordsManagerTest, UpsertGetStopwords) {
auto stopwords1 = R"(
{"stopwords": ["america", "europe"], "locale": "en"}
)"_json;
auto upsert_op = stopwordsManager.upsert_stopword("continents", stopwords1);
ASSERT_TRUE(upsert_op.ok());
auto stopwords2 = R"(
{"stopwords": ["a", "an", "the"], "locale": "en"}
)"_json;
upsert_op = stopwordsManager.upsert_stopword("articles", stopwords2);
ASSERT_TRUE(upsert_op.ok());
auto stopwords3 = R"(
{"stopwords": ["India", "United States", "Japan", "China"], "locale": "en"}
)"_json;
upsert_op = stopwordsManager.upsert_stopword("countries", stopwords3);
ASSERT_TRUE(upsert_op.ok());
auto stopword_config = stopwordsManager.get_stopwords();
ASSERT_EQ(3, stopword_config.size()); //total stopwords set
ASSERT_TRUE(stopword_config.find("countries") != stopword_config.end());
ASSERT_TRUE(stopword_config.find("articles") != stopword_config.end());
ASSERT_TRUE(stopword_config.find("continents") != stopword_config.end());
ASSERT_EQ(3, stopword_config["articles"].stopwords.size());
ASSERT_TRUE(stopword_config["articles"].stopwords.find("a") != stopword_config["articles"].stopwords.end());
ASSERT_TRUE(stopword_config["articles"].stopwords.find("an") != stopword_config["articles"].stopwords.end());
ASSERT_TRUE(stopword_config["articles"].stopwords.find("the") != stopword_config["articles"].stopwords.end());
ASSERT_EQ(2, stopword_config["continents"].stopwords.size());
ASSERT_TRUE(stopword_config["continents"].stopwords.find("america") != stopword_config["continents"].stopwords.end());
ASSERT_TRUE(stopword_config["continents"].stopwords.find("europe") != stopword_config["continents"].stopwords.end());
ASSERT_EQ(5, stopword_config["countries"].stopwords.size()); //with tokenization, "United States" is split into two stopwords
ASSERT_TRUE(stopword_config["countries"].stopwords.find("india") != stopword_config["countries"].stopwords.end());
ASSERT_TRUE(stopword_config["countries"].stopwords.find("united") != stopword_config["countries"].stopwords.end());
ASSERT_TRUE(stopword_config["countries"].stopwords.find("states") != stopword_config["countries"].stopwords.end());
ASSERT_TRUE(stopword_config["countries"].stopwords.find("china") != stopword_config["countries"].stopwords.end());
ASSERT_TRUE(stopword_config["countries"].stopwords.find("japan") != stopword_config["countries"].stopwords.end());
}
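// Verifies fetching a single stopword set by name, including the 404 path for an unknown set.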
TEST_F(StopwordsManagerTest, GetStopword) {
auto stopwords = R"({"stopwords": ["a", "an", "the"], "locale": "en"})"_json;
auto upsert_op = stopwordsManager.upsert_stopword("articles", stopwords);
ASSERT_TRUE(upsert_op.ok());
stopword_struct_t stopwordStruct;
auto get_op = stopwordsManager.get_stopword("articles", stopwordStruct);
ASSERT_TRUE(get_op.ok());
ASSERT_EQ(3, stopwordStruct.stopwords.size());
//try to fetch non-existing stopword
get_op = stopwordsManager.get_stopword("country", stopwordStruct);
ASSERT_FALSE(get_op.ok());
ASSERT_EQ(404, get_op.code());
ASSERT_EQ("Stopword `country` not found.", get_op.error());
//try fetching a stopword set containing multi-word entries
stopwords = R"({"stopwords": ["India", "United States", "Japan"], "locale": "en"})"_json;
upsert_op = stopwordsManager.upsert_stopword("country", stopwords);
ASSERT_TRUE(upsert_op.ok());
get_op = stopwordsManager.get_stopword("country", stopwordStruct);
ASSERT_TRUE(get_op.ok());
ASSERT_EQ(4, stopwordStruct.stopwords.size()); //as "United States" is tokenized and counted as 2 stopwords
}
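// Verifies deleting an existing stopword set and the 404 path for a non-existing one.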
TEST_F(StopwordsManagerTest, DeleteStopword) {
auto stopwords1 = R"(
{"stopwords": ["america", "europe"], "locale": "en"}
)"_json;
auto upsert_op = stopwordsManager.upsert_stopword("continents", stopwords1);
ASSERT_TRUE(upsert_op.ok());
auto stopwords2 = R"(
{"stopwords": ["a", "an", "the"], "locale": "en"}
)"_json;
upsert_op = stopwordsManager.upsert_stopword("articles", stopwords2);
ASSERT_TRUE(upsert_op.ok());
stopword_struct_t stopwordStruct;
//delete a stopword
auto del_op = stopwordsManager.delete_stopword("articles");
ASSERT_TRUE(del_op.ok());
auto get_op = stopwordsManager.get_stopword("articles", stopwordStruct);
ASSERT_FALSE(get_op.ok());
ASSERT_EQ(404, get_op.code());
ASSERT_EQ("Stopword `articles` not found.", get_op.error());
//delete non-existing stopword
del_op = stopwordsManager.delete_stopword("states");
ASSERT_FALSE(del_op.ok());
ASSERT_EQ(404, del_op.code());
ASSERT_EQ("Stopword `states` not found.", del_op.error());
}
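// Verifies that upserting under an existing name replaces the previous stopword set.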
TEST_F(StopwordsManagerTest, UpdateStopword) {
auto stopwords_json = R"(
{"stopwords": ["america", "europe"], "locale": "en"}
)"_json;
auto upsert_op = stopwordsManager.upsert_stopword("continents", stopwords_json);
ASSERT_TRUE(upsert_op.ok());
auto stopword_config = stopwordsManager.get_stopwords();
ASSERT_EQ(2, stopword_config["continents"].stopwords.size());
ASSERT_TRUE(stopword_config["continents"].stopwords.find("america") != stopword_config["continents"].stopwords.end());
ASSERT_TRUE(stopword_config["continents"].stopwords.find("europe") != stopword_config["continents"].stopwords.end());
//upserting new words under the same name should replace the stopword set
stopwords_json = R"(
{"stopwords": ["india", "china", "japan"], "locale": "en"}
)"_json;
upsert_op = stopwordsManager.upsert_stopword("continents", stopwords_json);
ASSERT_TRUE(upsert_op.ok());
stopword_config = stopwordsManager.get_stopwords();
ASSERT_EQ(3, stopword_config["continents"].stopwords.size());
ASSERT_TRUE(stopword_config["continents"].stopwords.find("china") != stopword_config["continents"].stopwords.end());
ASSERT_TRUE(stopword_config["continents"].stopwords.find("india") != stopword_config["continents"].stopwords.end());
ASSERT_TRUE(stopword_config["continents"].stopwords.find("japan") != stopword_config["continents"].stopwords.end());
}
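// End-to-end search behaviour with stopword sets: all-stopword queries, partial matches,
// deletion of a set, and searching with an unknown set name.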
TEST_F(StopwordsManagerTest, StopwordsBasics) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "points", "type": "int32" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
nlohmann::json doc;
doc["title"] = "The Dark Knight Europe";
doc["points"] = 10;
coll1->add(doc.dump(), CREATE);
doc["title"] = "An American America";
doc["points"] = 12;
coll1->add(doc.dump(), CREATE);
doc["title"] = "An the";
doc["points"] = 17;
coll1->add(doc.dump(), CREATE);
doc["title"] = "A Deadman";
doc["points"] = 13;
coll1->add(doc.dump(), CREATE);
doc["title"] = "A Village Of The Deadman";
doc["points"] = 20;
coll1->add(doc.dump(), CREATE);
//when all words in query are stopwords
auto stopword_value = R"(
{"stopwords": ["the", "a", "an"], "locale": "en"}
)"_json;
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "coll1";
req->params["name"] = "articles";
req->body = stopword_value.dump();
auto result = put_upsert_stopword(req, res);
if(!result) {
LOG(ERROR) << res->body;
FAIL();
}
req->params["collection"] = "coll1";
req->params["q"] = "the";
req->params["query_by"] = "title";
req->params["stopwords"] = "articles";
nlohmann::json embedded_params;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
std::string json_results;
auto search_op = collectionManager.do_search(req->params, embedded_params, json_results, now_ts);
if(!search_op.error().empty()) {
LOG(ERROR) << search_op.error();
}
ASSERT_TRUE(search_op.ok());
nlohmann::json results = nlohmann::json::parse(json_results);
ASSERT_EQ(0, results["hits"].size());
req->params.clear();
json_results.clear();
//when only some words in the query are stopwords, the remaining words should still be matched
stopword_value = R"(
{"stopwords": ["america", "europe"], "locale": "en"}
)"_json;
req->params["collection"] = "coll1";
req->params["name"] = "continents";
req->body = stopword_value.dump();
result = put_upsert_stopword(req, res);
if(!result) {
LOG(ERROR) << res->body;
FAIL();
}
req->params["q"] = "America Man";
req->params["query_by"] = "title";
req->params["stopwords"] = "continents";
search_op = collectionManager.do_search(req->params, embedded_params, json_results, now_ts);
ASSERT_TRUE(search_op.ok());
results = nlohmann::json::parse(json_results);
ASSERT_EQ(0, results["hits"].size());
req->params.clear();
json_results.clear();
req->params["collection"] = "coll1";
req->params["q"] = "a deadman";
req->params["query_by"] = "title";
req->params["stopwords"] = "articles";
search_op = collectionManager.do_search(req->params, embedded_params, json_results, now_ts);
ASSERT_TRUE(search_op.ok());
results = nlohmann::json::parse(json_results);
ASSERT_EQ(2, results["hits"].size());
req->params.clear();
json_results.clear();
//try deleting a non-existing stopword set
req->params["collection"] = "coll1";
req->params["name"] = "state";
result = del_stopword(req, res);
ASSERT_EQ(404, res->status_code);
ASSERT_STREQ("{\"message\": \"Stopword `state` not found.\"}", res->body.c_str());
req->params.clear();
json_results.clear();
//delete the stopword set and then reference it in a search
req->params["collection"] = "coll1";
req->params["name"] = "continents";
result = del_stopword(req, res);
if(!result) {
LOG(ERROR) << res->body;
FAIL();
}
req->params["collection"] = "coll1";
req->params["q"] = "America";
req->params["query_by"] = "title";
req->params["stopwords"] = "continents";
search_op = collectionManager.do_search(req->params, embedded_params, json_results, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Could not find the stopword set named `continents`.", search_op.error());
req->params.clear();
json_results.clear();
//typo in the stopword set name while searching
req->params["collection"] = "coll1";
req->params["name"] = "the";
req->params["query_by"] = "title";
req->params["stopwords"] = "article";
search_op = collectionManager.do_search(req->params, embedded_params, json_results, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Could not find the stopword set named `article`.", search_op.error());
collectionManager.drop_collection("coll1");
}
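// Verifies request validation for stopword upserts: missing `stopwords` key and wrong value types.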
TEST_F(StopwordsManagerTest, StopwordsValidation) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "points", "type": "int32" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
//request body with a typo in the "stopwords" key
auto stopword_value = R"(
{"stopword": ["america", "europe"], "locale": "en"}
)"_json;
req->params["collection"] = "coll1";
req->params["name"] = "continents";
req->body = stopword_value.dump();
auto result = put_upsert_stopword(req, res);
ASSERT_EQ(400, res->status_code);
ASSERT_STREQ("{\"message\": \"Parameter `stopwords` is required\"}", res->body.c_str());
//check for value types
stopword_value = R"(
{"stopwords": ["america", "europe"], "locale": 12}
)"_json;
req->params["collection"] = "coll1";
req->params["name"] = "continents";
req->body = stopword_value.dump();
result = put_upsert_stopword(req, res);
ASSERT_EQ(400, res->status_code);
ASSERT_STREQ("{\"message\": \"Parameter `locale` is required as string value\"}", res->body.c_str());
stopword_value = R"(
{"stopwords": [1, 5, 2], "locale": "ko"}
)"_json;
req->params["collection"] = "coll1";
req->params["name"] = "continents";
req->body = stopword_value.dump();
result = put_upsert_stopword(req, res);
ASSERT_EQ(400, res->status_code);
ASSERT_STREQ("{\"message\": \"Parameter `stopwords` is required as string array value\"}", res->body.c_str());
collectionManager.drop_collection("coll1");
}
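// Verifies that stopword sets persisted in the store are reloaded after the managers are
// disposed and re-initialized.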
TEST_F(StopwordsManagerTest, ReloadStopwordsOnRestart) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string" },
{"name": "points", "type": "int32" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll1 = op.get();
auto stopword_value = R"(
{"stopwords": ["Pop", "Indie", "Rock", "Metal", "Folk"], "locale": "en"}
)"_json;
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["collection"] = "coll1";
req->params["name"] = "genre";
req->body = stopword_value.dump();
auto result = put_upsert_stopword(req, res);
if(!result) {
LOG(ERROR) << res->body;
FAIL();
}
auto stopword_config = stopwordsManager.get_stopwords();
ASSERT_TRUE(stopword_config.find("genre") != stopword_config.end());
ASSERT_EQ(5, stopword_config["genre"].stopwords.size());
ASSERT_TRUE(stopword_config["genre"].stopwords.find("pop") != stopword_config["genre"].stopwords.end());
ASSERT_TRUE(stopword_config["genre"].stopwords.find("indie") != stopword_config["genre"].stopwords.end());
ASSERT_TRUE(stopword_config["genre"].stopwords.find("rock") != stopword_config["genre"].stopwords.end());
ASSERT_TRUE(stopword_config["genre"].stopwords.find("metal") != stopword_config["genre"].stopwords.end());
ASSERT_TRUE(stopword_config["genre"].stopwords.find("folk") != stopword_config["genre"].stopwords.end());
//dispose collection manager and reload all stopwords
collectionManager.dispose();
stopwordsManager.dispose();
delete store;
stopword_config.clear();
std::string state_dir_path = "/tmp/typesense_test/stopwords_manager";
store = new Store(state_dir_path);
stopwordsManager.init(store);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
stopword_config = stopwordsManager.get_stopwords();
ASSERT_TRUE(stopword_config.find("genre") != stopword_config.end());
ASSERT_EQ(5, stopword_config["genre"].stopwords.size());
ASSERT_TRUE(stopword_config["genre"].stopwords.find("pop") != stopword_config["genre"].stopwords.end());
ASSERT_TRUE(stopword_config["genre"].stopwords.find("indie") != stopword_config["genre"].stopwords.end());
ASSERT_TRUE(stopword_config["genre"].stopwords.find("rock") != stopword_config["genre"].stopwords.end());
ASSERT_TRUE(stopword_config["genre"].stopwords.find("metal") != stopword_config["genre"].stopwords.end());
ASSERT_TRUE(stopword_config["genre"].stopwords.find("folk") != stopword_config["genre"].stopwords.end());
collectionManager.drop_collection("coll1");
}
| 16,788 | C++ | .cpp | 348 | 42.16954 | 123 | 0.647617 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

3,720 | collection_locale_test.cpp | typesense_typesense/test/collection_locale_test.cpp |
#include <gtest/gtest.h>
#include <collection.h>
#include <collection_manager.h>
class CollectionLocaleTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_locale";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
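// Searching within Japanese text using the `ja` locale.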
TEST_F(CollectionLocaleTest, SearchAgainstJapaneseText) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "ja"),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"今ぶり拍治ルツ", "Dustin Kensrue"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("拍治",
{"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
//LOG(INFO) << results;
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
//ASSERT_EQ("今ぶり<mark>拍</mark><mark>治</mark>ルツ", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
}
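// Searching within Chinese text using the `zh` locale, including traditional/simplified
// matching and highlighting.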
TEST_F(CollectionLocaleTest, SearchAgainstChineseText) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "zh"),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"爱并不会因时间而", "Dustin Kensrue"},
{"很久以前,傳說在臺中北屯的一個地方", "Gord Downie"},
{"獻給我思念的每一朵雲──海", "Dustin Kensrue"},
{"看誰先跑到小山丘上。媽媽總是第", "Jamie Phua"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("并",
{"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("爱<mark>并</mark>不会因时间而", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
// partial token should not match as prefix when prefix is set to false
results = coll1->search("并",
{"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
results = coll1->search("上媽",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("看誰先跑到小山丘<mark>上</mark>。<mark>媽</mark>媽總是第", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
// search using simplified chinese
results = coll1->search("妈",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("看誰先跑到小山丘上。<mark>媽</mark>媽總是第", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
}
TEST_F(CollectionLocaleTest, SearchAgainstThaiText) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "th"),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"ลงที่นั่นโดยรถไฟ", "Dustin Kensrue"},
{"พกติดตัวเสมอ", "Gord Downie"},
{"พกไฟ\nเสมอ", "Dustin Kensrue"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("ลงรถไฟ",
{"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>ลง</mark>ที่นั่นโดย<mark>รถไฟ</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("ลงรถไฟ downie",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>ลง</mark>ที่นั่นโดย<mark>รถไฟ</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("Gord <mark>Downie</mark>", results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("พกไฟ", {"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>พกไฟ</mark>\nเสมอ", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
}
TEST_F(CollectionLocaleTest, ThaiTextShouldBeNormalizedToNFKC) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "th"),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"น้ำมัน", "Dustin Kensrue"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("น้ํามัน",{"title"}, "", {}, {},
{0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionLocaleTest, ThaiTextShouldRespectSeparators) {
nlohmann::json coll_json = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string", "locale": "th"}
]
})"_json;
auto coll1 = collectionManager.create_collection(coll_json).get();
nlohmann::json doc;
doc["title"] = "alpha-beta-gamma";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*",{}, "title:=alpha-beta-gamma", {}, {},
{0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// now with `symbols_to_index`
coll_json = R"({
"name": "coll2",
"symbols_to_index": ["-"],
"fields": [
{"name": "title", "type": "string", "locale": "th"}
]
})"_json;
auto coll2 = collectionManager.create_collection(coll_json).get();
ASSERT_TRUE(coll2->add(doc.dump()).ok());
results = coll2->search("*",{}, "title:=alpha-beta-gamma", {}, {},
{0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll2->search("*",{}, "title:=alphabetagamma", {}, {},
{0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(0, results["found"].get<size_t>());
}
TEST_F(CollectionLocaleTest, SearchThaiTextPreSegmentedQuery) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "th"),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"ความเหลื่อมล้ำ", "Compound Word"}, // ความ, เหลื่อม, ล้ำ
{"การกระจายรายได้", "Doc A"},
{"จารีย์", "Doc B"},
{"Meiji", "Doc C"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
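// The long positional argument list below follows Collection::search()'s default-heavy signature;
// the two trailing `true` flags are presumably `prioritize_exact_match` and `pre_segmented_query`
// (an assumption based on the test name and typical parameter order, not verified against the header).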
auto results = coll1->search("เหลื่",
{"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 1000, true, true).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("meji",
{"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 1000, true, true).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("ควม",
{"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 1000, true, true).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionLocaleTest, SearchAgainstThaiTextExactMatch) {
Collection* coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "th"),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::string word_9bytes = "น้ำ";
std::string word_12bytes = "น้ํา";
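// Both strings spell the same Thai word: word_9bytes uses the composed SARA AM form (3 code points),
// while word_12bytes uses NIKHAHIT + SARA AA (4 code points); the byte counts refer to UTF-8 encoding.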
std::vector<std::vector<std::string>> records = {
{"ติดกับดักรายได้ปานกลาง", "Expected Result"},
{"ข้อมูลรายคนหรือรายบริษัทในการเชื่อมโยงส่วนได้ส่วนเสีย", "Another Result"},
{word_9bytes, "Another Result"}, // NFC normalization
};
for (size_t i = 0; i < records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = { sort_by(sort_field_const::text_match, "DESC"), sort_by("points", "DESC") };
auto results = coll1->search("รายได้",
{"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("ติดกับดัก<mark>ราย</mark><mark>ได้</mark>ปานกลาง",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("ข้อมูล<mark>ราย</mark>คนหรือ<mark>ราย</mark>บริษัทในการเชื่อมโยงส่วน<mark>ได้</mark>ส่วนเสีย",
results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
// check text index overflow regression with NFC normalization + highlighting
results = coll1->search(word_12bytes, {"title"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("<mark>น้ำ</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
}
TEST_F(CollectionLocaleTest, SearchAgainstKoreanText) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "ko"),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"경승지·산악·협곡", "Dustin Kensrue"},
{"안녕은하철도999극장판", "Gord Downie"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("극장판",
{"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("안녕은하철도999<mark>극장판</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("산악",
{"title", "artist"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("경승지·<mark>산악</mark>·협곡", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
}
TEST_F(CollectionLocaleTest, KoreanTextPrefixConsonant) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "ko"),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"서울특별시 성북구", "Wrong Result"},
{"서울특별시 중구 초동", "Wrong Result"},
{"서울특별시 관악구", "Expected Result"},
{"서울특별시 용산구 용산동", "Wrong Result"},
{"서울특별시 동대문구 이문동", "Wrong Result"},
{"서울특별시 서대문구 현저동", "Wrong Result"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = { sort_by(sort_field_const::text_match, "DESC"), sort_by("points", "DESC") };
// To ensure that NFKD works, we will test for both ᄀ (Hangul Choseong Kiyeok)
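// ᄀ (U+1100, a conjoining jamo) and ㄱ (U+3131, a compatibility jamo) should normalize to the same
// initial consonant, so either form works as a prefix (the exact normalization form used internally is assumed).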
auto results = coll1->search("서울특별시 ᄀ",
{"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(6, results["found"].get<size_t>());
ASSERT_EQ(6, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
// and ㄱ (Hangul Letter Kiyeok)
results = coll1->search("서울특별시 ㄱ",
{"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(6, results["found"].get<size_t>());
ASSERT_EQ(6, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
// search for full word
results = coll1->search("서울특별시 관",
{"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(6, results["found"].get<size_t>());
ASSERT_EQ(6, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionLocaleTest, KoreanTextPrefixVowel) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "ko"),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"서울특별시 강서구 공항동", "Wrong Result"},
{"서울특별시 관악구", "Wrong Result"},
{"서울특별시 강동구 고덕동", "Expected Result"},
{"서울특별시 관악구 관악산나들길", "Wrong Result"},
{"서울특별시 관악구 관악로", "Wrong Result"},
{"서울특별시 관악구 과천대로", "Wrong Result"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = { sort_by(sort_field_const::text_match, "DESC"), sort_by("points", "DESC") };
auto results = coll1->search("서울특별시 고",
{"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(6, results["found"].get<size_t>());
ASSERT_EQ(6, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionLocaleTest, SearchAgainstKoreanTextContainingEnglishChars) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "th"),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"개혁 등의 영향으로 11%나 위축됐다", "Dustin Kensrue"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("위축됐다",
{"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("개혁 등의 영향으로 11%나 <mark>위축됐다</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("11%",
{"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("개혁 등의 영향으로 <mark>11</mark>%나 위축됐다", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
}
TEST_F(CollectionLocaleTest, SearchCyrillicText) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "sr"),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["title"] = "Test Тест";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "TEST ТЕСТ";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
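// A lowercase Cyrillic query should match both the mixed-case and the all-uppercase documents,
// i.e. matching is case-insensitive for the "sr" locale.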
auto results = coll1->search("тест", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>TEST</mark> <mark>ТЕСТ</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("<mark>Test</mark> <mark>Тест</mark>", results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
// with typo
results = coll1->search("тетст", {"title"}, "", {}, {}, {1}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>TEST</mark> <mark>ТЕСТ</mark>", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("<mark>Test</mark> <mark>Тест</mark>", results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionLocaleTest, SearchCyrillicTextWithDefaultLocale) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, ""),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["title"] = "Test Тест";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "TEST ТЕСТ";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("тетст", {"title"}, "", {}, {}, {1}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionLocaleTest, SearchCyrillicTextWithDropTokens) {
// this test ensures that even when query tokens are dropped, the returned text is still highlighted on all query tokens
std::vector<field> fields = {field("description", field_types::STRING, false, false, true, "sr"),
field("points", field_types::INT32, false, false, true, "sr"),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["description"] = "HPE Aruba AP575 802.11ax Wireless Access Point - TAA Compliant - 2.40 GHz, "
"5 GHz - MIMO Technology - 1 x Network (RJ-45) - Gigabit Ethernet - Bluetooth 5";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
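// "description" is passed in the highlight-full-fields position (judging by the positional argument order),
// which is why the assertion below checks the full highlighted "value" rather than just the snippet.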
auto results = coll1->search("HPE Aruba AP575 Technology Gigabit Bluetooth 5", {"description"}, "", {}, {}, {0}, 10,
1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "description", 40, {}, {}, {}, 0,
"<mark>", "</mark>").get();
ASSERT_EQ(1, results["hits"][0]["highlights"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("<mark>HPE</mark> <mark>Aruba</mark> <mark>AP575</mark> 802.11ax Wireless Access Point - "
"TAA Compliant - 2.40 GHz, <mark>5</mark> GHz - MIMO <mark>Technology</mark> - 1 x Network (RJ-45) - "
"<mark>Gigabit</mark> Ethernet - <mark>Bluetooth</mark> <mark>5</mark>",
results["hits"][0]["highlights"][0]["value"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionLocaleTest, SearchAndFacetSearchForGreekText) {
std::vector<field> fields = {field("title", field_types::STRING, true, false, true, "el"),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["title"] = "Εμφάνιση κάθε μέρα.";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("Εμφάν", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("<mark>Εμφάν</mark>ιση κάθε μέρα.", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("<mark>Εμφάν</mark>ιση κάθε μέρα.", results["hits"][0]["highlights"][0]["value"].get<std::string>());
// with typo
results = coll1->search("Εμφάιση", {"title"}, "", {}, {}, {1}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("<mark>Εμφάνιση</mark> κάθε μέρα.", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
// facet search with prefix
results = coll1->search("*", {"title"}, "", {"title"}, {}, {1}, 10, 1, FREQUENCY, {false},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "title: Εμφάν").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("<mark>Εμφάν</mark>ιση κάθε μέρα.", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>());
// facet search with prefix typo
results = coll1->search("*", {"title"}, "", {"title"}, {}, {1}, 10, 1, FREQUENCY, {false},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "title: Εμφάνση").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("<mark>Εμφάνισ</mark>η κάθε μέρα.", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionLocaleTest, SearchOnCyrillicTextWithSpecialCharacters) {
std::vector<field> fields = {field("title", field_types::STRING, true, false, true, "ru"),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["title"] = "«Сирый», «несчастный», «никчёмный» — принятое "
"особ, сейчас, впрочем, оттенок скромности. Посыл, "
"среди которых отсутствие мобильного страшное.";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("отсутствие", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(),
10, "", 10, 4, "title").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("скромности. Посыл, среди которых <mark>отсутствие</mark> мобильного страшное.",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("«Сирый», «несчастный», «никчёмный» — принятое особ, сейчас, впрочем, оттенок скромности. "
"Посыл, среди которых <mark>отсутствие</mark> мобильного страшное.",
results["hits"][0]["highlights"][0]["value"].get<std::string>());
results = coll1->search("принятое", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("«Сирый», «несчастный», «никчёмный» — <mark>принятое</mark> особ, сейчас, впрочем, оттенок скромности. Посыл, среди которых отсутствие мобильного страшное.",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
results = coll1->search("*", {}, "", {"title"}, {}, {0}, 0, 1, FREQUENCY, {true}, 10,
spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(),
10, "title: отсутствие").get();
ASSERT_STREQ("«Сирый», «несчастный», «никчёмный» — принятое особ, сейчас, впрочем, оттенок скромности. "
"Посыл, среди которых <mark>отсутствие</mark> мобильного страшное.",
results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
results = coll1->search("*", {}, "", {"title"}, {}, {0}, 0, 1, FREQUENCY, {true}, 10,
spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(),
10, "title: отсутст").get();
ASSERT_STREQ("«Сирый», «несчастный», «никчёмный» — принятое особ, сейчас, впрочем, оттенок скромности. "
"Посыл, среди которых <mark>отсутст</mark>вие мобильного страшное.",
results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionLocaleTest, SearchOnCyrillicLargeText) {
std::vector<field> fields = {field("title", field_types::STRING, true, false, true, "ru"),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["title"] = "Петр Великий, царь России, в начале 18 века санкционировал использование западных буквенных форм "
"(ru). Со временем они были в значительной степени приняты на других языках, использующих этот "
"сценарий. Таким образом, в отличие от большинства современных греческих шрифтов, которые сохранили "
"свой собственный набор принципов дизайна для строчных букв (таких как размещение засечек, форма "
"концов штриха и правила толщины штриха, хотя греческие заглавные буквы действительно используют "
"латинский дизайн принципы) современные кириллические шрифты во многом такие же, как современные "
"латинские шрифты того же семейства. Развитие некоторых кириллических компьютерных шрифтов из "
"латинских также способствовало визуальной латинизации кириллического шрифта.";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("Великий", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_STREQ("Петр <mark>Великий</mark>, царь России, в начале",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
}
TEST_F(CollectionLocaleTest, SearchOnJapaneseLargeText) {
std::vector<field> fields = {field("title", field_types::STRING, true, false, true, "ja"),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["title"] = "王獣を倒すと入手した折れた角。追放された後、この世に存在すべきではないもの。\n獣域ウルブズの中で帝王と呼ばれていても、"
"魔獣たちの系譜では、その兄たちの万分の一にも満たないだろう。\n「黄"
"金」が無数の獣域ウルブズを捨て紙のように圧縮して偶然にできた異形の魔獣。その角には、黒いウルブズを命じて自分のため"
"に空間を溶かす権威が秘めている。";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("王獣を", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_STREQ("<mark>王</mark><mark>獣</mark><mark>を</mark><mark>倒す</mark>と入手した折",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
results = coll1->search("業果材", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_STREQ("に空間を溶かす<mark>権威</mark><mark>が</mark><mark>秘</mark>めている。",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
}
TEST_F(CollectionLocaleTest, SearchOnArabicText) {
std::vector<field> fields = {field("title", field_types::STRING, true, false, true, ""),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::string data = "جهينة";
std::string q = "جوهينة";
auto dchars = data.c_str();
auto qchars = q.c_str();
nlohmann::json doc;
doc["title"] = "جهينة";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
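// With num_typos set to {2}, the query "جوهينة" (one extra letter) should still match the stored "جهينة".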
auto results = coll1->search("جوهينة", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}).get();
ASSERT_STREQ("<mark>جهينة</mark>",
results["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
}
TEST_F(CollectionLocaleTest, SearchOnArabicTextWithTypo) {
std::vector<field> fields = {field("title", field_types::STRING, true, false, true, ""),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::string q = "دوني";
std::string title1 = "سوني";
std::string title2 = "داوني";
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "ينوس";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "ينواد";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("ينود", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {false}, 1,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "", 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
}
TEST_F(CollectionLocaleTest, SearchOnBulgarianText) {
std::vector<field> fields = {field("title", field_types::STRING, true, false, true, "bg"),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::string title1 = "Сърце от любов";
std::string title2 = "Съблезъб тигър";
std::string title3 = "Сърна";
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = title1;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = title2;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["title"] = title3;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("Сърце", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 1,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 5, 5, "", 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionLocaleTest, HighlightOfAllQueryTokensShouldConsiderUnicodePoints) {
// For performance reasons, we highlight all query tokens only when the text is small.
// Here, the "small" threshold must be defined in unicode code points, not in raw string size.
std::vector<field> fields = {field("title", field_types::STRING, true, false, true, ""),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "رجلا منهم اجتهد اربعين ليله ثم دعا فلم يستجب له فاتي عيسي ابن مريم عليه السلام يشكو اليه ما هو فيه ويساله الدعاء له فتطهر عيسي وصلي ثم";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("لة ثم دعا فلم يستجب له فأتى عيسى ابن مريم عليه السلام يشكو إل", {"title"}, "", {}, {},
{2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(17, results["hits"][0]["highlights"][0]["matched_tokens"].size());
}
TEST_F(CollectionLocaleTest, SearchInGermanLocaleShouldBeTypoTolerant) {
nlohmann::json coll_json = R"({
"name": "coll1",
"fields": [
{"name": "title_de", "type": "string", "locale": "de"}
]
})"_json;
auto coll1 = collectionManager.create_collection(coll_json).get();
nlohmann::json doc;
doc["title_de"] = "mülltonne";
doc["title_en"] = "trash bin";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
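// "mulltonne" (without the umlaut) should still match "mülltonne"; whether this happens via
// accent folding or via the allowed typo budget is not asserted here (assumption).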
auto results = coll1->search("mulltonne", {"title_de"}, "", {}, {},
{2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionLocaleTest, ExcludeQueryWithPt) {
nlohmann::json coll_json = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string", "locale": "pt"}
]
})"_json;
auto coll1 = collectionManager.create_collection(coll_json).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "nescau em pó tabela nutricional";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "nescau tabela nutricional";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
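// The "-pó" exclusion should drop doc "0" (which contains "pó"), leaving only doc "1";
// this relies on the pt locale tokenizing "pó" in the exclusion query correctly.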
auto results = coll1->search("nescau -pó", {"title"}, "", {}, {},
{2}, 10, 1, FREQUENCY, {true}, 1).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionLocaleTest, HandleSpecialCharsInThai) {
nlohmann::json coll_json = R"({
"name": "coll1",
"fields": [
{"name": "title_th", "type": "string", "locale": "th"},
{"name": "sku", "type": "string"}
]
})"_json;
auto coll1 = collectionManager.create_collection(coll_json).get();
nlohmann::json doc;
doc["title_th"] = "สวัสดี";
doc["sku"] = "12345_";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// query string is parsed using the locale of the first field in the query_by list
auto results = coll1->search("12345_", {"title_th", "sku"}, "", {}, {},
{2, 0}, 10, 1, FREQUENCY, {true, false}, 1).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
/*
TEST_F(CollectionLocaleTest, TranslitPad) {
UErrorCode translit_status = U_ZERO_ERROR;
auto transliterator = icu::Transliterator::createInstance("Any-Latin; Latin-ASCII",
UTRANS_FORWARD, translit_status);
icu::UnicodeString unicode_input = icu::UnicodeString::fromUTF8("எண்ணெய்");
transliterator->transliterate(unicode_input);
std::string output;
unicode_input.toUTF8String(output);
LOG(INFO) << output;
unicode_input = icu::UnicodeString::fromUTF8("எண்");
transliterator->transliterate(unicode_input);
unicode_input.toUTF8String(output);
LOG(INFO) << output;
unicode_input = icu::UnicodeString::fromUTF8("என்னை");
transliterator->transliterate(unicode_input);
unicode_input.toUTF8String(output);
LOG(INFO) << output;
delete transliterator;
}*/
| size: 45,259 | language: C++ | extension: .cpp | total_lines: 745 | avg_line_length: 46.169128 | max_line_length: 171 | alphanum_fraction: 0.562789 | repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: false | near_duplicates_stackv1: false | near_duplicates_stackv2: false |
| id: 3,721 | file_name: collection_override_test.cpp | file_path: typesense_typesense/test/collection_override_test.cpp | content:
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionOverrideTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
Collection *coll_mul_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_override";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("starring", field_types::STRING, true),
field("cast", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false)
};
coll_mul_fields = collectionManager.get_collection("coll_mul_fields").get();
if(coll_mul_fields == nullptr) {
coll_mul_fields = collectionManager.create_collection("coll_mul_fields", 4, fields, "points").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_mul_fields->add(json_line);
}
infile.close();
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.drop_collection("coll_mul_fields");
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionOverrideTest, ExcludeIncludeExactQueryMatch) {
Config::get_instance().set_enable_search_analytics(true);
nlohmann::json override_json = {
{"id", "exclude-rule"},
{
"rule", {
{"query", "of"},
{"match", override_t::MATCH_EXACT}
}
}
};
override_json["excludes"] = nlohmann::json::array();
override_json["excludes"][0] = nlohmann::json::object();
override_json["excludes"][0]["id"] = "4";
override_json["excludes"][1] = nlohmann::json::object();
override_json["excludes"][1]["id"] = "11";
override_t override;
override_t::parse(override_json, "", override);
coll_mul_fields->add_override(override);
std::vector<std::string> facets = {"cast"};
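// the exclude rule removes docs "4" and "11" from the results of the exact query "of", leaving 3 hits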
Option<nlohmann::json> res_op = coll_mul_fields->search("of", {"title"}, "", facets, {}, {0}, 10);
ASSERT_TRUE(res_op.ok());
nlohmann::json results = res_op.get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(3, results["found"].get<uint32_t>());
ASSERT_EQ(6, results["facet_counts"][0]["counts"].size());
ASSERT_STREQ("12", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("5", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("17", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// include
nlohmann::json override_json_include = {
{"id", "include-rule"},
{
"rule", {
{"query", "in"},
{"match", override_t::MATCH_EXACT}
}
}
};
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "0";
override_json_include["includes"][0]["position"] = 1;
override_json_include["includes"][1] = nlohmann::json::object();
override_json_include["includes"][1]["id"] = "3";
override_json_include["includes"][1]["position"] = 2;
override_t override_include;
override_t::parse(override_json_include, "", override_include);
coll_mul_fields->add_override(override_include);
res_op = coll_mul_fields->search("in", {"title"}, "", {}, {}, {0}, 10);
ASSERT_TRUE(res_op.ok());
results = res_op.get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(3, results["found"].get<uint32_t>());
ASSERT_FALSE(results.contains("metadata"));
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("3", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("13", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// curated results should be marked as such
ASSERT_EQ(true, results["hits"][0]["curated"].get<bool>());
ASSERT_EQ(true, results["hits"][1]["curated"].get<bool>());
ASSERT_EQ(0, results["hits"][2].count("curated"));
coll_mul_fields->remove_override("exclude-rule");
coll_mul_fields->remove_override("include-rule");
// contains cases
nlohmann::json override_contains_inc = {
{"id", "include-rule"},
{
"rule", {
{"query", "will"},
{"match", override_t::MATCH_CONTAINS}
}
}
};
override_contains_inc["includes"] = nlohmann::json::array();
override_contains_inc["includes"][0] = nlohmann::json::object();
override_contains_inc["includes"][0]["id"] = "0";
override_contains_inc["includes"][0]["position"] = 1;
override_contains_inc["includes"][1] = nlohmann::json::object();
override_contains_inc["includes"][1]["id"] = "1";
override_contains_inc["includes"][1]["position"] = 7; // purposely setting the position way beyond the number of results
override_t override_inc_contains;
override_t::parse(override_contains_inc, "", override_inc_contains);
coll_mul_fields->add_override(override_inc_contains);
res_op = coll_mul_fields->search("will smith", {"title"}, "", {}, {}, {0}, 10);
ASSERT_TRUE(res_op.ok());
results = res_op.get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ(4, results["found"].get<uint32_t>());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("3", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][3]["document"]["id"].get<std::string>().c_str());
// partial word should not match
res_op = coll_mul_fields->search("dowillow", {"title"}, "", {}, {}, {0}, 10);
ASSERT_TRUE(res_op.ok());
results = res_op.get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(0, results["found"].get<uint32_t>());
// ability to disable overrides
bool enable_overrides = false;
res_op = coll_mul_fields->search("will", {"title"}, "", {}, {}, {0}, 10,
1, FREQUENCY, {false}, 0, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 0, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 10000, true, false, enable_overrides);
ASSERT_TRUE(res_op.ok());
results = res_op.get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<uint32_t>());
ASSERT_STREQ("3", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][1]["document"]["id"].get<std::string>().c_str());
enable_overrides = true;
res_op = coll_mul_fields->search("will", {"title"}, "", {}, {}, {0}, 10,
1, FREQUENCY, {false}, 0, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 0, {}, {}, {}, 0,
"<mark>", "</mark>", {1}, 10000, true, false, enable_overrides);
ASSERT_TRUE(res_op.ok());
results = res_op.get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ(4, results["found"].get<uint32_t>());
coll_mul_fields->remove_override("include-rule");
Config::get_instance().set_enable_search_analytics(false);
}
TEST_F(CollectionOverrideTest, OverrideJSONValidation) {
nlohmann::json exclude_json = {
{"id", "exclude-rule"},
{
"rule", {
{"query", "of"},
{"match", override_t::MATCH_EXACT}
}
}
};
exclude_json["excludes"] = nlohmann::json::array();
exclude_json["excludes"][0] = nlohmann::json::object();
exclude_json["excludes"][0]["id"] = 11;
override_t override1;
auto parse_op = override_t::parse(exclude_json, "", override1);
ASSERT_FALSE(parse_op.ok());
ASSERT_STREQ("Exclusion `id` must be a string.", parse_op.error().c_str());
nlohmann::json include_json = {
{"id", "include-rule"},
{
"rule", {
{"query", "of"},
{"match", override_t::MATCH_EXACT}
}
}
};
include_json["includes"] = nlohmann::json::array();
include_json["includes"][0] = nlohmann::json::object();
include_json["includes"][0]["id"] = "11";
override_t override2;
parse_op = override_t::parse(include_json, "", override2);
ASSERT_FALSE(parse_op.ok());
ASSERT_STREQ("Inclusion definition must define both `id` and `position` keys.", parse_op.error().c_str());
include_json["includes"][0]["position"] = "1";
parse_op = override_t::parse(include_json, "", override2);
ASSERT_FALSE(parse_op.ok());
ASSERT_STREQ("Inclusion `position` must be an integer.", parse_op.error().c_str());
include_json["includes"][0]["position"] = 1;
parse_op = override_t::parse(include_json, "", override2);
ASSERT_TRUE(parse_op.ok());
nlohmann::json include_json2 = {
{"id", "include-rule"},
{
"rule", {
{"query", "of"},
{"match", override_t::MATCH_EXACT}
}
}
};
parse_op = override_t::parse(include_json2, "", override2);
ASSERT_FALSE(parse_op.ok());
ASSERT_STREQ("Must contain one of: `includes`, `excludes`, `metadata`, `filter_by`, `sort_by`, "
"`remove_matched_tokens`, `replace_query`.", parse_op.error().c_str());
include_json2["includes"] = nlohmann::json::array();
include_json2["includes"][0] = 100;
parse_op = override_t::parse(include_json2, "", override2);
ASSERT_FALSE(parse_op.ok());
ASSERT_STREQ("The `includes` value must be an array of objects.", parse_op.error().c_str());
nlohmann::json exclude_json2 = {
{"id", "exclude-rule"},
{
"rule", {
{"query", "of"},
{"match", override_t::MATCH_EXACT}
}
}
};
exclude_json2["excludes"] = nlohmann::json::array();
exclude_json2["excludes"][0] = "100";
parse_op = override_t::parse(exclude_json2, "", override2);
ASSERT_FALSE(parse_op.ok());
ASSERT_STREQ("The `excludes` value must be an array of objects.", parse_op.error().c_str());
}
TEST_F(CollectionOverrideTest, IncludeHitsFilterOverrides) {
// Check facet field highlight for overridden results
nlohmann::json override_json_include = {
{"id", "include-rule"},
{
"rule", {
{"query", "not-found"},
{"match", override_t::MATCH_EXACT}
}
},
{"metadata", {{"foo", "bar"}}},
};
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "0";
override_json_include["includes"][0]["position"] = 1;
override_json_include["includes"][1] = nlohmann::json::object();
override_json_include["includes"][1]["id"] = "2";
override_json_include["includes"][1]["position"] = 2;
override_json_include["filter_curated_hits"] = true;
override_t override_include;
override_t::parse(override_json_include, "", override_include);
coll_mul_fields->add_override(override_include);
std::map<std::string, override_t*> overrides = coll_mul_fields->get_overrides().get();
ASSERT_EQ(1, overrides.size());
auto override_json = overrides.at("include-rule")->to_json();
ASSERT_TRUE(override_json.contains("filter_curated_hits"));
ASSERT_TRUE(override_json["filter_curated_hits"].get<bool>());
auto results = coll_mul_fields->search("not-found", {"title"}, "points:>70", {"starring"}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("bar", results["metadata"]["foo"].get<std::string>());
// disable filter curation option
override_json_include["filter_curated_hits"] = false;
override_t::parse(override_json_include, "", override_include);
coll_mul_fields->add_override(override_include);
results = coll_mul_fields->search("not-found", {"title"}, "points:>70", {"starring"}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will").get();
ASSERT_EQ(2, results["hits"].size());
// remove filter curation option: by default no filtering should be done
override_json_include.erase("filter_curated_hits");
override_t::parse(override_json_include, "", override_include);
coll_mul_fields->add_override(override_include);
results = coll_mul_fields->search("not-found", {"title"}, "points:>70", {"starring"}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will").get();
ASSERT_EQ(2, results["hits"].size());
// query param configuration should take precedence over override level config
results = coll_mul_fields->search("not-found", {"title"}, "points:>70", {"starring"}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2, 1).get();
ASSERT_EQ(1, results["hits"].size());
// try disabling and overriding
override_json_include["filter_curated_hits"] = false;
override_t::parse(override_json_include, "", override_include);
coll_mul_fields->add_override(override_include);
results = coll_mul_fields->search("not-found", {"title"}, "points:>70", {"starring"}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2, 1).get();
ASSERT_EQ(1, results["hits"].size());
// try enabling and overriding
override_json_include["filter_curated_hits"] = true;
override_t::parse(override_json_include, "", override_include);
coll_mul_fields->add_override(override_include);
results = coll_mul_fields->search("not-found", {"title"}, "points:>70", {"starring"}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2, 0).get();
ASSERT_EQ(1, results["hits"].size());
}
TEST_F(CollectionOverrideTest, ExcludeIncludeFacetFilterQuery) {
// Check facet field highlight for overridden results
nlohmann::json override_json_include = {
{"id", "include-rule"},
{
"rule", {
{"query", "not-found"},
{"match", override_t::MATCH_EXACT}
}
}
};
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "0";
override_json_include["includes"][0]["position"] = 1;
override_json_include["includes"][1] = nlohmann::json::object();
override_json_include["includes"][1]["id"] = "2";
override_json_include["includes"][1]["position"] = 2;
override_t override_include;
override_t::parse(override_json_include, "", override_include);
coll_mul_fields->add_override(override_include);
std::map<std::string, override_t*> overrides = coll_mul_fields->get_overrides().get();
ASSERT_EQ(1, overrides.size());
auto override_json = overrides.at("include-rule")->to_json();
ASSERT_FALSE(override_json.contains("filter_by"));
ASSERT_TRUE(override_json.contains("remove_matched_tokens"));
ASSERT_TRUE(override_json.contains("filter_curated_hits"));
ASSERT_FALSE(override_json["remove_matched_tokens"].get<bool>());
ASSERT_FALSE(override_json["filter_curated_hits"].get<bool>());
auto results = coll_mul_fields->search("not-found", {"title"}, "", {"starring"}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will").get();
ASSERT_EQ("<mark>Will</mark> Ferrell", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>());
ASSERT_EQ("Will Ferrell", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
coll_mul_fields->remove_override("include-rule");
// facet count is okay when results are excluded
nlohmann::json override_json_exclude = {
{"id", "exclude-rule"},
{
"rule", {
{"query", "the"},
{"match", override_t::MATCH_EXACT}
}
}
};
override_json_exclude["excludes"] = nlohmann::json::array();
override_json_exclude["excludes"][0] = nlohmann::json::object();
override_json_exclude["excludes"][0]["id"] = "10";
override_t override;
override_t::parse(override_json_exclude, "", override);
coll_mul_fields->add_override(override);
results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: scott").get();
ASSERT_EQ(9, results["found"].get<size_t>());
// "count" would be `2` without exclusion
ASSERT_EQ("<mark>Scott</mark> Glenn", results["facet_counts"][0]["counts"][0]["highlighted"].get<std::string>());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_EQ("Kristin <mark>Scott</mark> Thomas", results["facet_counts"][0]["counts"][1]["highlighted"].get<std::string>());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][1]["count"].get<size_t>());
// ensure per_page is respected
// first with per_page = 0
results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 0, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: scott").get();
ASSERT_EQ(9, results["found"].get<size_t>());
ASSERT_EQ(0, results["hits"].size());
coll_mul_fields->remove_override("exclude-rule");
// now with per_page = 1, and an include query
coll_mul_fields->add_override(override_include);
results = coll_mul_fields->search("not-found", {"title"}, "", {"starring"}, {}, {0}, 1, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "").get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// should be able to replace existing override
override_include.rule.query = "found";
coll_mul_fields->add_override(override_include);
ASSERT_STREQ("found", coll_mul_fields->get_overrides().get()["include-rule"]->rule.query.c_str());
coll_mul_fields->remove_override("include-rule");
}
TEST_F(CollectionOverrideTest, FilterCuratedHitsSlideToCoverMissingSlots) {
// when some of the curated hits are filtered away, lower ranked hits must be pulled up
nlohmann::json override_json_include = {
{"id", "include-rule"},
{
"rule", {
{"query", "scott"},
{"match", override_t::MATCH_EXACT}
}
}
};
// first 2 hits won't match the filter, 3rd position should float up to position 1
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "7";
override_json_include["includes"][0]["position"] = 1;
override_json_include["includes"][1] = nlohmann::json::object();
override_json_include["includes"][1]["id"] = "17";
override_json_include["includes"][1]["position"] = 2;
override_json_include["includes"][2] = nlohmann::json::object();
override_json_include["includes"][2]["id"] = "10";
override_json_include["includes"][2]["position"] = 3;
override_json_include["filter_curated_hits"] = true;
override_t override_include;
override_t::parse(override_json_include, "", override_include);
coll_mul_fields->add_override(override_include);
auto results = coll_mul_fields->search("scott", {"starring"}, "points:>55", {}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "").get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("10", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("11", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("12", results["hits"][2]["document"]["id"].get<std::string>());
// another curation where there is an ID missing in the middle
override_json_include = {
{"id", "include-rule"},
{
"rule", {
{"query", "glenn"},
{"match", override_t::MATCH_EXACT}
}
}
};
// middle hit ("10") will not satisfy filter, so "11" will move to position 2
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "9";
override_json_include["includes"][0]["position"] = 1;
override_json_include["includes"][1] = nlohmann::json::object();
override_json_include["includes"][1]["id"] = "10";
override_json_include["includes"][1]["position"] = 2;
override_json_include["includes"][2] = nlohmann::json::object();
override_json_include["includes"][2]["id"] = "11";
override_json_include["includes"][2]["position"] = 3;
override_json_include["filter_curated_hits"] = true;
override_t override_include2;
override_t::parse(override_json_include, "", override_include2);
coll_mul_fields->add_override(override_include2);
results = coll_mul_fields->search("glenn", {"starring"}, "points:[43,86]", {}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "").get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("9", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("11", results["hits"][1]["document"]["id"].get<std::string>());
}
TEST_F(CollectionOverrideTest, SimpleOverrideStopProcessing) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("price", field_types::FLOAT, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["price"] = 399.99;
doc1["points"] = 30;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Fast Joggers";
doc2["price"] = 49.99;
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Comfortable Sneakers";
doc3["price"] = 19.99;
doc3["points"] = 1;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
nlohmann::json override_json_include = {
{"id", "include-rule-1"},
{
"rule", {
{"query", "shoes"},
{"match", override_t::MATCH_EXACT}
}
},
{"stop_processing", false}
};
// include-rule-1 pins document "2" to position 1
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "2";
override_json_include["includes"][0]["position"] = 1;
override_t override_include1;
auto op = override_t::parse(override_json_include, "include-rule-1", override_include1);
ASSERT_TRUE(op.ok());
coll1->add_override(override_include1);
override_json_include["id"] = "include-rule-2";
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "1";
override_json_include["includes"][0]["position"] = 2;
override_t override_include2;
op = override_t::parse(override_json_include, "include-rule-2", override_include2);
ASSERT_TRUE(op.ok());
coll1->add_override(override_include2);
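// because stop_processing is false, both include rules apply: doc "2" is pinned to position 1
// (rule 1) and doc "1" to position 2 (rule 2), with the organic hit "0" following them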
auto results = coll1->search("shoes", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][2]["document"]["id"].get<std::string>());
// now with stop processing enabled for the first rule
override_include1.stop_processing = true;
coll1->add_override(override_include1);
results = coll1->search("shoes", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
// check that default value for stop_processing is true
nlohmann::json override_json_test = {
{"id", "include-rule-test"},
{
"rule", {
{"query", "fast"},
{"match", override_t::MATCH_CONTAINS}
}
},
};
override_json_test["includes"] = nlohmann::json::array();
override_json_test["includes"][0] = nlohmann::json::object();
override_json_test["includes"][0]["id"] = "2";
override_json_test["includes"][0]["position"] = 1;
override_t override_include_test;
op = override_t::parse(override_json_test, "include-rule-test", override_include_test);
ASSERT_TRUE(op.ok());
ASSERT_TRUE(override_include_test.stop_processing);
}
TEST_F(CollectionOverrideTest, IncludeOverrideWithFilterBy) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("price", field_types::FLOAT, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["price"] = 399.99;
doc1["points"] = 30;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Fast Shoes";
doc2["price"] = 49.99;
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Comfortable Shoes";
doc3["price"] = 199.99;
doc3["points"] = 1;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
nlohmann::json override_json_include = {
{"id", "include-rule-1"},
{
"rule", {
{"query", "shoes"},
{"match", override_t::MATCH_EXACT}
}
},
{"filter_curated_hits", false},
{"stop_processing", false},
{"remove_matched_tokens", false},
{"filter_by", "price: >55"}
};
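    // with filter_curated_hits=false, the curated (included) hit is presumably exempt from the
    // override's filter_by, while organic hits are still filtered by "price: >55"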
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "2";
override_json_include["includes"][0]["position"] = 1;
override_t override_include1;
auto op = override_t::parse(override_json_include, "include-rule-1", override_include1);
ASSERT_TRUE(op.ok());
coll1->add_override(override_include1);
auto results = coll1->search("shoes", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
// when filter by does not match any result, curated result should still show up
// because `filter_curated_hits` is false
results = coll1->search("shoes", {"name"}, "points:1000",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
// when bad filter by clause is used in override
override_json_include = {
{"id", "include-rule-2"},
{
"rule", {
{"query", "test"},
{"match", override_t::MATCH_EXACT}
}
},
{"filter_curated_hits", false},
{"stop_processing", false},
{"remove_matched_tokens", false},
{"filter_by", "price >55"}
};
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "2";
override_json_include["includes"][0]["position"] = 1;
override_t override_include2;
op = override_t::parse(override_json_include, "include-rule-2", override_include2);
ASSERT_TRUE(op.ok());
coll1->add_override(override_include2);
results = coll1->search("random-name", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, results["hits"].size());
}
TEST_F(CollectionOverrideTest, ReplaceQuery) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["points"] = 30;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Fast Shoes";
doc2["points"] = 50;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Comfortable Socks";
doc3["points"] = 1;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
nlohmann::json override_json = R"({
"id": "rule-1",
"rule": {
"query": "boots",
"match": "exact"
},
"replace_query": "shoes"
})"_json;
override_t override_rule;
auto op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule);
auto results = coll1->search("boots", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
// don't allow both remove_matched_tokens and replace_query
override_json["remove_matched_tokens"] = true;
op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Only one of `replace_query` or `remove_matched_tokens` can be specified.", op.error());
// it's okay when it's explicitly set to false
override_json["remove_matched_tokens"] = false;
op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_TRUE(op.ok());
}
TEST_F(CollectionOverrideTest, RuleQueryMustBeCaseInsensitive) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["points"] = 30;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Tennis Ball";
doc2["points"] = 50;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Golf Ball";
doc3["points"] = 1;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
nlohmann::json override_json = R"({
"id": "rule-1",
"rule": {
"query": "GrEat",
"match": "contains"
},
"replace_query": "amazing"
})"_json;
override_t override_rule;
auto op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule);
override_json = R"({
"id": "rule-2",
"rule": {
"query": "BaLL",
"match": "contains"
},
"filter_by": "points: 1"
})"_json;
override_t override_rule2;
op = override_t::parse(override_json, "rule-2", override_rule2);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule2);
auto results = coll1->search("great shoes", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("ball", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionOverrideTest, RuleQueryWithAccentedChars) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("color", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Green";
doc1["color"] = "Green";
doc1["points"] = 30;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
nlohmann::json override_json = R"({
"id": "rule-1",
"rule": {
"query": "Grün",
"match": "contains"
},
"filter_by":"color:green",
"filter_curated_hits":true
})"_json;
override_t override_rule;
auto op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule);
auto results = coll1->search("grün", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionOverrideTest, WindowForRule) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["points"] = 30;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
nlohmann::json override_json = R"({
"id": "rule-1",
"rule": {
"query": "boots",
"match": "exact"
},
"replace_query": "shoes"
})"_json;
override_t override_rule;
auto op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule);
auto results = coll1->search("boots", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
    // rule must not match when effective_from_ts is set in the future
override_json["effective_from_ts"] = 35677971263; // year 3100, here we come! ;)
op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule);
results = coll1->search("boots", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, results["hits"].size());
    // rule must not match when effective_to_ts is set in the past
override_json["effective_from_ts"] = -1;
override_json["effective_to_ts"] = 965388863;
op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule);
results = coll1->search("boots", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, results["hits"].size());
// resetting both should bring the override back in action
override_json["effective_from_ts"] = 965388863;
override_json["effective_to_ts"] = 35677971263;
op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule);
results = coll1->search("boots", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
}
TEST_F(CollectionOverrideTest, FilterRule) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["points"] = 30;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Fast Shoes";
doc2["points"] = 50;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Comfortable Socks";
doc3["points"] = 1;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
nlohmann::json override_json = R"({
"id": "rule-1",
"rule": {
"query": "*",
"match": "exact",
"filter_by": "points: 50"
},
"includes": [{
"id": "0",
"position": 1
}]
})"_json;
override_t override_rule;
auto op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule);
auto results = coll1->search("*", {}, "points: 50",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
// empty query should not trigger override even though it will be deemed as wildcard search
results = coll1->search("", {"name"}, "points: 50",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
// check to_json
nlohmann::json override_json_ser = override_rule.to_json();
ASSERT_EQ("points: 50", override_json_ser["rule"]["filter_by"]);
// without q/match
override_json = R"({
"id": "rule-2",
"rule": {
"filter_by": "points: 1"
},
"includes": [{
"id": "0",
"position": 1
}]
})"_json;
override_t override_rule2;
op = override_t::parse(override_json, "rule-2", override_rule2);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule2);
results = coll1->search("socks", {"name"}, "points: 1",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
override_json_ser = override_rule2.to_json();
ASSERT_EQ("points: 1", override_json_ser["rule"]["filter_by"]);
ASSERT_EQ(0, override_json_ser["rule"].count("query"));
ASSERT_EQ(0, override_json_ser["rule"].count("match"));
}
TEST_F(CollectionOverrideTest, CurationGroupingNonCuratedHitsShouldNotAppearOutside) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("group_id", field_types::STRING, true),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 3, fields).get();
}
nlohmann::json doc;
doc["id"] = "1";
doc["title"] = "The Harry Potter 1";
doc["group_id"] = "hp";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["title"] = "The Harry Potter 2";
doc["group_id"] = "hp";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "3";
doc["title"] = "Lord of the Rings";
doc["group_id"] = "lotr";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
nlohmann::json override_json = R"({
"id": "rule-1",
"rule": {
"query": "*",
"match": "exact"
},
"includes": [{
"id": "2",
"position": 1
}]
})"_json;
override_t override_rule;
auto op = override_t::parse(override_json, "rule-1", override_rule);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule);
override_json = R"({
"id": "rule-2",
"rule": {
"query": "the",
"match": "exact"
},
"includes": [{
"id": "2",
"position": 1
}]
})"_json;
override_t override_rule2;
op = override_t::parse(override_json, "rule-2", override_rule2);
ASSERT_TRUE(op.ok());
coll1->add_override(override_rule2);
auto results = coll1->search("*", {"title"}, "", {}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
"", {}, {"group_id"}, 2).get();
    // when only one of the 2 records belonging to a group is used for curation, the other record
    // in that group should not appear again in the results
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(1, results["grouped_hits"][0]["hits"].size());
ASSERT_EQ(1, results["grouped_hits"][1]["hits"].size());
ASSERT_EQ("2", results["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("3", results["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>());
// same for keyword search
results = coll1->search("the", {"title"}, "", {}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
"", {}, {"group_id"}, 2).get();
    // when only one of the 2 records belonging to a group is used for curation, the other record
    // in that group should not appear again in the results
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(1, results["grouped_hits"][0]["hits"].size());
ASSERT_EQ(1, results["grouped_hits"][1]["hits"].size());
ASSERT_EQ("2", results["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("3", results["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionOverrideTest, PinnedAndHiddenHits) {
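    // pinned_hits is assumed to be a comma-separated list of "<doc_id>:<position>" pairs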
auto pinned_hits = "13:1,4:2";
// basic pinning
auto results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, {}).get();
ASSERT_EQ(10, results["found"].get<size_t>());
ASSERT_STREQ("13", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("4", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("11", results["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("16", results["hits"][3]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("6", results["hits"][4]["document"]["id"].get<std::string>().c_str());
// pinning + filtering
results = coll_mul_fields->search("of", {"title"}, "points:>58", {}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
pinned_hits, {}).get();
ASSERT_EQ(5, results["found"].get<size_t>());
ASSERT_STREQ("13", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("4", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("11", results["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("12", results["hits"][3]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("5", results["hits"][4]["document"]["id"].get<std::string>().c_str());
// pinning + filtering with filter_curated_hits: true
pinned_hits = "14:1,4:2";
results = coll_mul_fields->search("of", {"title"}, "points:>58", {}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, pinned_hits, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2, 1).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_STREQ("14", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("11", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("12", results["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("5", results["hits"][3]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ("The Silence <mark>of</mark> the Lambs", results["hits"][1]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("Confessions <mark>of</mark> a Shopaholic", results["hits"][2]["highlights"][0]["snippet"].get<std::string>());
ASSERT_EQ("Percy Jackson: Sea <mark>of</mark> Monsters", results["hits"][3]["highlights"][0]["snippet"].get<std::string>());
// both pinning and hiding
pinned_hits = "13:1,4:2";
std::string hidden_hits="11,16";
results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, hidden_hits).get();
ASSERT_STREQ("13", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("4", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("6", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// paginating such that pinned hits appear on second page
pinned_hits = "13:4,4:5";
results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 2, 2, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, hidden_hits).get();
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("13", results["hits"][1]["document"]["id"].get<std::string>().c_str());
    // pinned and hidden hits should take precedence over override rules
nlohmann::json override_json_include = {
{"id", "include-rule"},
{
"rule", {
{"query", "the"},
{"match", override_t::MATCH_EXACT}
}
}
};
// trying to include an ID that is also being hidden via `hidden_hits` query param will not work
// as pinned and hidden hits will take precedence over override rules
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "11";
override_json_include["includes"][0]["position"] = 2;
override_json_include["includes"][1] = nlohmann::json::object();
override_json_include["includes"][1]["id"] = "8";
override_json_include["includes"][1]["position"] = 1;
override_t override_include;
override_t::parse(override_json_include, "", override_include);
coll_mul_fields->add_override(override_include);
results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
{}, {hidden_hits}).get();
ASSERT_EQ(8, results["found"].get<size_t>());
ASSERT_STREQ("8", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("6", results["hits"][1]["document"]["id"].get<std::string>().c_str());
}
TEST_F(CollectionOverrideTest, PinnedHitsSmallerThanPageSize) {
auto pinned_hits = "17:1,13:4,11:3";
    // pinned hits smaller than page size: check that pagination works
    // order without pinning:
// 11, 16, 6, 8, 1, 0, 10, 4, 13, 17
auto results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 8, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, {}).get();
std::vector<size_t> expected_ids_p1 = {17, 16, 11, 13, 6, 8, 1, 0};
ASSERT_EQ(10, results["found"].get<size_t>());
ASSERT_EQ(8, results["hits"].size());
for(size_t i=0; i<8; i++) {
ASSERT_EQ(expected_ids_p1[i], std::stoi(results["hits"][i]["document"]["id"].get<std::string>()));
}
std::vector<size_t> expected_ids_p2 = {10, 4};
results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 8, 2, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, {}).get();
ASSERT_EQ(10, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
for(size_t i=0; i<2; i++) {
ASSERT_EQ(expected_ids_p2[i], std::stoi(results["hits"][i]["document"]["id"].get<std::string>()));
}
}
TEST_F(CollectionOverrideTest, PinnedHitsLargerThanPageSize) {
auto pinned_hits = "6:1,1:2,16:3,11:4";
// pinned hits larger than page size: check that pagination works
auto results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 2, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, {}).get();
ASSERT_EQ(10, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("6", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 2, 2, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, {}).get();
ASSERT_EQ(10, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("16", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("11", results["hits"][1]["document"]["id"].get<std::string>().c_str());
results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 2, 3, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, {}).get();
ASSERT_EQ(10, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("8", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
}
TEST_F(CollectionOverrideTest, PinnedHitsWhenThereAreNotEnoughResults) {
auto pinned_hits = "6:1,1:2,11:5";
// multiple pinned hits specified, but query produces no result
auto results = coll_mul_fields->search("not-foundquery", {"title"}, "", {"starring"}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, {}).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("6", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("11", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// multiple pinned hits but only single result
results = coll_mul_fields->search("burgundy", {"title"}, "", {"starring"}, {}, {0}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, {}).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_EQ(4, results["hits"].size());
ASSERT_STREQ("6", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("11", results["hits"][3]["document"]["id"].get<std::string>().c_str());
}
TEST_F(CollectionOverrideTest, HiddenHitsHidingSingleResult) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Down There by the Train"}
};
for (size_t i = 0; i < records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::string hidden_hits="0";
auto results = coll1->search("the train", {"title"}, "", {}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
"", hidden_hits).get();
ASSERT_EQ(0, results["found"].get<size_t>());
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("the train", {"title"}, "points:0", {}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
"", hidden_hits).get();
ASSERT_EQ(0, results["found"].get<size_t>());
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, PinnedHitsGrouping) {
auto pinned_hits = "6:1,8:1,1:2,13:3,4:3";
// without any grouping parameter, only the first ID in a position should be picked
// and other IDs should appear in their original positions
auto results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, {}).get();
ASSERT_EQ(10, results["found"].get<size_t>());
ASSERT_STREQ("6", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("13", results["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("11", results["hits"][3]["document"]["id"].get<std::string>().c_str());
// pinned hits should be marked as curated
ASSERT_EQ(true, results["hits"][0]["curated"].get<bool>());
ASSERT_EQ(true, results["hits"][1]["curated"].get<bool>());
ASSERT_EQ(true, results["hits"][2]["curated"].get<bool>());
ASSERT_EQ(0, results["hits"][3].count("curated"));
// with grouping
results = coll_mul_fields->search("the", {"title"}, "", {"starring"}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "starring: will", 30, 5,
"", 10,
pinned_hits, {}, {"cast"}, 2).get();
ASSERT_EQ(8, results["found"].get<size_t>());
ASSERT_EQ(1, results["grouped_hits"][0]["group_key"].size());
ASSERT_EQ(2, results["grouped_hits"][0]["group_key"][0].size());
ASSERT_STREQ("Chris Evans", results["grouped_hits"][0]["group_key"][0][0].get<std::string>().c_str());
ASSERT_STREQ("Scarlett Johansson", results["grouped_hits"][0]["group_key"][0][1].get<std::string>().c_str());
ASSERT_STREQ("6", results["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("8", results["grouped_hits"][0]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("13", results["grouped_hits"][2]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("4", results["grouped_hits"][2]["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("11", results["grouped_hits"][3]["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("16", results["grouped_hits"][4]["hits"][0]["document"]["id"].get<std::string>().c_str());
}
TEST_F(CollectionOverrideTest, PinnedHitsGroupingNonPinnedHitsShouldNotAppearOutside) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("group_id", field_types::STRING, true),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 3, fields).get();
}
nlohmann::json doc;
doc["id"] = "1";
doc["title"] = "The Harry Potter 1";
doc["group_id"] = "hp";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["title"] = "The Harry Potter 2";
doc["group_id"] = "hp";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "3";
doc["title"] = "Lord of the Rings";
doc["group_id"] = "lotr";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto pinned_hits = "2:1";
auto results = coll1->search("*", {"title"}, "", {}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
pinned_hits, {}, {"group_id"}, 2).get();
    // when only one of the 2 records belonging to a group is used for curation, the other record
    // in that group should not appear again in the results
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(1, results["grouped_hits"][0]["hits"].size());
ASSERT_EQ(1, results["grouped_hits"][1]["hits"].size());
ASSERT_EQ("2", results["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("3", results["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>());
// same for keyword search
results = coll1->search("the", {"title"}, "", {}, {}, {0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
pinned_hits, {}, {"group_id"}, 2).get();
    // when only one of the 2 records belonging to a group is used for curation, the other record
    // in that group should not appear again in the results
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(1, results["grouped_hits"][0]["hits"].size());
ASSERT_EQ(1, results["grouped_hits"][1]["hits"].size());
ASSERT_EQ("2", results["grouped_hits"][0]["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("3", results["grouped_hits"][1]["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionOverrideTest, PinnedHitsWithWildCardQuery) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 3, fields, "points").get();
}
size_t num_indexed = 0;
for(size_t i=0; i<311; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = "Title " + std::to_string(i);
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
num_indexed++;
}
auto pinned_hits = "7:1,4:2";
auto results = coll1->search("*", {"title"}, "", {}, {}, {0}, 30, 11, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
pinned_hits, {}, {}, {0}, "", "", {}).get();
ASSERT_EQ(311, results["found"].get<size_t>());
ASSERT_EQ(11, results["hits"].size());
std::vector<size_t> expected_ids = {12, 11, 10, 9, 8, 6, 5, 3, 2, 1, 0}; // 4 and 7 should be missing
for(size_t i=0; i<11; i++) {
ASSERT_EQ(expected_ids[i], std::stoi(results["hits"][i]["document"]["id"].get<std::string>()));
}
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, HiddenHitsWithWildCardQuery) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 3, fields, "points").get();
}
for(size_t i=0; i<5; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = "Title " + std::to_string(i);
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto hidden_hits = "1";
auto results = coll1->search("*", {"title"}, "", {}, {}, {0}, 30, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
{}, hidden_hits, {}, {0}, "", "", {}).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_EQ(4, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, PinnedHitsIdsHavingColon) {
Collection *coll1;
std::vector<field> fields = {field("url", field_types::STRING, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = { sort_by("points", "DESC") };
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
for(size_t i=1; i<=10; i++) {
nlohmann::json doc;
doc["id"] = std::string("https://example.com/") + std::to_string(i);
doc["url"] = std::string("https://example.com/") + std::to_string(i);
doc["points"] = i;
coll1->add(doc.dump());
}
std::vector<std::string> query_fields = {"url"};
std::vector<std::string> facets;
std::string pinned_hits_str = "https://example.com/1:1, https://example.com/3:2"; // can have space
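    // the IDs themselves contain ':'; presumably only the last ':' of each entry separates
    // the document ID from the pinned position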
auto res_op = coll1->search("*", {"url"}, "", {}, {}, {0}, 25, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10,
pinned_hits_str, {});
ASSERT_TRUE(res_op.ok());
auto res = res_op.get();
ASSERT_EQ(10, res["found"].get<size_t>());
ASSERT_STREQ("https://example.com/1", res["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("https://example.com/3", res["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("https://example.com/10", res["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("https://example.com/9", res["hits"][3]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("https://example.com/2", res["hits"][9]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringExactMatchBasics) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("brand", field_types::STRING, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["category"] = "shoes";
doc1["brand"] = "Nike";
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Track Gym";
doc2["category"] = "shoes";
doc2["brand"] = "Adidas";
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Running Shoes";
doc3["category"] = "sports";
doc3["brand"] = "Nike";
doc3["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
auto results = coll1->search("shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][2]["document"]["id"].get<std::string>());
// with override, results will be different
nlohmann::json override_json = {
{"id", "dynamic-cat-filter"},
{
"rule", {
{"query", "{category}"},
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true},
{"filter_by", "category: {category}"}
};
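    // dynamic filtering: "{category}" in the rule query is a placeholder that captures the
    // matching token from the user's query and substitutes it into the filter_by expression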
override_t override;
auto op = override_t::parse(override_json, "dynamic-cat-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
override_json = {
{"id", "dynamic-brand-cat-filter"},
{
"rule", {
{"query", "{brand} {category}"},
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true},
{"filter_by", "category: {category} && brand: {brand}"}
};
op = override_t::parse(override_json, "dynamic-brand-cat-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
override_json = {
{"id", "dynamic-brand-filter"},
{
"rule", {
{"query", "{brand}"},
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true},
{"filter_by", "brand: {brand}"}
};
override_json["includes"] = nlohmann::json::array();
override_json["includes"][0] = nlohmann::json::object();
override_json["includes"][0]["id"] = "0";
override_json["includes"][0]["position"] = 1;
op = override_t::parse(override_json, "dynamic-brand-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
results = coll1->search("shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(0, results["hits"][0]["highlights"].size());
ASSERT_EQ(0, results["hits"][1]["highlights"].size());
// should not apply filter for non-exact case
results = coll1->search("running shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(3, results["hits"].size());
results = coll1->search("adidas shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
// dynamic brand filter + explicit ID include
results = coll1->search("adidas", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
// with bad override
nlohmann::json override_json_bad1 = {
{"id", "dynamic-filters-bad1"},
{
"rule", {
{"query", "{brand}"},
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true},
{"filter_by", ""}
};
override_t override_bad1;
op = override_t::parse(override_json_bad1, "dynamic-filters-bad1", override_bad1);
ASSERT_FALSE(op.ok());
ASSERT_EQ("The `filter_by` must be a non-empty string.", op.error());
nlohmann::json override_json_bad2 = {
{"id", "dynamic-filters-bad2"},
{
"rule", {
{"query", "{brand}"},
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true},
{"filter_by", {"foo", "bar"}}
};
override_t override_bad2;
op = override_t::parse(override_json_bad2, "dynamic-filters-bad2", override_bad2);
ASSERT_FALSE(op.ok());
ASSERT_EQ("The `filter_by` must be a string.", op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringPrefixMatchShouldNotWork) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("brand", field_types::STRING, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["category"] = "shoe";
doc1["brand"] = "Nike";
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Track Gym";
doc2["category"] = "shoes";
doc2["brand"] = "Adidas";
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Running Shoe";
doc3["category"] = "shoes";
doc3["brand"] = "Nike";
doc3["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
// with override, results will be different
nlohmann::json override_json = {
{"id", "dynamic-cat-filter"},
{
"rule", {
{"query", "{category}"},
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true},
{"filter_by", "category: {category}"}
};
override_t override;
auto op = override_t::parse(override_json, "dynamic-cat-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
auto results = coll1->search("shoe", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringMissingField) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["category"] = "shoes";
doc1["points"] = 3;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
nlohmann::json override_json = {
{"id", "dynamic-cat-filter"},
{
"rule", {
{"query", "{categories}"}, // this field does NOT exist
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true},
{"filter_by", "category: {categories}"}
};
override_t override;
auto op = override_t::parse(override_json, "dynamic-cat-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
auto results = coll1->search("shoes", {"name", "category"}, "",
{}, sort_fields, {2, 2}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringBadFilterBy) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["category"] = "shoes";
doc1["points"] = 3;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
nlohmann::json override_json = {
{"id", "dynamic-cat-filter"},
{
"rule", {
{"query", "{category}"}, // this field does NOT exist
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true},
{"filter_by", "category: {category} && foo"}
};
override_t override;
auto op = override_t::parse(override_json, "dynamic-cat-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
auto results = coll1->search("shoes", {"name", "category"}, "",
{}, sort_fields, {2, 2}, 10).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringMultiplePlaceholders) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("brand", field_types::STRING, true),
field("color", field_types::STRING, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Retro Shoes";
doc1["category"] = "shoes";
doc1["color"] = "yellow";
doc1["brand"] = "Nike Air Jordan";
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Baseball";
doc2["category"] = "shoes";
doc2["color"] = "white";
doc2["brand"] = "Adidas";
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Running Shoes";
doc3["category"] = "sports";
doc3["color"] = "grey";
doc3["brand"] = "Nike";
doc3["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC"), sort_by("points", "DESC")};
nlohmann::json override_json = {
{"id", "dynamic-cat-filter"},
{
"rule", {
{"query", "{brand} {color} shoes"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"filter_by", "brand: {brand} && color: {color}"}
};
override_t override;
auto op = override_t::parse(override_json, "dynamic-cat-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
    // the extra token "light" prevents the rule from matching, so all results are fetched, not just the Nike Air Jordan brand
auto results = coll1->search("Nike Air Jordan light yellow shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
    // query with extra tokens at the start, preceding the placeholders in the rule
results = coll1->search("New Nike Air Jordan yellow shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringTokensBetweenPlaceholders) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("brand", field_types::STRING, true),
field("color", field_types::STRING, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Retro Shoes";
doc1["category"] = "shoes";
doc1["color"] = "yellow";
doc1["brand"] = "Nike Air Jordan";
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Baseball";
doc2["category"] = "shoes";
doc2["color"] = "white";
doc2["brand"] = "Adidas";
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Running Shoes";
doc3["category"] = "sports";
doc3["color"] = "grey";
doc3["brand"] = "Nike";
doc3["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC"), sort_by("points", "DESC")};
nlohmann::json override_json = {
{"id", "dynamic-cat-filter"},
{
"rule", {
{"query", "{brand} shoes {color}"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"filter_by", "brand: {brand} && color: {color}"}
};
override_t override;
auto op = override_t::parse(override_json, "dynamic-cat-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
auto results = coll1->search("Nike Air Jordan shoes yellow", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringWithNumericalFilter) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("brand", field_types::STRING, true),
field("color", field_types::STRING, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Retro Shoes";
doc1["category"] = "shoes";
doc1["color"] = "yellow";
doc1["brand"] = "Nike";
doc1["points"] = 15;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Baseball Shoes";
doc2["category"] = "shoes";
doc2["color"] = "white";
doc2["brand"] = "Nike";
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Running Shoes";
doc3["category"] = "sports";
doc3["color"] = "grey";
doc3["brand"] = "Nike";
doc3["points"] = 5;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["name"] = "Running Shoes";
doc4["category"] = "sports";
doc4["color"] = "grey";
doc4["brand"] = "Adidas";
doc4["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC"), sort_by("points", "DESC")};
nlohmann::json override_json = {
{"id", "dynamic-cat-filter"},
{
"rule", {
{"query", "popular {brand} shoes"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", false},
{"filter_by", "brand: {brand} && points:> 10"}
};
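    // the generated filter should combine the captured {brand} token with the static numeric
    // clause (points:> 10); remove_matched_tokens=false keeps the matched tokens in the query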
override_t override;
auto op = override_t::parse(override_json, "dynamic-cat-filter", override);
ASSERT_TRUE(op.ok());
auto results = coll1->search("popular nike shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(4, results["hits"].size());
coll1->add_override(override);
results = coll1->search("popular nike shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// when overrides are disabled
bool enable_overrides = false;
results = coll1->search("popular nike shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {false, false, false}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, {}, {}, {}, 0,
"<mark>", "</mark>", {1, 1, 1}, 10000, true, false, enable_overrides).get();
ASSERT_EQ(4, results["hits"].size());
// should not match the defined override
results = coll1->search("running adidas shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][3]["document"]["id"].get<std::string>());
results = coll1->search("adidas", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {false}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringExactMatch) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("brand", field_types::STRING, true),
field("color", field_types::STRING, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Retro Shoes";
doc1["category"] = "shoes";
doc1["color"] = "yellow";
doc1["brand"] = "Nike";
doc1["points"] = 15;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Baseball Shoes";
doc2["category"] = "shoes";
doc2["color"] = "white";
doc2["brand"] = "Nike";
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Running Shoes";
doc3["category"] = "sports";
doc3["color"] = "grey";
doc3["brand"] = "Nike";
doc3["points"] = 5;
nlohmann::json doc4;
doc4["id"] = "3";
doc4["name"] = "Running Shoes";
doc4["category"] = "sports";
doc4["color"] = "grey";
doc4["brand"] = "Adidas";
doc4["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC"), sort_by("points", "DESC")};
nlohmann::json override_json = {
{"id", "dynamic-cat-filter"},
{
"rule", {
{"query", "popular {brand} shoes"},
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", false},
{"filter_by", "brand: {brand} && points:> 10"}
};
override_t override;
auto op = override_t::parse(override_json, "dynamic-cat-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
auto results = coll1->search("really popular nike shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(4, results["hits"].size());
results = coll1->search("popular nike running shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(4, results["hits"].size());
results = coll1->search("popular nike shoes running", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("3", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringWithSynonyms) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("brand", field_types::STRING, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["category"] = "shoes";
doc1["brand"] = "Nike";
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Exciting Track Gym";
doc2["category"] = "shoes";
doc2["brand"] = "Adidas";
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Amazing Sneakers";
doc3["category"] = "sneakers";
doc3["brand"] = "Adidas";
doc3["points"] = 4;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
coll1->add_synonym(R"({"id": "sneakers-shoes", "root": "sneakers", "synonyms": ["shoes"]})"_json);
coll1->add_synonym(R"({"id": "boots-shoes", "root": "boots", "synonyms": ["shoes"]})"_json);
coll1->add_synonym(R"({"id": "exciting-amazing", "root": "exciting", "synonyms": ["amazing"]})"_json);
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
// spaces around field name should still work e.g. "{ field }"
nlohmann::json override_json1 = {
{"id", "dynamic-filters"},
{
"rule", {
{"query", "{ category }"},
{"match", override_t::MATCH_EXACT}
}
},
{"filter_by", "category: {category}"}
};
override_t override1;
auto op = override_t::parse(override_json1, "dynamic-filters", override1);
ASSERT_TRUE(op.ok());
coll1->add_override(override1);
std::map<std::string, override_t*> overrides = coll1->get_overrides().get();
ASSERT_EQ(1, overrides.size());
auto override_json = overrides.at("dynamic-filters")->to_json();
ASSERT_EQ("category: {category}", override_json["filter_by"].get<std::string>());
ASSERT_EQ(true, override_json["remove_matched_tokens"].get<bool>()); // must be true by default
nlohmann::json override_json2 = {
{"id", "static-filters"},
{
"rule", {
{"query", "exciting"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"filter_by", "points: [5, 4]"}
};
override_t override2;
op = override_t::parse(override_json2, "static-filters", override2);
ASSERT_TRUE(op.ok());
coll1->add_override(override2);
auto results = coll1->search("sneakers", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
// keyword does not exist but has a synonym with results
results = coll1->search("boots", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
// keyword has no override, but synonym's override is used
results = coll1->search("exciting", {"name", "category", "brand"}, "",
{}, sort_fields, {2, 2, 2}, 10).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, StaticFiltering) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("price", field_types::FLOAT, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["price"] = 399.99;
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Track Shoes";
doc2["price"] = 49.99;
doc2["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
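// "expensive" (contains match) filters on price > 100; "cheap" (exact match) filters on price < 100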
nlohmann::json override_json_contains = {
{"id", "static-filters"},
{
"rule", {
{"query", "expensive"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"filter_by", "price:> 100"}
};
override_t override_contains;
auto op = override_t::parse(override_json_contains, "static-filters", override_contains);
ASSERT_TRUE(op.ok());
coll1->add_override(override_contains);
nlohmann::json override_json_exact = {
{"id", "static-exact-filters"},
{
"rule", {
{"query", "cheap"},
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true},
{"filter_by", "price:< 100"}
};
override_t override_exact;
op = override_t::parse(override_json_exact, "static-exact-filters", override_exact);
ASSERT_TRUE(op.ok());
coll1->add_override(override_exact);
auto results = coll1->search("expensive shoes", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("expensive", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// partial word should not match
results = coll1->search("inexpensive shoes", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["found"].get<uint32_t>());
ASSERT_EQ(2, results["hits"].size());
// with exact match
results = coll1->search("cheap", {"name"}, "",
{}, sort_fields, {2}, 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
// the exact-match "cheap" rule should not trigger when the query has extra tokens
results = coll1->search("cheap boots", {"name"}, "",
{}, sort_fields, {2}, 10).get();
ASSERT_EQ(0, results["hits"].size());
// with synonym for expensive: should NOT match as synonyms are resolved after override substitution
coll1->add_synonym(R"({"id": "costly-expensive", "root": "costly", "synonyms": ["expensive"]})"_json);
results = coll1->search("costly", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, StaticFilteringMultipleRuleMatch) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["tags"] = {"twitter"};
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Track Shoes";
doc2["tags"] = {"starred"};
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Track Shoes";
doc3["tags"] = {"twitter", "starred"};
doc3["points"] = 10;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
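// two static filter overrides with stop_processing disabled, so both can apply to the same query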
nlohmann::json override_filter1_json = {
{"id", "static-filter-1"},
{
"rule", {
{"query", "twitter"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"stop_processing", false},
{"filter_by", "tags: twitter"}
};
override_t override_filter1;
auto op = override_t::parse(override_filter1_json, "static-filter-1", override_filter1);
ASSERT_TRUE(op.ok());
coll1->add_override(override_filter1);
nlohmann::json override_filter2_json = {
{"id", "static-filter-2"},
{
"rule", {
{"query", "starred"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"stop_processing", false},
{"filter_by", "tags: starred"}
};
override_t override_filter2;
op = override_t::parse(override_filter2_json, "static-filter-2", override_filter2);
ASSERT_TRUE(op.ok());
coll1->add_override(override_filter2);
auto results = coll1->search("starred twitter", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
// when stop_processing is enabled (default is true)
override_filter1_json.erase("stop_processing");
override_filter2_json.erase("stop_processing");
override_t override_filter1_reset;
op = override_t::parse(override_filter1_json, "static-filter-1", override_filter1_reset);
ASSERT_TRUE(op.ok());
override_t override_filter2_reset;
op = override_t::parse(override_filter2_json, "static-filter-2", override_filter2_reset);
ASSERT_TRUE(op.ok());
coll1->add_override(override_filter1_reset);
coll1->add_override(override_filter2_reset);
results = coll1->search("starred twitter", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringMultipleRuleMatch) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("brand", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["brand"] = "Nike";
doc1["tags"] = {"twitter"};
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Track Shoes";
doc2["brand"] = "Adidas";
doc2["tags"] = {"starred"};
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Track Shoes";
doc3["brand"] = "Nike";
doc3["tags"] = {"twitter", "starred"};
doc3["points"] = 10;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
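// dynamic filter overrides on {brand} and {tags} with stop_processing disabled; the first also carries metadata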
nlohmann::json override_filter1_json = {
{"id", "dynamic-filter-1"},
{
"rule", {
{"query", "{brand}"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"stop_processing", false},
{"filter_by", "tags: twitter"},
{"metadata", {{"foo", "bar"}}},
};
override_t override_filter1;
auto op = override_t::parse(override_filter1_json, "dynamic-filter-1", override_filter1);
ASSERT_TRUE(op.ok());
coll1->add_override(override_filter1);
ASSERT_EQ("bar", override_filter1.to_json()["metadata"]["foo"].get<std::string>());
nlohmann::json override_filter2_json = {
{"id", "dynamic-filter-2"},
{
"rule", {
{"query", "{tags}"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"stop_processing", false},
{"filter_by", "tags: starred"}
};
override_t override_filter2;
op = override_t::parse(override_filter2_json, "dynamic-filter-2", override_filter2);
ASSERT_TRUE(op.ok());
coll1->add_override(override_filter2);
auto results = coll1->search("starred nike", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("bar", results["metadata"]["foo"].get<std::string>());
// when stop_processing is enabled (default is true)
override_filter1_json.erase("stop_processing");
override_filter2_json.erase("stop_processing");
override_t override_filter1_reset;
op = override_t::parse(override_filter1_json, "dynamic-filter-1", override_filter1_reset);
ASSERT_TRUE(op.ok());
override_t override_filter2_reset;
op = override_t::parse(override_filter2_json, "dynamic-filter-2", override_filter2_reset);
ASSERT_TRUE(op.ok());
coll1->add_override(override_filter1_reset);
coll1->add_override(override_filter2_reset);
results = coll1->search("starred nike", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, SynonymsAppliedToOverridenQuery) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("price", field_types::FLOAT, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["price"] = 399.99;
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "White Sneakers";
doc2["price"] = 149.99;
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Red Sneakers";
doc3["price"] = 49.99;
doc3["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
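// the "expensive" override filters on price > 100; the shoes -> sneakers synonym should apply to the query after the matched token is removed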
nlohmann::json override_json_contains = {
{"id", "static-filters"},
{
"rule", {
{"query", "expensive"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"filter_by", "price:> 100"}
};
override_t override_contains;
auto op = override_t::parse(override_json_contains, "static-filters", override_contains);
ASSERT_TRUE(op.ok());
coll1->add_override(override_contains);
coll1->add_synonym(R"({"id": "", "root": "shoes", "synonyms": ["sneakers"]})"_json);
auto results = coll1->search("expensive shoes", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, StaticFilterWithAndWithoutQueryStringMutation) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("price", field_types::FLOAT, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Apple iPad";
doc1["price"] = 399.99;
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Samsung Charger";
doc2["price"] = 49.99;
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Samsung Phone";
doc3["price"] = 249.99;
doc3["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
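// same "apple" rule tested twice: first keeping the matched token, then removing it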
nlohmann::json override_json_contains = {
{"id", "static-filters"},
{
"rule", {
{"query", "apple"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", false},
{"filter_by", "price:> 200"}
};
override_t override_contains;
auto op = override_t::parse(override_json_contains, "static-filters", override_contains);
ASSERT_TRUE(op.ok());
coll1->add_override(override_contains);
// first without query string mutation
auto results = coll1->search("apple", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// now, with query string mutation
override_json_contains = {
{"id", "static-filters"},
{
"rule", {
{"query", "apple"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"filter_by", "price:> 200"}
};
op = override_t::parse(override_json_contains, "static-filters", override_contains);
ASSERT_TRUE(op.ok());
coll1->add_override(override_contains);
results = coll1->search("apple", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringWithJustRemoveTokens) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),
field("brand", field_types::STRING, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["category"] = "shoes";
doc1["brand"] = "Nike";
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Track Gym";
doc2["category"] = "shoes";
doc2["brand"] = "Adidas";
doc2["points"] = 5;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Running Shoes";
doc3["category"] = "sports";
doc3["brand"] = "Nike";
doc3["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC"), sort_by("points", "DESC")};
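// baseline: without any override, "all" matches nothing at zero typos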
auto results = coll1->search("all", {"name", "category", "brand"}, "",
{}, sort_fields, {0, 0, 0}, 10).get();
ASSERT_EQ(0, results["hits"].size());
// with override, we return all records
nlohmann::json override_json = {
{"id", "match-all"},
{
"rule", {
{"query", "all"},
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true}
};
override_t override;
auto op = override_t::parse(override_json, "match-all", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
results = coll1->search("all", {"name", "category", "brand"}, "",
{}, sort_fields, {0, 0, 0}, 10).get();
ASSERT_EQ(3, results["hits"].size());
results = coll1->search("really amazing shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {0, 0, 0}, 0).get();
ASSERT_EQ(0, results["hits"].size());
// with contains
override_json = {
{"id", "remove-some-tokens"},
{
"rule", {
{"query", "really"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true}
};
override_t override2;
op = override_t::parse(override_json, "remove-some-tokens", override2);
ASSERT_TRUE(op.ok());
coll1->add_override(override2);
results = coll1->search("really amazing shoes", {"name", "category", "brand"}, "",
{}, sort_fields, {0, 0, 0}, 1).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, StaticSorting) {
Collection *coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("price", field_types::FLOAT, true),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["price"] = 399.99;
doc1["points"] = 3;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Track Shoes";
doc2["price"] = 49.99;
doc2["points"] = 5;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
std::vector<sort_by> sort_fields = { sort_by("_text_match", "DESC"), sort_by("points", "DESC") };
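// override replaces the sort order with price:desc for queries containing "shoes"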
nlohmann::json override_json_contains = {
{"id", "static-sort"},
{
"rule", {
{"query", "shoes"},
{"match", override_t::MATCH_CONTAINS}
}
},
{"remove_matched_tokens", true},
{"sort_by", "price:desc"}
};
override_t override_contains;
auto op = override_t::parse(override_json_contains, "static-sort", override_contains);
ASSERT_TRUE(op.ok());
// without override kicking in
auto results = coll1->search("shoes", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
// now add override
coll1->add_override(override_contains);
results = coll1->search("shoes", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
// with override we will sort on price
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, DynamicFilteringWithPartialTokenMatch) {
// when query tokens do not match placeholder field value exactly, don't do filtering
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields).get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Amazing Shoes";
doc1["category"] = "Running Shoes";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Magic Lamp";
doc2["category"] = "Shoo";
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Shox and Us";
doc3["category"] = "Socks";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC")};
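// baseline search before any override is added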
auto results = coll1->search("shoes", {"name"}, "",
{}, sort_fields, {0}, 10).get();
ASSERT_EQ(1, results["hits"].size());
// add a dynamic filtering override on the category field; partial token matches should not trigger it
nlohmann::json override_json = {
{"id", "dynamic-filter"},
{
"rule", {
{"query", "{ category }"},
{"match", override_t::MATCH_EXACT}
}
},
{"filter_by", "category:= {category}"},
{"remove_matched_tokens", true}
};
override_t override;
auto op = override_t::parse(override_json, "dynamic-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
results = coll1->search("shoes", {"name"}, "",
{}, sort_fields, {0}, 10).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("shox", {"name"}, "",
{}, sort_fields, {0}, 10).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, OverrideWithSymbolsToIndex) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "", static_cast<uint64_t>(std::time(nullptr)),
"", {"-"}, {}).get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Non-Stick";
doc1["category"] = "Cookware";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "NonStick";
doc2["category"] = "Kitchen";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC")};
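// before the override is added, "non-stick" matches both documents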
auto results = coll1->search("non-stick", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(2, results["hits"].size());
// add an override that filters to the Cookware category for the exact query "non-stick"
nlohmann::json override_json = {
{"id", "ov-1"},
{
"rule", {
{"query", "non-stick"},
{"match", override_t::MATCH_EXACT}
}
},
{"filter_by", "category:= Cookware"}
};
override_t override;
auto op = override_t::parse(override_json, "ov-1", override, "", {'-'}, {});
ASSERT_TRUE(op.ok());
coll1->add_override(override);
results = coll1->search("non-stick", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("nonstick", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(2, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, OverrideWithTags) {
/*
Suppose override1 is tagged tagA and tagB, override2 is tagged tagA, and override3 has no tags:
- If a search is tagged with tagA, we only consider overrides that contain tagA (override1 and override2),
  applying the usual logic: in alphabetical order of override name, processing more than one if stop rule
  processing is false.
- If a search is tagged with tagA and tagB, we first evaluate rules that contain both tagA and tagB,
  then rules that contain tagA or tagB, but never overrides that contain no tags. Within each group, we
  evaluate in alphabetical order and process multiple rules if stop rule processing is false.
- If a search has no tags, we only consider rules that have no tags.
*/
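// the searches below exercise each of these cases by passing different tag strings to search()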
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "queryA";
doc1["category"] = "kids";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "queryA";
doc2["category"] = "kitchen";
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Clay Toy";
doc3["category"] = "home";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC")};
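// sanity check before adding any overrides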
auto results = coll1->search("Clay", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(1, results["hits"].size());
// create overrides containing 2 tags, single tag and no tags:
nlohmann::json override_json1 = R"({
"id": "ov-1",
"rule": {
"query": "queryA",
"match": "exact",
"tags": ["alpha", "beta"]
},
"filter_by": "category: kids"
})"_json;
override_t override1;
auto op = override_t::parse(override_json1, "ov-1", override1);
ASSERT_TRUE(op.ok());
coll1->add_override(override1);
// single tag
nlohmann::json override_json2 = R"({
"id": "ov-2",
"rule": {
"query": "queryA",
"match": "exact",
"tags": ["alpha"]
},
"filter_by": "category: kitchen"
})"_json;
override_t override2;
op = override_t::parse(override_json2, "ov-2", override2);
ASSERT_TRUE(op.ok());
coll1->add_override(override2);
// no tag
nlohmann::json override_json3 = R"({
"id": "ov-3",
"rule": {
"query": "queryA",
"match": "exact"
},
"filter_by": "category: home"
})"_json;
override_t override3;
op = override_t::parse(override_json3, "ov-3", override3);
ASSERT_TRUE(op.ok());
coll1->add_override(override3);
// when the passed tag doesn't match any override, no override is applied and plain search results are returned
results = coll1->search("queryA", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "foo").get();
ASSERT_EQ(2, results["hits"].size());
// when multiple overrides match a given tag, only the first matching override is applied
results = coll1->search("queryA", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "alpha").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// single tag matching rule with multiple tags
results = coll1->search("queryA", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "beta").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// when multiple tags are passed, only consider rule with both tags
results = coll1->search("queryA", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "alpha,beta").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// query with no tags should only trigger override with no tags
results = coll1->search("queryA", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, OverrideWithTagsPartialMatch) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "queryA";
doc1["category"] = "kids";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "queryA";
doc2["category"] = "kitchen";
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Clay Toy";
doc3["category"] = "home";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC")};
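// ov-1: query "queryA" tagged [alpha, beta]; ov-2: query "queryB" tagged [alpha]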
nlohmann::json override_json1 = R"({
"id": "ov-1",
"rule": {
"query": "queryA",
"match": "exact",
"tags": ["alpha", "beta"]
},
"filter_by": "category: kids"
})"_json;
override_t override1;
auto op = override_t::parse(override_json1, "ov-1", override1);
ASSERT_TRUE(op.ok());
coll1->add_override(override1);
//
nlohmann::json override_json2 = R"({
"id": "ov-2",
"rule": {
"query": "queryB",
"match": "exact",
"tags": ["alpha"]
},
"filter_by": "category: kitchen"
})"_json;
override_t override2;
op = override_t::parse(override_json2, "ov-2", override2);
ASSERT_TRUE(op.ok());
coll1->add_override(override2);
// when only one of the two tags is found, apply that rule
auto results = coll1->search("queryB", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "alpha,zeta").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, OverrideWithTagsWithoutStopProcessing) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING_ARRAY, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "queryA";
doc1["category"] = {"kids"};
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "queryA";
doc2["category"] = {"kids", "kitchen"};
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Clay Toy";
doc3["category"] = {"home"};
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC")};
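// three overrides on "queryA": tagged [alpha, beta], tagged [alpha] (with metadata) and untagged; stop_processing is false throughout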
nlohmann::json override_json1 = R"({
"id": "ov-1",
"rule": {
"query": "queryA",
"match": "exact",
"tags": ["alpha", "beta"]
},
"stop_processing": false,
"remove_matched_tokens": false,
"filter_by": "category: kids"
})"_json;
override_t override1;
auto op = override_t::parse(override_json1, "ov-1", override1);
ASSERT_TRUE(op.ok());
coll1->add_override(override1);
//
nlohmann::json override_json2 = R"({
"id": "ov-2",
"rule": {
"query": "queryA",
"match": "exact",
"tags": ["alpha"]
},
"stop_processing": false,
"remove_matched_tokens": false,
"filter_by": "category: kitchen",
"metadata": {"foo": "bar"}
})"_json;
override_t override2;
op = override_t::parse(override_json2, "ov-2", override2);
ASSERT_TRUE(op.ok());
coll1->add_override(override2);
//
nlohmann::json override_json3 = R"({
"id": "ov-3",
"rule": {
"query": "queryA",
"match": "exact"
},
"stop_processing": false,
"remove_matched_tokens": false,
"filter_by": "category: home"
})"_json;
override_t override3;
op = override_t::parse(override_json3, "ov-3", override3);
ASSERT_TRUE(op.ok());
coll1->add_override(override3);
auto results = coll1->search("queryA", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "alpha").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("bar", results["metadata"]["foo"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, WildcardTagRuleThatMatchesAllQueries) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "queryA";
doc1["category"] = "kids";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "queryA";
doc2["category"] = "kitchen";
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Clay Toy";
doc3["category"] = "home";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC")};
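// a rule with neither tags nor query/match should be rejected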
nlohmann::json override_json1 = R"({
"id": "ov-1",
"rule": {
},
"filter_by": "category: kids"
})"_json;
override_t override1;
auto op = override_t::parse(override_json1, "ov-1", override1);
ASSERT_FALSE(op.ok());
ASSERT_EQ("The `rule` definition must contain either a `tags` or a `query` and `match`.", op.error());
override_json1 = R"({
"id": "ov-1",
"rule": {
"tags": ["*"]
},
"filter_by": "category: kids"
})"_json;
op = override_t::parse(override_json1, "ov-1", override1);
ASSERT_TRUE(op.ok());
coll1->add_override(override1);
// should match all search queries, even without passing any tags
std::string override_tags = "";
auto results = coll1->search("queryB", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", override_tags).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("queryA", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", override_tags).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// includes instead of filter_by
coll1->remove_override("ov-1");
auto override_json2 = R"({
"id": "ov-1",
"rule": {
"tags": ["*"]
},
"includes": [
{"id": "1", "position": 1}
]
})"_json;
override_t override2;
op = override_t::parse(override_json2, "ov-2", override2);
ASSERT_TRUE(op.ok());
coll1->add_override(override2);
results = coll1->search("foobar", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", override_tags).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, TagsOnlyRule) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING_ARRAY, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "queryA";
doc1["category"] = {"kids"};
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "queryA";
doc2["category"] = {"kitchen"};
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC")};
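// rule defined purely by a tag: it should apply only when that tag is passed at search time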
override_t override1;
auto override_json1 = R"({
"id": "ov-1",
"rule": {
"tags": ["listing"]
},
"filter_by": "category: kids"
})"_json;
auto op = override_t::parse(override_json1, "ov-1", override1);
ASSERT_TRUE(op.ok());
coll1->add_override(override1);
auto results = coll1->search("queryA", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "listing").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// with include rule
override_t override2;
auto override_json2 = R"({
"id": "ov-2",
"rule": {
"tags": ["listing2"]
},
"includes": [
{"id": "1", "position": 1}
]
})"_json;
op = override_t::parse(override_json2, "ov-2", override2);
ASSERT_TRUE(op.ok());
coll1->add_override(override2);
results = coll1->search("foobar", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "listing2").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
// no override tag passed: rule should not match
std::string override_tag = "";
results = coll1->search("foobar", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", override_tag).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, MetadataValidation) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING_ARRAY, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "queryA";
doc1["category"] = {"kids"};
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC")};
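// metadata must be a JSON object, not a string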
nlohmann::json override_json1 = R"({
"id": "ov-1",
"rule": {
"query": "queryA",
"match": "exact"
},
"filter_by": "category: kids",
"metadata": "foo"
})"_json;
override_t override1;
auto op = override_t::parse(override_json1, "ov-1", override1);
ASSERT_FALSE(op.ok());
ASSERT_EQ("The `metadata` must be a JSON object.", op.error());
// don't allow an override that has a rule but no action
override_json1 = R"({
"id": "ov-1",
"rule": {
"query": "queryA",
"match": "exact"
}
})"_json;
override_t override2;
op = override_t::parse(override_json1, "ov-2", override2);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Must contain one of: `includes`, `excludes`, `metadata`, `filter_by`, `sort_by`, "
"`remove_matched_tokens`, `replace_query`.", op.error());
// should allow only metadata to be present as action
override_json1 = R"({
"id": "ov-1",
"rule": {
"query": "queryA",
"match": "exact"
},
"metadata": {"foo": "bar"}
})"_json;
override_t override3;
op = override_t::parse(override_json1, "ov-3", override3);
ASSERT_TRUE(op.ok());
coll1->add_override(override3);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, WildcardSearchOverride) {
Collection* coll1;
std::vector<field> fields = {field("name", field_types::STRING, false),
field("category", field_types::STRING, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "").get();
}
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "queryA";
doc1["category"] = "kids";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "queryA";
doc2["category"] = "kitchen";
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Clay Toy";
doc3["category"] = "home";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
std::vector<sort_by> sort_fields = {sort_by("_text_match", "DESC")};
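// a wildcard ("*") query rule should apply to wildcard searches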
nlohmann::json override_json1 = R"({
"id": "ov-1",
"rule": {
"query": "*",
"match": "exact"
},
"filter_by": "category: kids"
})"_json;
override_t override1;
auto op = override_t::parse(override_json1, "ov-1", override1);
ASSERT_TRUE(op.ok());
coll1->add_override(override1);
std::string override_tags = "";
auto results = coll1->search("*", {}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", override_tags).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// includes instead of filter_by
coll1->remove_override("ov-1");
override_t override2;
auto override_json2 = R"({
"id": "ov-2",
"rule": {
"query": "*",
"match": "exact"
},
"includes": [
{"id": "1", "position": 1}
]
})"_json;
op = override_t::parse(override_json2, "ov-2", override2);
ASSERT_TRUE(op.ok());
coll1->add_override(override2);
results = coll1->search("*", {}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", override_tags).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionOverrideTest, OverridesPagination) {
Collection *coll2;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll2 = collectionManager.get_collection("coll2").get();
if(coll2 == nullptr) {
coll2 = collectionManager.create_collection("coll2", 1, fields, "points").get();
}
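// create 5 overrides named override1 .. override5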
for(int i = 0; i < 5; ++i) {
nlohmann::json override_json = {
{"id", "override"},
{
"rule", {
{"query", "not-found"},
{"match", override_t::MATCH_EXACT}
}
},
{"metadata", { {"foo", "bar"}}},
};
override_json["id"] = override_json["id"].get<std::string>() + std::to_string(i + 1);
override_t override;
override_t::parse(override_json, "", override);
coll2->add_override(override);
}
uint32_t limit = 0, offset = 0, i = 0;
// limit overrides to 2
limit=2;
auto override_op = coll2->get_overrides(limit);
auto override_map = override_op.get();
ASSERT_EQ(2, override_map.size());
i=offset;
for(const auto &kv : override_map) {
ASSERT_EQ("override" + std::to_string(i+1), kv.second->id);
++i;
}
// get 2 overrides from offset 3
offset=3;
override_op = coll2->get_overrides(limit, offset);
override_map = override_op.get();
ASSERT_EQ(2, override_map.size());
i=offset;
for(const auto &kv : override_map) {
ASSERT_EQ("override" + std::to_string(i+1), kv.second->id);
++i;
}
// get all overrides except the first
offset=1; limit=0;
override_op = coll2->get_overrides(limit, offset);
override_map = override_op.get();
ASSERT_EQ(4, override_map.size());
i=offset;
for(const auto &kv : override_map) {
ASSERT_EQ("override" + std::to_string(i+1), kv.second->id);
++i;
}
// get the last override
offset=4, limit=1;
override_op = coll2->get_overrides(limit, offset);
override_map = override_op.get();
ASSERT_EQ(1, override_map.size());
ASSERT_EQ("override5", override_map.begin()->second->id);
// if limit is greater than the number of overrides, return all from the offset
offset=0; limit=8;
override_op = coll2->get_overrides(limit, offset);
override_map = override_op.get();
ASSERT_EQ(5, override_map.size());
i=offset;
for(const auto &kv : override_map) {
ASSERT_EQ("override" + std::to_string(i+1), kv.second->id);
++i;
}
offset=3; limit=4;
override_op = coll2->get_overrides(limit, offset);
override_map = override_op.get();
ASSERT_EQ(2, override_map.size());
i=offset;
for(const auto &kv : override_map) {
ASSERT_EQ("override" + std::to_string(i+1), kv.second->id);
++i;
}
//invalid offset
offset=6; limit=0;
override_op = coll2->get_overrides(limit, offset);
ASSERT_FALSE(override_op.ok());
ASSERT_EQ("Invalid offset param.", override_op.error());
}
TEST_F(CollectionOverrideTest, RetrieveOverrideByID) {
Collection *coll2;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll2 = collectionManager.get_collection("coll2").get();
if (coll2 == nullptr) {
coll2 = collectionManager.create_collection("coll2", 1, fields, "points").get();
}
nlohmann::json override_json = {
{"id", "override"},
{
"rule", {
{"query", "not-found"},
{"match", override_t::MATCH_EXACT}
}
},
{"metadata", { {"foo", "bar"}}},
};
override_json["id"] = override_json["id"].get<std::string>() + "1";
override_t override;
override_t::parse(override_json, "", override);
coll2->add_override(override);
auto op = coll2->get_override("override1");
ASSERT_TRUE(op.ok());
}
TEST_F(CollectionOverrideTest, FilterPinnedHits) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
Collection* coll3 = collectionManager.get_collection("coll3").get();
if(coll3 == nullptr) {
coll3 = collectionManager.create_collection("coll3", 1, fields, "points").get();
}
nlohmann::json doc;
doc["title"] = "Snapdragon 7 gen 2023";
doc["points"] = 100;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
doc["title"] = "Snapdragon 732G 2023";
doc["points"] = 91;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
doc["title"] = "Snapdragon 4 gen 2023";
doc["points"] = 65;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
doc["title"] = "Mediatek Dimensity 720G 2022";
doc["points"] = 87;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
doc["title"] = "Mediatek Dimensity 470G 2023";
doc["points"] = 63;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
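// pin doc 3 at position 1 and doc 4 at position 2; start with filter_curated_hits disabled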
auto pinned_hits = "3:1, 4:2";
bool filter_curated_hits = false;
auto results = coll3->search("2023", {"title"}, "title: snapdragon", {}, {},
{0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,
"", 30, 5, "",
10, pinned_hits, {}, {}, 3,
"<mark>", "</mark>", {}, UINT_MAX,
true, false, true, "",
false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX,
INT16_MAX, 2, filter_curated_hits ).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("4", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][3]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][4]["document"]["id"].get<std::string>());
// when filter does not match, we should return only curated results
results = coll3->search("2023", {"title"}, "title: foobarbaz", {}, {},
{0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,
"", 30, 5, "",
10, pinned_hits, {}, {}, 3,
"<mark>", "</mark>", {}, UINT_MAX,
true, false, true, "",
false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX,
INT16_MAX, 2, filter_curated_hits ).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("4", results["hits"][1]["document"]["id"].get<std::string>());
// Filter does not match but with filter_curated_hits = true
filter_curated_hits = true;
results = coll3->search("2023", {"title"}, "title: foobarbaz", {}, {},
{0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,
"", 30, 5, "",
10, pinned_hits, {}, {}, 3,
"<mark>", "</mark>", {}, UINT_MAX,
true, false, true, "",
false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX,
INT16_MAX, 2, filter_curated_hits ).get();
ASSERT_EQ(0, results["hits"].size());
// filter should also apply to curated (pinned) results
results = coll3->search("2023", {"title"}, "points: >70", {}, {},
{0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,
"", 30, 5, "",
10, pinned_hits, {}, {}, 3,
"<mark>", "</mark>", {}, UINT_MAX,
true, false, true, "",
false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX,
INT16_MAX, 2, filter_curated_hits ).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
results = coll3->search("2023", {"title"}, "title: snapdragon", {}, {},
{0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,
"", 30, 5, "",
10, pinned_hits, {}, {}, 3,
"<mark>", "</mark>", {}, UINT_MAX,
true, false, true, "",
false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX,
INT16_MAX, 2, filter_curated_hits).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][2]["document"]["id"].get<std::string>());
// filter partially removes the pinned ids; the remaining pinned hit takes a higher position than its original assignment
results = coll3->search("snapdragon", {"title"}, "title: 2023", {}, {},
{0}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,
"", 30, 5, "",
10, pinned_hits, {}, {}, 3,
"<mark>", "</mark>", {}, UINT_MAX,
true, false, true, "",
false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX,
INT16_MAX, 2, filter_curated_hits).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("4", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
}
TEST_F(CollectionOverrideTest, AvoidTypoMatchingWhenOverlapWithCuratedData) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
Collection* coll3 = collectionManager.get_collection("coll3").get();
if (coll3 == nullptr) {
coll3 = collectionManager.create_collection("coll3", 1, fields, "points").get();
}
nlohmann::json doc;
doc["title"] = "Snapdragon 7 gen 2023";
doc["points"] = 100;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
doc["title"] = "Snapdragon 732G 2023";
doc["points"] = 91;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
doc["title"] = "Mediatak 4 gen 2023";
doc["points"] = 65;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
doc["title"] = "Mediatek Dimensity 720G 2022";
doc["points"] = 87;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
doc["title"] = "Mediatek Dimensity 470G 2023";
doc["points"] = 63;
ASSERT_TRUE(coll3->add(doc.dump()).ok());
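// docs 3 and 4 are pinned to the top two positions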
auto pinned_hits = "3:1, 4:2";
auto results = coll3->search("Mediatek", {"title"}, "", {}, {},
{2}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,
"", 30, 5, "",
1, pinned_hits, {}, {}, 3,
"<mark>", "</mark>", {}, UINT_MAX,
true, false, true, "",
false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX,
INT16_MAX, 2, false).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("4", results["hits"][1]["document"]["id"].get<std::string>());
// only typo match found: we should return both curated and typo hits
results = coll3->search("snapdragan", {"title"}, "", {}, {},
{2}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,
"", 30, 5, "",
10, pinned_hits, {}, {}, 3,
"<mark>", "</mark>", {}, UINT_MAX,
true, false, true, "",
false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX,
INT16_MAX, 2, false).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("4", results["hits"][1]["document"]["id"].get<std::string>());
}
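// Facet counts should be computed only from documents matching the filter, even when the pinned
// hit falls outside the filtered set and filter_curated_hits is enabled.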
TEST_F(CollectionOverrideTest, PinnedHitsAndFilteredFaceting) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "someprop", "index": true, "type": "string" },
{"name": "somefacet", "index": true, "type": "string", "facet": true },
{"name": "someotherfacet", "index": true, "type": "string", "facet": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc1 = R"({"id": "4711", "someprop": "doc 4711", "somefacet": "sfa", "someotherfacet": "sofa"})"_json;
nlohmann::json doc2 = R"({"id": "4712", "someprop": "doc 4712", "somefacet": "sfb", "someotherfacet": "sofb"})"_json;
nlohmann::json doc3 = R"({"id": "4713", "someprop": "doc 4713", "somefacet": "sfc", "someotherfacet": "sofc"})"_json;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
auto pinned_hits = "4712:1";
bool filter_curated_hits = true;
auto results = coll1->search("*", {}, "somefacet:=sfa", {"somefacet"}, {},
{2}, 50, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10,
"", 30, 5, "",
1, pinned_hits, {}, {}, 3,
"<mark>", "</mark>", {}, UINT_MAX,
true, false, true, "",
false, 6000 * 1000, 4, 7,
fallback, 4, {off}, INT16_MAX,
INT16_MAX, 2, filter_curated_hits).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("4711", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("sfa", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<int>());
}
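// Exclusion overrides should also apply to semantic (embedding-based) searches.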
TEST_F(CollectionOverrideTest, OverridesWithSemanticSearch) {
auto schema_json = R"({
"name": "products",
"fields":[
{
"name": "product_name",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"product_name"
],
"model_config": {
"model_name": "ts/clip-vit-b-p32"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto coll_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(coll_op.ok());
auto coll = coll_op.get();
std::vector<std::string> products = {"Cell Phone", "Laptop", "Desktop", "Printer", "Keyboard", "Monitor", "Mouse"};
nlohmann::json doc;
for (auto product: products) {
doc["product_name"] = product;
ASSERT_TRUE(coll->add(doc.dump()).ok());
}
auto results = coll->search("phone", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
{"embedding"}).get();
ASSERT_EQ(results["found"], 7);
nlohmann::json override_json = {
{"id", "exclude-rule"},
{
"rule", {
{"query", "phone"},
{"match", override_t::MATCH_CONTAINS}
}
}
};
override_json["excludes"] = nlohmann::json::array();
override_json["excludes"][0] = nlohmann::json::object();
override_json["excludes"][0]["id"] = "0";
override_t override;
override_t::parse(override_json, "", override);
ASSERT_TRUE(coll->add_override(override).ok());
results = coll->search("phone", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
{"embedding"}).get();
ASSERT_EQ(results["found"], 6);
ASSERT_EQ(results["hits"][0]["document"]["id"], "4");
ASSERT_EQ(results["hits"][1]["document"]["id"], "6");
ASSERT_EQ(results["hits"][2]["document"]["id"], "1");
ASSERT_EQ(results["hits"][3]["document"]["id"], "5");
ASSERT_EQ(results["hits"][4]["document"]["id"], "2");
ASSERT_EQ(results["hits"][5]["document"]["id"], "3");
}
| 180,169 | C++ | .cpp | 3,668 | 37.743457 | 128 | 0.521386 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,722 | app_metrics_test.cpp | typesense_typesense/test/app_metrics_test.cpp |
#include <gtest/gtest.h>
#include "app_metrics.h"
class AppMetricsTest : public ::testing::Test {
protected:
AppMetrics& metrics = AppMetrics::get_instance();
virtual void SetUp() {
}
virtual void TearDown() {
}
};
TEST_F(AppMetricsTest, StatefulRemoveDocs) {
metrics.increment_count("GET /collections", 1);
metrics.increment_count("GET /collections", 1);
metrics.increment_count("GET /operations/vote", 1);
metrics.increment_duration("GET /collections", 2);
metrics.increment_duration("GET /collections", 4);
metrics.increment_duration("GET /operations/vote", 5);
metrics.increment_count(AppMetrics::SEARCH_LABEL, 1);
metrics.increment_count(AppMetrics::SEARCH_LABEL, 1);
metrics.increment_duration(AppMetrics::SEARCH_LABEL, 16);
metrics.increment_duration(AppMetrics::SEARCH_LABEL, 12);
metrics.window_reset();
nlohmann::json result;
metrics.get("rps", "latency", result);
ASSERT_EQ(result["search_latency"].get<double>(), 14.0);
ASSERT_EQ(result["search_rps"].get<double>(), 0.2);
ASSERT_EQ(result["latency"]["GET /collections"].get<double>(), 3.0);
ASSERT_EQ(result["latency"]["GET /operations/vote"].get<double>(), 5.0);
ASSERT_EQ(result["rps"]["GET /collections"].get<double>(), 0.2);
ASSERT_EQ(result["rps"]["GET /operations/vote"].get<double>(), 0.1);
}
| 1,372 | C++ | .cpp | 31 | 39.709677 | 76 | 0.69254 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,723 | filter_test.cpp | typesense_typesense/test/filter_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <collection_manager.h>
#include <filter.h>
#include <posting.h>
#include <chrono>
#include "collection.h"
class FilterTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_join";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
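// Exercises filter_result_iterator_t against the documents in test/numeric_array_documents.jsonl;
// the loops below iterate over seq ids 0..4, implying five seed documents, with more added later in the test.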
TEST_F(FilterTest, FilterTreeIterator) {
nlohmann::json schema =
R"({
"name": "Collection",
"fields": [
{"name": "name", "type": "string"},
{"name": "age", "type": "int32"},
{"name": "years", "type": "int32[]"},
{"name": "rating", "type": "float"},
{"name": "tags", "type": "string[]"}
]
})"_json;
Collection* coll = collectionManager.create_collection(schema).get();
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::string json_line;
while (std::getline(infile, json_line)) {
auto add_op = coll->add(json_line);
ASSERT_TRUE(add_op.ok());
}
infile.close();
const std::string doc_id_prefix = std::to_string(coll->get_collection_id()) + "_" + Collection::DOC_ID_PREFIX + "_";
filter_node_t* filter_tree_root = nullptr;
auto const enable_lazy_evaluation = true;
auto iter_null_filter_tree_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_null_filter_tree_test.init_status().ok());
ASSERT_EQ(filter_result_iterator_t::invalid, iter_null_filter_tree_test.validity);
Option<bool> filter_op = filter::parse_filter_query("name: foo", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_no_match_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_no_match_test.init_status().ok());
ASSERT_EQ(filter_result_iterator_t::invalid, iter_no_match_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: [foo bar, baz]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_no_match_multi_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_no_match_multi_test.init_status().ok());
ASSERT_EQ(filter_result_iterator_t::invalid, iter_no_match_multi_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: Jeremy", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_contains_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_contains_test.init_status().ok());
for (uint32_t i = 0; i < 5; i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_contains_test.validity);
ASSERT_EQ(i, iter_contains_test.seq_id);
iter_contains_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_contains_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: [Jeremy, Howard, Richard]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_contains_multi_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_contains_multi_test.init_status().ok());
for (uint32_t i = 0; i < 5; i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_contains_multi_test.validity);
ASSERT_EQ(i, iter_contains_multi_test.seq_id);
iter_contains_multi_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_contains_multi_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name:= Jeremy Howard", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_exact_match_1_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_exact_match_1_test.init_status().ok());
for (uint32_t i = 0; i < 5; i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_exact_match_1_test.validity);
ASSERT_EQ(i, iter_exact_match_1_test.seq_id);
iter_exact_match_1_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_exact_match_1_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags:= PLATINUM", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_exact_match_2_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_exact_match_2_test.init_status().ok());
ASSERT_EQ(filter_result_iterator_t::invalid, iter_exact_match_2_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags:= [gold, silver]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_exact_match_multi_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_exact_match_multi_test.init_status().ok());
std::vector<int> expected = {0, 2, 3, 4};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_exact_match_multi_test.validity);
ASSERT_EQ(i, iter_exact_match_multi_test.seq_id);
iter_exact_match_multi_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_exact_match_multi_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags:!= gold", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_not_equals_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_test.init_status().ok());
expected = {1, 3};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_test.validity);
ASSERT_EQ(i, iter_not_equals_test.seq_id);
iter_not_equals_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: James || tags: bronze", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto doc =
R"({
"name": "James Rowdy",
"age": 36,
"years": [2005, 2022],
"rating": 6.03,
"tags": ["copper"]
})"_json;
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto iter_or_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_or_test.init_status().ok());
expected = {2, 4, 5};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_or_test.validity);
ASSERT_EQ(i, iter_or_test.seq_id);
iter_or_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_or_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: James || (tags: gold && tags: silver)", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_complex_filter_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_complex_filter_test.init_status().ok());
ASSERT_EQ(filter_result_iterator_t::valid, iter_complex_filter_test.validity);
ASSERT_EQ(0, iter_complex_filter_test.is_valid(3));
ASSERT_EQ(4, iter_complex_filter_test.seq_id);
expected = {4, 5};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_complex_filter_test.validity);
ASSERT_EQ(i, iter_complex_filter_test.seq_id);
iter_complex_filter_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_complex_filter_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: James || (tags: gold && tags: [silver, bronze])", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_validate_ids_test1 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_validate_ids_test1.init_status().ok());
std::vector<int> validate_ids = {0, 1, 2, 3, 4, 5, 6};
std::vector<int> seq_ids = {0, 2, 2, 4, 4, 5, 5};
expected = {1, 0, 1, 0, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_validate_ids_test1.is_valid(validate_ids[i]));
ASSERT_EQ(seq_ids[i], iter_validate_ids_test1.seq_id);
}
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: platinum || name: James", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_validate_ids_test2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_validate_ids_test2.init_status().ok());
validate_ids = {0, 1, 2, 3, 4, 5, 6}, seq_ids = {1, 1, 5, 5, 5, 5, 5};
expected = {0, 1, 0, 0, 0, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_validate_ids_test2.is_valid(validate_ids[i]));
ASSERT_EQ(seq_ids[i], iter_validate_ids_test2.seq_id);
}
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: gold && rating: < 6", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_validate_ids_test3 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_validate_ids_test3.init_status().ok());
ASSERT_TRUE(iter_validate_ids_test3._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5, 6}, seq_ids = {0, 4, 4, 4, 4, 4, 4};
expected = {1, 0, 0, 0, 1, -1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_validate_ids_test3.is_valid(validate_ids[i]));
ASSERT_EQ(seq_ids[i], iter_validate_ids_test3.seq_id);
}
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: gold", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_compact_plist_contains_atleast_one_test1 = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, enable_lazy_evaluation);
ASSERT_TRUE(iter_compact_plist_contains_atleast_one_test1.init_status().ok());
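// At this point 'tags: gold' matches seq ids 0, 2 and 4, so a posting list with ids {1, 3, 5}
// has no overlap, while one containing 4 does.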
std::vector<uint32_t> ids = {1, 3, 5};
std::vector<uint32_t> offset_index = {0, 3, 6};
std::vector<uint32_t> offsets = {0, 3, 4, 0, 3, 4, 0, 3, 4};
compact_posting_list_t* c_list1 = compact_posting_list_t::create(3, &ids[0], &offset_index[0], 9, &offsets[0]);
ASSERT_FALSE(iter_compact_plist_contains_atleast_one_test1.contains_atleast_one(SET_COMPACT_POSTING(c_list1)));
free(c_list1);
auto iter_compact_plist_contains_atleast_one_test2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, enable_lazy_evaluation);
ASSERT_TRUE(iter_compact_plist_contains_atleast_one_test2.init_status().ok());
ids = {1, 3, 4};
offset_index = {0, 3, 6};
offsets = {0, 3, 4, 0, 3, 4, 0, 3, 4};
compact_posting_list_t* c_list2 = compact_posting_list_t::create(3, &ids[0], &offset_index[0], 9, &offsets[0]);
ASSERT_TRUE(iter_compact_plist_contains_atleast_one_test2.contains_atleast_one(SET_COMPACT_POSTING(c_list2)));
free(c_list2);
auto iter_plist_contains_atleast_one_test1 = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, enable_lazy_evaluation);
ASSERT_TRUE(iter_plist_contains_atleast_one_test1.init_status().ok());
posting_list_t p_list1(2);
ids = {1, 3};
for (const auto &i: ids) {
p_list1.upsert(i, {1, 2, 3});
}
ASSERT_FALSE(iter_plist_contains_atleast_one_test1.contains_atleast_one(&p_list1));
auto iter_plist_contains_atleast_one_test2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root);
ASSERT_TRUE(iter_plist_contains_atleast_one_test2.init_status().ok());
posting_list_t p_list2(2);
ids = {1, 3, 4};
for (const auto &i: ids) {
p_list2.upsert(i, {1, 2, 3});
}
ASSERT_TRUE(iter_plist_contains_atleast_one_test2.contains_atleast_one(&p_list2));
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags:= [gold, silver]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_reset_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_reset_test.init_status().ok());
expected = {0, 2, 3, 4};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_reset_test.validity);
ASSERT_EQ(i, iter_reset_test.seq_id);
iter_reset_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_reset_test.validity);
iter_reset_test.reset();
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_reset_test.validity);
ASSERT_EQ(i, iter_reset_test.seq_id);
iter_reset_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_reset_test.validity);
auto iter_move_assignment_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
iter_reset_test.reset();
iter_move_assignment_test = std::move(iter_reset_test);
expected = {0, 2, 3, 4};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_move_assignment_test.validity);
ASSERT_EQ(i, iter_move_assignment_test.seq_id);
iter_move_assignment_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_move_assignment_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: gold", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_to_array_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_to_array_test.init_status().ok());
uint32_t* filter_ids = nullptr;
uint32_t filter_ids_length;
iter_to_array_test.compute_iterators();
filter_ids_length = iter_to_array_test.to_filter_id_array(filter_ids);
ASSERT_EQ(3, filter_ids_length);
expected = {0, 2, 4};
for (uint32_t i = 0; i < filter_ids_length; i++) {
ASSERT_EQ(expected[i], filter_ids[i]);
}
delete[] filter_ids;
auto iter_and_scalar_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_and_scalar_test.init_status().ok());
uint32_t a_ids[6] = {0, 1, 3, 4, 5, 6};
uint32_t* and_result = nullptr;
uint32_t and_result_length;
and_result_length = iter_and_scalar_test.and_scalar(a_ids, 6, and_result);
ASSERT_EQ(2, and_result_length);
expected = {0, 4};
for (uint32_t i = 0; i < and_result_length; i++) {
ASSERT_EQ(expected[i], and_result[i]);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_and_scalar_test.validity);
delete[] and_result;
delete filter_tree_root;
doc = R"({
"name": "James Rowdy",
"age": 36,
"years": [2005, 2022],
"rating": 6.03,
"tags": ["FINE PLATINUM"]
})"_json;
add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: bronze", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
auto iter_add_phrase_ids_test = new filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
std::unique_ptr<filter_result_iterator_t> filter_iter_guard(iter_add_phrase_ids_test);
ASSERT_TRUE(iter_add_phrase_ids_test->init_status().ok());
auto phrase_ids = new uint32_t[4];
for (uint32_t i = 0; i < 4; i++) {
phrase_ids[i] = i * 2;
}
filter_result_iterator_t::add_phrase_ids(iter_add_phrase_ids_test, phrase_ids, 4);
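// add_phrase_ids may replace the iterator with a wrapper that takes ownership of the original,
// so release the guard and re-seat it on the (possibly new) pointer.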
filter_iter_guard.release();
filter_iter_guard.reset(iter_add_phrase_ids_test);
ASSERT_EQ(filter_result_iterator_t::valid, iter_add_phrase_ids_test->validity);
ASSERT_EQ(2, iter_add_phrase_ids_test->seq_id);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: [gold]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_string_multi_value_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_string_multi_value_test.init_status().ok());
ASSERT_FALSE(iter_string_multi_value_test._get_is_filter_result_initialized());
expected = {0, 2, 4};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_string_multi_value_test.validity);
ASSERT_EQ(i, iter_string_multi_value_test.seq_id);
iter_string_multi_value_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_string_multi_value_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags:= bronze", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_string_equals_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_string_equals_test.init_status().ok());
ASSERT_TRUE(iter_string_equals_test._get_is_filter_result_initialized());
expected = {2, 4};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_string_equals_test.validity);
ASSERT_EQ(i, iter_string_equals_test.seq_id);
iter_string_equals_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_string_equals_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: gold", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_string_equals_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_string_equals_test_2.init_status().ok());
ASSERT_FALSE(iter_string_equals_test_2._get_is_filter_result_initialized());
expected = {0, 2, 4};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_string_equals_test_2.validity);
ASSERT_EQ(i, iter_string_equals_test_2.seq_id);
iter_string_equals_test_2.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_string_equals_test_2.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: != [gold, silver]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_string_not_equals_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_string_not_equals_test_2.init_status().ok());
ASSERT_TRUE(iter_string_not_equals_test_2._get_is_filter_result_initialized());
expected = {1, 5, 6};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_string_not_equals_test_2.validity);
ASSERT_EQ(i, iter_string_not_equals_test_2.seq_id);
iter_string_not_equals_test_2.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_string_not_equals_test_2.validity);
coll->remove("0");
coll->remove("2");
doc = R"({
"name": "James Rowdy",
"age": 16,
"years": [2022],
"rating": 2.03,
"tags": ["FINE PLATINUM"]
})"_json;
add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
delete filter_tree_root;
Collection *bool_coll;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("in_stock", field_types::BOOL, false),
field("points", field_types::INT32, false),};
bool_coll = collectionManager.get_collection("bool_coll").get();
if(bool_coll == nullptr) {
bool_coll = collectionManager.create_collection("bool_coll", 1, fields, "points").get();
}
for(size_t i=0; i<10; i++) {
nlohmann::json bool_doc;
bool_doc["title"] = "title_" + std::to_string(i);
bool_doc["in_stock"] = (i < 5 || i % 2) ? "true" : "false";
bool_doc["points"] = i;
ASSERT_TRUE(bool_coll->add(bool_doc.dump()).ok());
}
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("in_stock: false", bool_coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_boolean_test = filter_result_iterator_t(bool_coll->get_name(), bool_coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_boolean_test.init_status().ok());
ASSERT_TRUE(iter_boolean_test._get_is_filter_result_initialized());
ASSERT_EQ(2, iter_boolean_test.approx_filter_ids_length);
expected = {6, 8};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_boolean_test.validity);
ASSERT_EQ(i, iter_boolean_test.seq_id);
iter_boolean_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_boolean_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("in_stock: true", bool_coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_boolean_test_2 = filter_result_iterator_t(bool_coll->get_name(), bool_coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_boolean_test_2.init_status().ok());
ASSERT_FALSE(iter_boolean_test_2._get_is_filter_result_initialized());
ASSERT_EQ(8, iter_boolean_test_2.approx_filter_ids_length);
expected = {0, 1, 2, 3, 4, 5, 7, 9};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_boolean_test_2.validity);
ASSERT_EQ(i, iter_boolean_test_2.seq_id);
iter_boolean_test_2.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_boolean_test_2.validity);
iter_boolean_test_2.reset();
expected = {0, 1, 2, 3, 4, 5, 7, 9};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_boolean_test_2.validity);
ASSERT_EQ(i, iter_boolean_test_2.seq_id);
iter_boolean_test_2.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_boolean_test_2.validity);
iter_boolean_test_2.reset();
ASSERT_EQ(0, iter_boolean_test_2.is_valid(6));
ASSERT_EQ(filter_result_iterator_t::valid, iter_boolean_test_2.validity);
ASSERT_EQ(7, iter_boolean_test_2.seq_id);
ASSERT_EQ(0, iter_boolean_test_2.is_valid(8));
ASSERT_EQ(filter_result_iterator_t::valid, iter_boolean_test_2.validity);
ASSERT_EQ(9, iter_boolean_test_2.seq_id);
ASSERT_EQ(-1, iter_boolean_test_2.is_valid(10));
ASSERT_EQ(filter_result_iterator_t::invalid, iter_boolean_test_2.validity);
delete filter_tree_root;
doc = R"({
"name": "James rock",
"age": 20,
"years": [],
"rating": 4.51,
"tags": ["gallium", "Gadolinium"]
})"_json;
add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
search_stop_us = UINT64_MAX; // `Index::fuzzy_search_fields` checks for timeout.
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: g*", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_string_prefix_value_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_string_prefix_value_test.init_status().ok());
ASSERT_FALSE(iter_string_prefix_value_test._get_is_filter_result_initialized());
ASSERT_EQ(3, iter_string_prefix_value_test.approx_filter_ids_length); // documents 0 and 2 have been deleted.
expected = {4, 8};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_string_prefix_value_test.validity);
ASSERT_EQ(i, iter_string_prefix_value_test.seq_id);
iter_string_prefix_value_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_string_prefix_value_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: != g*", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_string_prefix_value_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_string_prefix_value_test_2.init_status().ok());
ASSERT_FALSE(iter_string_prefix_value_test_2._get_is_filter_result_initialized());
ASSERT_EQ(4, iter_string_prefix_value_test_2.approx_filter_ids_length); // 7 total docs, 3 approx count for equals.
validate_ids = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
seq_ids = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9};
expected = {1, 1, 1, 1, 0, 1, 1, 1, 0, -1};
std::vector<uint32_t > equals_match_seq_ids = {4, 4, 4, 4, 4, 8, 8, 8, 8, 8};
std::vector<bool> equals_iterator_valid = {true, true, true, true, true, true, true, true, true, true};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_string_prefix_value_test_2.validity);
ASSERT_EQ(expected[i], iter_string_prefix_value_test_2.is_valid(validate_ids[i]));
ASSERT_EQ(equals_match_seq_ids[i], iter_string_prefix_value_test_2._get_equals_iterator_id());
ASSERT_EQ(equals_iterator_valid[i], iter_string_prefix_value_test_2._get_is_equals_iterator_valid());
if (expected[i] == 1) {
iter_string_prefix_value_test_2.next();
}
ASSERT_EQ(seq_ids[i], iter_string_prefix_value_test_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_string_prefix_value_test_2.validity);
delete filter_tree_root;
}
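// A filter iterator should report timed_out once its allotted duration elapses; reset() does not
// clear the timeout, and get_n_ids() only yields results when the timeout is explicitly overridden.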
TEST_F(FilterTest, FilterTreeIteratorTimeout) {
auto count = 20;
auto filter_ids = new uint32_t[count];
for (auto i = 0; i < count; i++) {
filter_ids[i] = i;
}
auto filter_iterator = new filter_result_iterator_t(filter_ids, count, DEFAULT_FILTER_BY_CANDIDATES,
std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count(),
10000000); // Timeout after 10 seconds
std::unique_ptr<filter_result_iterator_t> filter_iter_guard(filter_iterator);
ASSERT_EQ(filter_result_iterator_t::valid, filter_iterator->validity);
std::this_thread::sleep_for(std::chrono::seconds(5));
for (auto i = 0; i < 20; i++) {
ASSERT_EQ(filter_result_iterator_t::valid, filter_iterator->validity);
filter_iterator->next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, filter_iterator->validity); // End of iterator reached.
filter_iterator->reset();
ASSERT_EQ(filter_result_iterator_t::valid, filter_iterator->validity);
std::this_thread::sleep_for(std::chrono::seconds(5));
for (auto i = 0; i < 9; i++) {
ASSERT_EQ(filter_result_iterator_t::valid, filter_iterator->validity);
filter_iterator->next();
}
ASSERT_EQ(filter_result_iterator_t::timed_out, filter_iterator->validity);
filter_iterator->reset();
ASSERT_EQ(filter_result_iterator_t::timed_out, filter_iterator->validity); // Resetting won't help with timeout.
uint32_t excluded_result_index = 0;
auto result = new filter_result_t();
filter_iterator->get_n_ids(count, excluded_result_index, nullptr, 0, result);
ASSERT_EQ(0, result->count); // Shouldn't return results
delete result;
filter_iterator->reset(true);
result = new filter_result_t();
filter_iterator->get_n_ids(count, excluded_result_index, nullptr, 0, result, true);
ASSERT_EQ(count, result->count); // With `override_timeout` true, we should get result.
delete result;
}
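// If either operand of an AND has zero matches, the whole subtree should be materialized as an
// empty, initialized result with no child iterators; an OR over such subtrees remains lazily evaluated.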
TEST_F(FilterTest, FilterTreeInitialization) {
nlohmann::json schema =
R"({
"name": "Collection",
"fields": [
{"name": "name", "type": "string"},
{"name": "age", "type": "int32"},
{"name": "years", "type": "int32[]"},
{"name": "rating", "type": "float"},
{"name": "tags", "type": "string[]"}
]
})"_json;
Collection* coll = collectionManager.create_collection(schema).get();
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::string json_line;
while (std::getline(infile, json_line)) {
auto add_op = coll->add(json_line);
ASSERT_TRUE(add_op.ok());
}
infile.close();
const std::string doc_id_prefix = std::to_string(coll->get_collection_id()) + "_" + Collection::DOC_ID_PREFIX + "_";
filter_node_t* filter_tree_root = nullptr;
Option<bool> filter_op = filter::parse_filter_query("age: 0 && (rating: >0 && years: 2016)", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto const enable_lazy_evaluation = true;
auto iter_left_subtree_0_matches = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_left_subtree_0_matches.init_status().ok());
ASSERT_EQ(filter_result_iterator_t::invalid, iter_left_subtree_0_matches.validity);
ASSERT_EQ(0, iter_left_subtree_0_matches.approx_filter_ids_length);
ASSERT_TRUE(iter_left_subtree_0_matches._get_is_filter_result_initialized());
ASSERT_EQ(nullptr, iter_left_subtree_0_matches._get_left_it());
ASSERT_EQ(nullptr, iter_left_subtree_0_matches._get_right_it());
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("(rating: >0 && years: 2016) && age: 0", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_right_subtree_0_matches = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_right_subtree_0_matches.init_status().ok());
ASSERT_EQ(filter_result_iterator_t::invalid, iter_right_subtree_0_matches.validity);
ASSERT_EQ(0, iter_right_subtree_0_matches.approx_filter_ids_length);
ASSERT_TRUE(iter_right_subtree_0_matches._get_is_filter_result_initialized());
ASSERT_EQ(nullptr, iter_right_subtree_0_matches._get_left_it());
ASSERT_EQ(nullptr, iter_right_subtree_0_matches._get_right_it());
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("(age: 0 && rating: >0) || (age: 0 && rating: >0)", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_inner_subtree_0_matches = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_inner_subtree_0_matches.init_status().ok());
ASSERT_EQ(filter_result_iterator_t::invalid, iter_inner_subtree_0_matches.validity);
ASSERT_EQ(0, iter_inner_subtree_0_matches.approx_filter_ids_length);
ASSERT_FALSE(iter_inner_subtree_0_matches._get_is_filter_result_initialized());
ASSERT_NE(nullptr, iter_inner_subtree_0_matches._get_left_it());
ASSERT_NE(nullptr, iter_inner_subtree_0_matches._get_right_it());
delete filter_tree_root;
filter_tree_root = nullptr;
}
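// Covers != string filters: single values, multi-token values, value lists, and their combinations
// with || and && clauses.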
TEST_F(FilterTest, NotEqualsStringFilter) {
nlohmann::json schema =
R"({
"name": "Collection",
"fields": [
{"name": "name", "type": "string"},
{"name": "tags", "type": "string[]"}
]
})"_json;
Collection* coll = collectionManager.create_collection(schema).get();
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::string json_line;
while (std::getline(infile, json_line)) {
auto add_op = coll->add(json_line);
ASSERT_TRUE(add_op.ok());
}
infile.close();
const std::string doc_id_prefix = std::to_string(coll->get_collection_id()) + "_" + Collection::DOC_ID_PREFIX + "_";
filter_node_t* filter_tree_root = nullptr;
Option<bool> filter_op = filter::parse_filter_query("tags:!= gold", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto const enable_lazy_evaluation = true;
auto computed_not_equals_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(computed_not_equals_test.init_status().ok());
ASSERT_TRUE(computed_not_equals_test._get_is_filter_result_initialized());
std::vector<int> expected = {1, 3};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, computed_not_equals_test.validity);
ASSERT_EQ(i, computed_not_equals_test.seq_id);
computed_not_equals_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, computed_not_equals_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: != fine platinum", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_string_not_equals_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_string_not_equals_test.init_status().ok());
ASSERT_FALSE(iter_string_not_equals_test._get_is_filter_result_initialized());
std::vector<uint32_t> validate_ids = {0, 1, 2, 3, 4, 5};
std::vector<uint32_t> seq_ids = {1, 2, 3, 4, 5, 5};
std::vector<uint32_t> equals_match_seq_ids = {1, 1, 1, 1, 1, 1};
std::vector<bool> equals_iterator_valid = {true, true, false, false, false, false};
expected = {1, 0, 1, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_string_not_equals_test.validity);
ASSERT_EQ(expected[i], iter_string_not_equals_test.is_valid(validate_ids[i]));
ASSERT_EQ(equals_match_seq_ids[i], iter_string_not_equals_test._get_equals_iterator_id());
ASSERT_EQ(equals_iterator_valid[i], iter_string_not_equals_test._get_is_equals_iterator_valid());
if (expected[i] == 1) {
iter_string_not_equals_test.next();
}
ASSERT_EQ(seq_ids[i], iter_string_not_equals_test.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_string_not_equals_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: != [gold, silver]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_string_array_not_equals_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_string_array_not_equals_test.init_status().ok());
ASSERT_FALSE(iter_string_array_not_equals_test._get_is_filter_result_initialized());
ASSERT_EQ(5, iter_string_array_not_equals_test.approx_filter_ids_length);
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 4, 5, 5};
expected = {0, 1, 0, 0, 0, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_string_array_not_equals_test.validity);
ASSERT_EQ(expected[i], iter_string_array_not_equals_test.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_string_array_not_equals_test.next();
}
ASSERT_EQ(seq_ids[i], iter_string_array_not_equals_test.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_string_array_not_equals_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
auto docs = {
R"({
"name": "James Rowdy",
"tags": ["copper"]
})"_json,
R"({
"name": "James Rowdy",
"tags": ["copper"]
})"_json,
R"({
"name": "James Rowdy",
"tags": ["gold"]
})"_json
};
for (auto const& doc: docs) {
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
filter_op = filter::parse_filter_query("tags: != gold", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_string_not_equals_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_string_not_equals_test_2.init_status().ok());
ASSERT_FALSE(iter_string_not_equals_test_2._get_is_filter_result_initialized());
validate_ids = {1, 2, 3, 4, 5, 6, 7, 8};
seq_ids = {2, 3, 4, 5, 6, 7, 8, 8};
expected = {1, 0, 1, 0, 1, 1, 0, -1};
equals_match_seq_ids = {2, 2, 4, 4, 7, 7, 7, 7};
equals_iterator_valid = {true, true, true, true, true, true, true, true};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_string_not_equals_test_2.validity);
ASSERT_EQ(expected[i], iter_string_not_equals_test_2.is_valid(validate_ids[i]));
ASSERT_EQ(equals_match_seq_ids[i], iter_string_not_equals_test_2._get_equals_iterator_id());
ASSERT_EQ(equals_iterator_valid[i], iter_string_not_equals_test_2._get_is_equals_iterator_valid());
if (expected[i] == 1) {
iter_string_not_equals_test_2.next();
}
ASSERT_EQ(seq_ids[i], iter_string_not_equals_test_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_string_not_equals_test_2.validity);
iter_string_not_equals_test_2.reset();
validate_ids = {2, 5, 7, 8};
seq_ids = {3, 6, 8, 8};
expected = {0, 1, 0, -1};
equals_match_seq_ids = {2, 7, 7, 7};
equals_iterator_valid = {true, true, true, true};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_string_not_equals_test_2.validity);
ASSERT_EQ(expected[i], iter_string_not_equals_test_2.is_valid(validate_ids[i]));
ASSERT_EQ(equals_match_seq_ids[i], iter_string_not_equals_test_2._get_equals_iterator_id());
ASSERT_EQ(equals_iterator_valid[i], iter_string_not_equals_test_2._get_is_equals_iterator_valid());
if (expected[i] == 1) {
iter_string_not_equals_test_2.next();
}
ASSERT_EQ(seq_ids[i], iter_string_not_equals_test_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_string_not_equals_test_2.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: James || tags: != bronze", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_not_equals_or_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, enable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_or_test.init_status().ok());
ASSERT_FALSE(iter_not_equals_or_test._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5, 6, 7, 8};
seq_ids = {1, 2, 3, 4, 5, 6, 7, 8, 8};
expected = {1, 1, 0, 1, 0, 1, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_or_test.validity);
ASSERT_EQ(expected[i], iter_not_equals_or_test.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_not_equals_or_test.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_or_test.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_or_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: != silver || tags: != gold", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_not_equals_or_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, enable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_or_test_2.init_status().ok());
validate_ids = {0, 1, 2, 3, 4, 5, 6, 7, 8};
seq_ids = {1, 2, 3, 4, 5, 6, 7, 8, 8};
expected = {0, 1, 1, 1, 0, 1, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_or_test_2.validity);
ASSERT_EQ(expected[i], iter_not_equals_or_test_2.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_not_equals_or_test_2.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_or_test_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_or_test_2.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: James && tags: != gold", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_not_equals_and_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, enable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_and_test.init_status().ok());
ASSERT_TRUE(iter_not_equals_and_test._get_is_filter_result_initialized());
validate_ids = {4, 5, 6, 7};
seq_ids = {5, 6, 6, 6};
expected = {0, 1, 1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_and_test.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_not_equals_and_test.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_not_equals_and_test.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_and_test.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_and_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("tags: != silver && tags: != gold", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
ASSERT_TRUE(iter_not_equals_and_test._get_is_filter_result_initialized());
auto iter_not_equals_and_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, enable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_and_test_2.init_status().ok());
validate_ids = {0, 1, 2, 3, 4, 5, 6, 7, 8};
seq_ids = {1, 5, 5, 5, 5, 6, 6, 6, 6};
expected = {0, 1, 0, 0, 0, 1, 1, -1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_and_test_2.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_not_equals_and_test_2.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_not_equals_and_test_2.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_and_test_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_and_test_2.validity);
delete filter_tree_root;
}
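// Covers numeric filters (>, >=, !=, value lists, ranges) in both lazy and non-lazy evaluation modes.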
TEST_F(FilterTest, NumericFilterIterator) {
nlohmann::json schema =
R"({
"name": "Collection",
"fields": [
{"name": "rating", "type": "float"},
{"name": "age", "type": "int32"},
{"name": "years", "type": "int32[]"},
{"name": "timestamps", "type": "int64[]"}
]
})"_json;
Collection* coll = collectionManager.create_collection(schema).get();
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::string json_line;
while (std::getline(infile, json_line)) {
auto add_op = coll->add(json_line);
ASSERT_TRUE(add_op.ok());
}
infile.close();
const std::string doc_id_prefix = std::to_string(coll->get_collection_id()) + "_" + Collection::DOC_ID_PREFIX + "_";
filter_node_t* filter_tree_root = nullptr;
Option<bool> filter_op = filter::parse_filter_query("age: > 32", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto const enable_lazy_evaluation = true;
auto const disable_lazy_evaluation = false;
auto computed_greater_than_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(computed_greater_than_test.init_status().ok());
ASSERT_TRUE(computed_greater_than_test._get_is_filter_result_initialized());
std::vector<int> expected = {1, 3};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, computed_greater_than_test.validity);
ASSERT_EQ(i, computed_greater_than_test.seq_id);
computed_greater_than_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, computed_greater_than_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("age: >= 32", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_greater_than_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_greater_than_test.init_status().ok());
ASSERT_FALSE(iter_greater_than_test._get_is_filter_result_initialized());
std::vector<uint32_t> validate_ids = {0, 1, 2, 3, 4, 5};
std::vector<uint32_t> seq_ids = {1, 3, 3, 4, 4, 4};
expected = {0, 1, 0, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 5) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_greater_than_test.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_greater_than_test.validity);
}
ASSERT_EQ(expected[i], iter_greater_than_test.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_greater_than_test.next();
}
ASSERT_EQ(seq_ids[i], iter_greater_than_test.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_greater_than_test.validity);
iter_greater_than_test.reset();
validate_ids = {0, 1, 3, 5};
seq_ids = {1, 3, 4, 4};
expected = {0, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_greater_than_test.validity);
ASSERT_EQ(expected[i], iter_greater_than_test.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_greater_than_test.next();
}
ASSERT_EQ(seq_ids[i], iter_greater_than_test.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_greater_than_test.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_greater_than_test_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_greater_than_test_non_lazy.init_status().ok());
ASSERT_TRUE(iter_greater_than_test_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 3, 3, 4, 4, 4};
expected = {0, 1, 0, 1, 1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_greater_than_test_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_greater_than_test_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_greater_than_test_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_greater_than_test_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_greater_than_test_non_lazy.validity);
}
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("age: != 21", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_not_equals_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_test.init_status().ok());
ASSERT_FALSE(iter_not_equals_test._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 4, 5, 5};
expected = {1, 1, 0, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_test.validity);
ASSERT_EQ(expected[i], iter_not_equals_test.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_not_equals_test.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_test.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_test.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_not_equals_test_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_test_non_lazy.init_status().ok());
ASSERT_TRUE(iter_not_equals_test_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 3, 3, 4, 4, 4};
expected = {1, 1, 0, 1, 1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_test_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_not_equals_test_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_not_equals_test_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_test_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_test_non_lazy.validity);
}
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("age: != [21]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_not_equals_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_test_2.init_status().ok());
ASSERT_FALSE(iter_not_equals_test_2._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 4, 5, 5};
expected = {1, 1, 0, 1, 1, -1};
std::vector<bool> equals_iterator_valid = {true, true, true, false, false, false};
std::vector<uint32_t> equals_match_seq_ids = {2, 2, 2, 2, 2, 2};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_test_2.validity);
ASSERT_EQ(expected[i], iter_not_equals_test_2.is_valid(validate_ids[i]));
ASSERT_EQ(equals_iterator_valid[i], iter_not_equals_test_2._get_is_equals_iterator_valid());
ASSERT_EQ(equals_match_seq_ids[i], iter_not_equals_test_2._get_equals_iterator_id());
if (expected[i] == 1) {
iter_not_equals_test_2.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_test_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_test_2.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_not_equals_test_2_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_test_2_non_lazy.init_status().ok());
ASSERT_TRUE(iter_not_equals_test_2_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 3, 3, 4, 4, 4};
expected = {1, 1, 0, 1, 1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_test_2_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_not_equals_test_2_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_not_equals_test_2_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_test_2_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_test_2_non_lazy.validity);
}
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("age: [<=21, >32]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_multivalue_filter = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter.init_status().ok());
ASSERT_FALSE(iter_multivalue_filter._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 3, 3, 3};
expected = {0, 1, 1, 1, -1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 4) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter.validity);
}
ASSERT_EQ(expected[i], iter_multivalue_filter.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_multivalue_filter.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_multivalue_filter_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_non_lazy.init_status().ok());
ASSERT_TRUE(iter_multivalue_filter_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 3, 3, 3};
expected = {0, 1, 1, 1, -1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_multivalue_filter_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_multivalue_filter_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_non_lazy.validity);
}
delete filter_tree_root;
filter_tree_root = nullptr;
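    // Negated multi-value filter; the lazy iterator tracks the position of the underlying equals iterator.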
filter_op = filter::parse_filter_query("age: != [<24, >44]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_multivalue_filter_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_2.init_status().ok());
ASSERT_FALSE(iter_multivalue_filter_2._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 4, 5, 5};
expected = {1, 1, 0, 0, 1, -1};
equals_iterator_valid = {true, true, true, true, false, false};
equals_match_seq_ids = {2, 2, 2, 3, 3, 3};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_2.validity);
ASSERT_EQ(expected[i], iter_multivalue_filter_2.is_valid(validate_ids[i]));
ASSERT_EQ(equals_iterator_valid[i], iter_multivalue_filter_2._get_is_equals_iterator_valid());
ASSERT_EQ(equals_match_seq_ids[i], iter_multivalue_filter_2._get_equals_iterator_id());
if (expected[i] == 1) {
iter_multivalue_filter_2.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_2.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_multivalue_filter_2_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_2_non_lazy.init_status().ok());
ASSERT_TRUE(iter_multivalue_filter_2_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 4, 4, 4, 4, 4};
expected = {1, 1, 0, 0, 1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_2_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_multivalue_filter_2_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_multivalue_filter_2_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_2_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_2_non_lazy.validity);
}
delete filter_tree_root;
filter_tree_root = nullptr;
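    // Range clause combined with a comparison clause in one multi-value filter.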
filter_op = filter::parse_filter_query("age: [21..32, >44]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_multivalue_filter_3 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_3.init_status().ok());
ASSERT_FALSE(iter_multivalue_filter_3._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {2, 2, 3, 4, 4, 4};
expected = {1, 0, 1, 1, 1, -1};
equals_iterator_valid = {true, true, true, true, true, false};
equals_match_seq_ids = {0, 2, 2, 3, 4, 4};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 5) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_3.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_3.validity);
}
ASSERT_EQ(expected[i], iter_multivalue_filter_3.is_valid(validate_ids[i]));
ASSERT_EQ(equals_iterator_valid[i], iter_multivalue_filter_3._get_is_equals_iterator_valid());
ASSERT_EQ(equals_match_seq_ids[i], iter_multivalue_filter_3._get_equals_iterator_id());
if (expected[i] == 1) {
iter_multivalue_filter_3.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_3.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_3.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_multivalue_filter_3_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_3_non_lazy.init_status().ok());
ASSERT_TRUE(iter_multivalue_filter_3_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {2, 2, 3, 4, 4, 4};
expected = {1, 0, 1, 1, 1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_3_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_multivalue_filter_3_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_multivalue_filter_3_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_3_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_3_non_lazy.validity);
}
delete filter_tree_root;
filter_tree_root = nullptr;
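    // rating: <5 — the result is computed upfront even though lazy evaluation is enabled.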
filter_op = filter::parse_filter_query("rating: <5", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto computed_greater_than_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(computed_greater_than_test_2.init_status().ok());
ASSERT_TRUE(computed_greater_than_test_2._get_is_filter_result_initialized());
expected = {0, 3};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, computed_greater_than_test_2.validity);
ASSERT_EQ(i, computed_greater_than_test_2.seq_id);
computed_greater_than_test_2.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, computed_greater_than_test_2.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
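    // rating: >5 is evaluated lazily; reset() is also exercised to re-validate from the start.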
filter_op = filter::parse_filter_query("rating: >5", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_greater_than_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_greater_than_test_2.init_status().ok());
ASSERT_FALSE(iter_greater_than_test_2._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 4, 4, 4, 4};
expected = {0, 1, 1, 0, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 5) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_greater_than_test_2.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_greater_than_test_2.validity);
}
ASSERT_EQ(expected[i], iter_greater_than_test_2.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_greater_than_test_2.next();
}
ASSERT_EQ(seq_ids[i], iter_greater_than_test_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_greater_than_test_2.validity);
iter_greater_than_test_2.reset();
validate_ids = {0, 1, 4, 5};
seq_ids = {1, 2, 4, 4};
expected = {0, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 3) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_greater_than_test_2.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_greater_than_test_2.validity);
}
ASSERT_EQ(expected[i], iter_greater_than_test_2.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_greater_than_test_2.next();
}
ASSERT_EQ(seq_ids[i], iter_greater_than_test_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_greater_than_test_2.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
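    // Not-equals on a single rating value, evaluated lazily.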
filter_op = filter::parse_filter_query("rating: != 7.812", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_not_equals_test_3 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_test_3.init_status().ok());
ASSERT_FALSE(iter_not_equals_test_3._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 4, 5, 5};
expected = {1, 1, 0, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_test_3.validity);
ASSERT_EQ(expected[i], iter_not_equals_test_3.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_not_equals_test_3.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_test_3.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_test_3.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_not_equals_test_3_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_test_3_non_lazy.init_status().ok());
ASSERT_TRUE(iter_not_equals_test_3_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 3, 3, 4, 4, 4};
expected = {1, 1, 0, 1, 1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_test_3_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_not_equals_test_3_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_not_equals_test_3_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_test_3_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_test_3_non_lazy.validity);
}
delete filter_tree_root;
filter_tree_root = nullptr;
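    // Same not-equals filter expressed as a single-element list; the equals iterator state is also verified.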
filter_op = filter::parse_filter_query("rating: != [7.812]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_not_equals_test_4 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_test_4.init_status().ok());
ASSERT_FALSE(iter_not_equals_test_4._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 4, 5, 5};
expected = {1, 1, 0, 1, 1, -1};
equals_iterator_valid = {true, true, true, false, false, false};
equals_match_seq_ids = {2, 2, 2, 2, 2, 2};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_test_4.validity);
ASSERT_EQ(expected[i], iter_not_equals_test_4.is_valid(validate_ids[i]));
ASSERT_EQ(equals_iterator_valid[i], iter_not_equals_test_4._get_is_equals_iterator_valid());
ASSERT_EQ(equals_match_seq_ids[i], iter_not_equals_test_4._get_equals_iterator_id());
if (expected[i] == 1) {
iter_not_equals_test_4.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_test_4.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_test_4.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_not_equals_test_4_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_not_equals_test_4_non_lazy.init_status().ok());
ASSERT_TRUE(iter_not_equals_test_4_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 3, 3, 4, 4, 4};
expected = {1, 1, 0, 1, 1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_not_equals_test_4_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_not_equals_test_4_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_not_equals_test_4_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_not_equals_test_4_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_not_equals_test_4_non_lazy.validity);
}
delete filter_tree_root;
filter_tree_root = nullptr;
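    // Multi-value comparison filter on rating.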
filter_op = filter::parse_filter_query("rating: [< 1, >6]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_multivalue_filter_4 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_4.init_status().ok());
ASSERT_FALSE(iter_multivalue_filter_4._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 3, 3, 3};
expected = {0, 1, 1, 1, -1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 4) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_4.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_4.validity);
}
ASSERT_EQ(expected[i], iter_multivalue_filter_4.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_multivalue_filter_4.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_4.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_4.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_multivalue_filter_4_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_4_non_lazy.init_status().ok());
ASSERT_TRUE(iter_multivalue_filter_4_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 3, 3, 3};
expected = {0, 1, 1, 1, -1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_4_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_multivalue_filter_4_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_multivalue_filter_4_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_4_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_4_non_lazy.validity);
}
delete filter_tree_root;
filter_tree_root = nullptr;
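    // Negated multi-value filter on rating; equals iterator state is verified.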
filter_op = filter::parse_filter_query("rating: != [<1, >8]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_multivalue_filter_5 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_5.init_status().ok());
ASSERT_FALSE(iter_multivalue_filter_5._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 4, 5, 5};
expected = {1, 0, 1, 0, 1, -1};
equals_iterator_valid = {true, true, true, true, false, false};
equals_match_seq_ids = {1, 1, 3, 3, 3, 3};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_5.validity);
ASSERT_EQ(expected[i], iter_multivalue_filter_5.is_valid(validate_ids[i]));
ASSERT_EQ(equals_iterator_valid[i], iter_multivalue_filter_5._get_is_equals_iterator_valid());
ASSERT_EQ(equals_match_seq_ids[i], iter_multivalue_filter_5._get_equals_iterator_id());
if (expected[i] == 1) {
iter_multivalue_filter_5.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_5.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_5.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_multivalue_filter_5_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_5_non_lazy.init_status().ok());
ASSERT_TRUE(iter_multivalue_filter_5_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {2, 2, 4, 4, 4, 4};
expected = {1, 0, 1, 0, 1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_5_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_multivalue_filter_5_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_multivalue_filter_5_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_5_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_5_non_lazy.validity);
}
delete filter_tree_root;
filter_tree_root = nullptr;
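    // Range clause combined with a comparison clause on rating; equals iterator state is verified.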
filter_op = filter::parse_filter_query("rating: [0..6, >8]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_multivalue_filter_6 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_6.init_status().ok());
ASSERT_FALSE(iter_multivalue_filter_6._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 3, 3, 4, 4, 4};
expected = {1, 1, 0, 1, 1, -1};
equals_iterator_valid = {true, true, true, true, true, false};
equals_match_seq_ids = {0, 1, 3, 3, 4, 4};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 5) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_6.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_6.validity);
}
ASSERT_EQ(expected[i], iter_multivalue_filter_6.is_valid(validate_ids[i]));
ASSERT_EQ(equals_iterator_valid[i], iter_multivalue_filter_6._get_is_equals_iterator_valid());
ASSERT_EQ(equals_match_seq_ids[i], iter_multivalue_filter_6._get_equals_iterator_id());
if (expected[i] == 1) {
iter_multivalue_filter_6.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_6.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_6.validity);
// With enable_lazy_evaluation = false, filter result should be initialized.
{
auto iter_multivalue_filter_6_non_lazy = filter_result_iterator_t(coll->get_name(), coll->_get_index(),
filter_tree_root, disable_lazy_evaluation);
ASSERT_TRUE(iter_multivalue_filter_6_non_lazy.init_status().ok());
ASSERT_TRUE(iter_multivalue_filter_6_non_lazy._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 3, 3, 4, 4, 4};
expected = {1, 1, 0, 1, 1, -1};
ASSERT_EQ(filter_result_iterator_t::valid, iter_multivalue_filter_6_non_lazy.validity);
for (uint32_t i = 0; i < validate_ids.size(); i++) {
ASSERT_EQ(expected[i], iter_multivalue_filter_6_non_lazy.is_valid(validate_ids[i]));
if (expected[i] == 1) {
iter_multivalue_filter_6_non_lazy.next();
}
ASSERT_EQ(seq_ids[i], iter_multivalue_filter_6_non_lazy.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_multivalue_filter_6_non_lazy.validity);
}
delete filter_tree_root;
}
TEST_F(FilterTest, PrefixStringFilter) {
auto schema_json =
R"({
"name": "Names",
"fields": [
{"name": "name", "type": "string"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"name": "Steve Jobs"
})"_json,
R"({
"name": "Adam Stator"
})"_json,
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
Collection* coll = collection_create_op.get();
for (auto const &json: documents) {
auto add_op = coll->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
const std::string doc_id_prefix = std::to_string(coll->get_collection_id()) + "_" + Collection::DOC_ID_PREFIX + "_";
filter_node_t* filter_tree_root = nullptr;
search_stop_us = UINT64_MAX; // `Index::fuzzy_search_fields` checks for timeout.
Option<bool> filter_op = filter::parse_filter_query("name:= S*", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto const enable_lazy_evaluation = true;
auto computed_exact_prefix_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(computed_exact_prefix_test.init_status().ok());
ASSERT_TRUE(computed_exact_prefix_test._get_is_filter_result_initialized());
std::vector<int> expected = {0};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, computed_exact_prefix_test.validity);
ASSERT_EQ(i, computed_exact_prefix_test.seq_id);
computed_exact_prefix_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, computed_exact_prefix_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: S*", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto computed_contains_prefix_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(computed_contains_prefix_test.init_status().ok());
ASSERT_TRUE(computed_contains_prefix_test._get_is_filter_result_initialized());
expected = {0, 1};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, computed_contains_prefix_test.validity);
ASSERT_EQ(i, computed_contains_prefix_test.seq_id);
computed_contains_prefix_test.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, computed_contains_prefix_test.validity);
delete filter_tree_root;
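    // Add more matching documents so the same prefix filters are now evaluated lazily.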
documents = {
R"({
"name": "Steve Reiley"
})"_json,
R"({
"name": "Storm"
})"_json,
R"({
"name": "Steve Rogers"
})"_json,
};
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name:= S*", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_exact_prefix_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_exact_prefix_test.init_status().ok());
ASSERT_FALSE(iter_exact_prefix_test._get_is_filter_result_initialized());
std::vector<uint32_t> validate_ids = {0, 1, 2, 3, 4, 5};
std::vector<uint32_t> seq_ids = {2, 2, 3, 4, 4, 4};
std::vector<uint32_t> equals_match_seq_ids = {0, 2, 2, 3, 4, 4};
std::vector<bool> equals_iterator_valid = {true, true, true, true, true, false};
expected = {1, 0, 1, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 5) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_exact_prefix_test.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_exact_prefix_test.validity);
}
ASSERT_EQ(expected[i], iter_exact_prefix_test.is_valid(validate_ids[i]));
ASSERT_EQ(equals_match_seq_ids[i], iter_exact_prefix_test._get_equals_iterator_id());
ASSERT_EQ(equals_iterator_valid[i], iter_exact_prefix_test._get_is_equals_iterator_valid());
if (expected[i] == 1) {
iter_exact_prefix_test.next();
}
ASSERT_EQ(seq_ids[i], iter_exact_prefix_test.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_exact_prefix_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: S*", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_contains_prefix_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_contains_prefix_test.init_status().ok());
ASSERT_FALSE(iter_contains_prefix_test._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5};
seq_ids = {1, 2, 3, 4, 4, 4};
equals_match_seq_ids = {0, 1, 2, 3, 4, 4};
equals_iterator_valid = {true, true, true, true, true, false};
expected = {1, 1, 1, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 5) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_contains_prefix_test.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_contains_prefix_test.validity);
}
ASSERT_EQ(expected[i], iter_contains_prefix_test.is_valid(validate_ids[i]));
ASSERT_EQ(equals_match_seq_ids[i], iter_contains_prefix_test._get_equals_iterator_id());
ASSERT_EQ(equals_iterator_valid[i], iter_contains_prefix_test._get_is_equals_iterator_valid());
if (expected[i] == 1) {
iter_contains_prefix_test.next();
}
ASSERT_EQ(seq_ids[i], iter_contains_prefix_test.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_contains_prefix_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name:= Steve R*", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto computed_exact_prefix_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(computed_exact_prefix_test_2.init_status().ok());
ASSERT_TRUE(computed_exact_prefix_test_2._get_is_filter_result_initialized());
expected = {2, 4};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, computed_exact_prefix_test_2.validity);
ASSERT_EQ(i, computed_exact_prefix_test_2.seq_id);
computed_exact_prefix_test_2.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, computed_exact_prefix_test_2.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: Steve R*", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto computed_contains_prefix_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(computed_contains_prefix_test_2.init_status().ok());
ASSERT_TRUE(computed_contains_prefix_test_2._get_is_filter_result_initialized());
expected = {2, 4};
for (auto const& i : expected) {
ASSERT_EQ(filter_result_iterator_t::valid, computed_contains_prefix_test_2.validity);
ASSERT_EQ(i, computed_contains_prefix_test_2.seq_id);
computed_contains_prefix_test_2.next();
}
ASSERT_EQ(filter_result_iterator_t::invalid, computed_contains_prefix_test_2.validity);
delete filter_tree_root;
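    // "Steve R" appears at the start of one new document and mid-string in the other,
    // distinguishing := (exact prefix) from : (contains) matching.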
documents = {
R"({
"name": "Steve Runner foo"
})"_json,
R"({
"name": "foo Steve Runner"
})"_json,
};
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name:= Steve R*", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_exact_prefix_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_exact_prefix_test_2.init_status().ok());
ASSERT_FALSE(iter_exact_prefix_test_2._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5, 6, 7};
seq_ids = {2, 2, 4, 4, 5, 5, 5, 5};
equals_match_seq_ids = {2, 2, 2, 4, 4, 5, 5, 5};
equals_iterator_valid = {true, true, true, true, true, true, false, false};
expected = {0, 0, 1, 0, 1, 1, -1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 6) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_exact_prefix_test_2.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_exact_prefix_test_2.validity);
}
ASSERT_EQ(expected[i], iter_exact_prefix_test_2.is_valid(validate_ids[i]));
ASSERT_EQ(equals_match_seq_ids[i], iter_exact_prefix_test_2._get_equals_iterator_id());
ASSERT_EQ(equals_iterator_valid[i], iter_exact_prefix_test_2._get_is_equals_iterator_valid());
if (expected[i] == 1) {
iter_exact_prefix_test_2.next();
}
ASSERT_EQ(seq_ids[i], iter_exact_prefix_test_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_exact_prefix_test_2.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
filter_op = filter::parse_filter_query("name: Steve R*", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto iter_contains_prefix_test_2 = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(iter_contains_prefix_test_2.init_status().ok());
ASSERT_FALSE(iter_contains_prefix_test_2._get_is_filter_result_initialized());
validate_ids = {0, 1, 2, 3, 4, 5, 6, 7};
seq_ids = {2, 2, 4, 4, 5, 6, 6, 6};
equals_match_seq_ids = {2, 2, 2, 4, 4, 5, 6, 6};
equals_iterator_valid = {true, true, true, true, true, true, true, false};
expected = {0, 0, 1, 0, 1, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 7) {
ASSERT_EQ(filter_result_iterator_t::valid, iter_contains_prefix_test_2.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, iter_contains_prefix_test_2.validity);
}
ASSERT_EQ(expected[i], iter_contains_prefix_test_2.is_valid(validate_ids[i]));
ASSERT_EQ(equals_match_seq_ids[i], iter_contains_prefix_test_2._get_equals_iterator_id());
ASSERT_EQ(equals_iterator_valid[i], iter_contains_prefix_test_2._get_is_equals_iterator_valid());
if (expected[i] == 1) {
iter_contains_prefix_test_2.next();
}
ASSERT_EQ(seq_ids[i], iter_contains_prefix_test_2.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, iter_contains_prefix_test_2.validity);
delete filter_tree_root;
}
TEST_F(FilterTest, IdFilterIterator) {
Collection *coll;
std::vector<field> fields = {field("company_name", field_types::STRING, false),
field("num_employees", field_types::INT32, false),};
coll = collectionManager.get_collection("coll1").get();
if(coll == nullptr) {
coll = collectionManager.create_collection("coll1", 1, fields, "num_employees").get();
}
std::vector<std::vector<std::string>> records = {
{"123", "Company 1", "50"},
{"125", "Company 2", "150"},
{"127", "Company 3", "250"},
{"129", "Stark Industries 4", "500"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = records[i][0];
doc["company_name"] = records[i][1];
doc["num_employees"] = std::stoi(records[i][2]);
ASSERT_TRUE(coll->add(doc.dump()).ok());
}
const std::string doc_id_prefix = std::to_string(coll->get_collection_id()) + "_" + Collection::DOC_ID_PREFIX + "_";
filter_node_t* filter_tree_root = nullptr;
Option<bool> filter_op = filter::parse_filter_query("id: *", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto const enable_lazy_evaluation = true;
auto all_ids_match_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(all_ids_match_test.init_status().ok());
ASSERT_FALSE(all_ids_match_test._get_is_filter_result_initialized());
ASSERT_EQ(4, all_ids_match_test.approx_filter_ids_length);
std::vector<uint32_t> validate_ids = {0, 1, 3, 4};
std::vector<uint32_t> seq_ids = {1, 2, 3, 3};
std::vector<int> expected = {1, 1, 1, -1};
for (uint32_t i = 0; i < validate_ids.size(); i++) {
if (i < 3) {
ASSERT_EQ(filter_result_iterator_t::valid, all_ids_match_test.validity);
} else {
ASSERT_EQ(filter_result_iterator_t::invalid, all_ids_match_test.validity);
}
ASSERT_EQ(expected[i], all_ids_match_test.is_valid(validate_ids[i]));
if (expected[i] == 1) {
all_ids_match_test.next();
}
ASSERT_EQ(seq_ids[i], all_ids_match_test.seq_id);
}
ASSERT_EQ(filter_result_iterator_t::invalid, all_ids_match_test.validity);
all_ids_match_test.reset();
ASSERT_EQ(filter_result_iterator_t::valid, all_ids_match_test.validity);
ASSERT_EQ(0, all_ids_match_test.seq_id);
ASSERT_EQ(1, all_ids_match_test.is_valid(2));
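    // After compute_iterators(), every document id should still validate.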
all_ids_match_test.compute_iterators();
seq_ids = {0, 1, 2, 3};
for (auto const& seq_id : seq_ids) {
ASSERT_EQ(filter_result_iterator_t::valid, all_ids_match_test.validity);
ASSERT_EQ(1, all_ids_match_test.is_valid(seq_id));
}
delete filter_tree_root;
filter_tree_root = nullptr;
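    // A not-equals id filter whose list contains * can never match any document.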
filter_op = filter::parse_filter_query("id: != [foo, *, bar]", coll->get_schema(), store, doc_id_prefix,
filter_tree_root);
ASSERT_TRUE(filter_op.ok());
auto no_ids_match_test = filter_result_iterator_t(coll->get_name(), coll->_get_index(), filter_tree_root,
enable_lazy_evaluation);
ASSERT_TRUE(no_ids_match_test.init_status().ok());
ASSERT_TRUE(no_ids_match_test._get_is_filter_result_initialized());
ASSERT_EQ(0, no_ids_match_test.approx_filter_ids_length);
ASSERT_EQ(filter_result_iterator_t::invalid, no_ids_match_test.validity);
delete filter_tree_root;
filter_tree_root = nullptr;
}
| size: 100,521 | language: C++ | extension: .cpp | total lines: 1,799 | avg line length: 44.603113 | max line length: 141 | alphanum fraction: 0.587973 | repo: typesense/typesense | stars: 20,571 | forks: 633 | open issues: 548 | license: GPL-3.0 | extracted: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags: all false |
| id: 3,724 | file_name: personalization_model_manager_test.cpp | file_path: typesense_typesense/test/personalization_model_manager_test.cpp |
#include <gtest/gtest.h>
#include "personalization_model_manager.h"
#include "store.h"
#include <filesystem>
#include "collection_manager.h"
class PersonalizationModelManagerTest : public ::testing::Test {
protected:
std::string temp_dir;
Store *store;
CollectionManager& collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
void SetUp() override {
temp_dir = (std::filesystem::temp_directory_path() / "personalization_model_manager_test").string();
system(("rm -rf " + temp_dir + " && mkdir -p " + temp_dir).c_str());
std::string test_dir = "/tmp/typesense_test/models";
system(("rm -rf " + test_dir + " && mkdir -p " + test_dir).c_str());
EmbedderManager::set_model_dir(test_dir);
// Create test collection
std::string state_dir_path = "/tmp/typesense_test/personalization_model_manager_test";
Config::get_instance().set_data_dir(state_dir_path);
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
nlohmann::json collection_schema = R"({
"name": "companies",
"fields": [
{"name": "name", "type": "string"}
]
})"_json;
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.create_collection(collection_schema);
PersonalizationModelManager::init(store);
}
void TearDown() override {
std::string test_dir = "/tmp/typesense_test";
system(("rm -rf " + test_dir).c_str());
collectionManager.dispose();
delete store;
}
nlohmann::json create_valid_model(const std::string& id = "") {
nlohmann::json model;
model["id"] = id;
model["name"] = "ts/tyrec-1";
model["type"] = "recommendation";
model["collection"] = "companies";
return model;
}
std::string get_onnx_model_archive() {
std::string content = "This is a sample ONNX model content";
std::string filename = (temp_dir + "/model.onnx");
std::ofstream file(filename);
file << content;
file.close();
std::string archive_name = (temp_dir + "/model.tar.gz");
std::string command = "tar -czf " + archive_name + " -C " + temp_dir + " model.onnx";
system(command.c_str());
std::ifstream archive_file(archive_name, std::ios::binary);
std::string archive_content((std::istreambuf_iterator<char>(archive_file)), std::istreambuf_iterator<char>());
archive_file.close();
std::filesystem::remove(filename);
std::filesystem::remove(archive_name);
return archive_content;
}
};
TEST_F(PersonalizationModelManagerTest, AddModelSuccess) {
nlohmann::json model = create_valid_model("test_id");
std::string model_data = get_onnx_model_archive();
auto result = PersonalizationModelManager::add_model(model, "test_id", true, model_data);
ASSERT_TRUE(result.ok());
ASSERT_FALSE(result.get().empty());
}
TEST_F(PersonalizationModelManagerTest, AddModelDuplicate) {
    nlohmann::json model = create_valid_model("test_id");
    auto result1 = PersonalizationModelManager::add_model(model, "test_id", true, get_onnx_model_archive());
    ASSERT_TRUE(result1.ok());
    // Adding a model with an id that already exists should be rejected.
    auto result2 = PersonalizationModelManager::add_model(model, "test_id", true, get_onnx_model_archive());
    ASSERT_FALSE(result2.ok());
    ASSERT_EQ(result2.code(), 409);
    ASSERT_EQ(result2.error(), "Model id already exists");
}
TEST_F(PersonalizationModelManagerTest, GetModelSuccess) {
    nlohmann::json model = create_valid_model("test_id");
    auto add_result = PersonalizationModelManager::add_model(model, "test_id", true, get_onnx_model_archive());
    ASSERT_TRUE(add_result.ok());
    auto get_result = PersonalizationModelManager::get_model("test_id");
    ASSERT_TRUE(get_result.ok());
    ASSERT_EQ(get_result.get()["id"], "test_id");
}
TEST_F(PersonalizationModelManagerTest, GetModelNotFound) {
auto result = PersonalizationModelManager::get_model("nonexistent");
ASSERT_FALSE(result.ok());
ASSERT_EQ(result.code(), 404);
ASSERT_EQ(result.error(), "Model not found");
}
TEST_F(PersonalizationModelManagerTest, DeleteModelSuccess) {
    nlohmann::json model = create_valid_model("test_id");
    auto add_result = PersonalizationModelManager::add_model(model, "test_id", true, get_onnx_model_archive());
    ASSERT_TRUE(add_result.ok());
    auto delete_result = PersonalizationModelManager::delete_model("test_id");
    ASSERT_TRUE(delete_result.ok());
    ASSERT_EQ(delete_result.get()["id"], "test_id");
    auto get_result = PersonalizationModelManager::get_model("test_id");
    ASSERT_FALSE(get_result.ok());
    ASSERT_EQ(get_result.code(), 404);
    ASSERT_EQ(get_result.error(), "Model not found");
}
TEST_F(PersonalizationModelManagerTest, DeleteModelNotFound) {
auto result = PersonalizationModelManager::delete_model("nonexistent");
ASSERT_FALSE(result.ok());
ASSERT_EQ(result.code(), 404);
ASSERT_EQ(result.error(), "Model not found");
}
TEST_F(PersonalizationModelManagerTest, GetAllModelsEmpty) {
auto result = PersonalizationModelManager::get_all_models();
ASSERT_TRUE(result.ok());
ASSERT_TRUE(result.get().empty());
}
TEST_F(PersonalizationModelManagerTest, GetAllModelsWithData) {
nlohmann::json model1 = create_valid_model("test_id1");
nlohmann::json model2 = create_valid_model("test_id2");
PersonalizationModelManager::add_model(model1, "test_id1", true, get_onnx_model_archive());
PersonalizationModelManager::add_model(model2, "test_id2", true, get_onnx_model_archive());
auto result = PersonalizationModelManager::get_all_models();
ASSERT_TRUE(result.ok());
ASSERT_EQ(result.get().size(), 2);
}
TEST_F(PersonalizationModelManagerTest, UpdateModelSuccess) {
nlohmann::json model = create_valid_model("test_id");
auto add_result = PersonalizationModelManager::add_model(model, "test_id", true, get_onnx_model_archive());
ASSERT_TRUE(add_result.ok());
nlohmann::json update;
update["name"] = "ts/tyrec-1";
auto update_result = PersonalizationModelManager::update_model("test_id", update, "");
ASSERT_TRUE(update_result.ok());
ASSERT_EQ(update_result.get()["name"], "ts/tyrec-1");
}
TEST_F(PersonalizationModelManagerTest, UpdateModelNotFound) {
nlohmann::json update;
update["name"] = "ts/tyrec-1";
auto result = PersonalizationModelManager::update_model("nonexistent", update, "");
ASSERT_FALSE(result.ok());
ASSERT_EQ(result.code(), 404);
ASSERT_EQ(result.error(), "Model not found");
}
TEST_F(PersonalizationModelManagerTest, UpdateModelInvalidData) {
    nlohmann::json model = create_valid_model("test_id");
    auto add_result = PersonalizationModelManager::add_model(model, "test_id", true, get_onnx_model_archive());
    ASSERT_TRUE(add_result.ok());
    nlohmann::json update;
    update["name"] = "invalid/name";
    auto update_result = PersonalizationModelManager::update_model("test_id", update, "");
    ASSERT_FALSE(update_result.ok());
    ASSERT_EQ(update_result.code(), 400);
    ASSERT_EQ(update_result.error(), "Model namespace must be 'ts'.");
}
| size: 6,589 | language: C++ | extension: .cpp | total lines: 144 | avg line length: 40.020833 | max line length: 118 | alphanum fraction: 0.674357 | repo: typesense/typesense | stars: 20,571 | forks: 633 | open issues: 548 | license: GPL-3.0 | extracted: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags: all false |
| id: 3,725 | file_name: collection_specific_more_test.cpp | file_path: typesense_typesense/test/collection_specific_more_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionSpecificMoreTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_specific_more";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionSpecificMoreTest, MaxCandidatesShouldBeRespected) {
std::vector<field> fields = {field("company", field_types::STRING, true)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
for (size_t i = 0; i < 200; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["company"] = "prefix"+std::to_string(i);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
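    // With a large max_candidates value, all 200 prefix matches should be found.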
auto results = coll1->search("prefix", {"company"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback, 1000).get();
ASSERT_EQ(200, results["found"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificMoreTest, PrefixExpansionWhenExactMatchExists) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("author", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "The Little Prince [by] Antoine de Saint Exupéry : teacher guide";
doc1["author"] = "Barbara Valdez";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Little Prince";
doc2["author"] = "Antoine de Saint-Exupery";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("little prince antoine saint", {"title", "author"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 5, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificMoreTest, PrefixExpansionOnSingleField) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::string> tokens = {
"Mark Jack", "John Jack", "John James", "John Joseph", "John Jim", "John Jordan",
"Mark Nicholas", "Mark Abbey", "Mark Boucher", "Mark Bicks", "Mark Potter"
};
for(size_t i = 0; i < tokens.size(); i++) {
std::string title = tokens[i];
nlohmann::json doc;
doc["title"] = title;
doc["points"] = i;
coll1->add(doc.dump());
}
    // uses the default max_candidates of 4
auto results = coll1->search("mark j", {"title"}, "", {}, {}, {0}, 100, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("mark b", {"title"}, "", {}, {}, {0}, 100, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("9", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("8", results["hits"][1]["document"]["id"].get<std::string>());
results = coll1->search("mark b", {"title"}, "points: < 9", {}, {}, {0}, 100, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("8", results["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, TypoCorrectionShouldUseMaxCandidates) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
for(size_t i = 0; i < 20; i++) {
nlohmann::json doc;
doc["title"] = "Independent" + std::to_string(i);
doc["points"] = i;
coll1->add(doc.dump());
}
size_t max_candidates = 20;
auto results = coll1->search("independent", {"title"}, "", {}, {}, {2}, 30, 1, FREQUENCY, {false}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000*1000, 4, 7,
off, max_candidates).get();
ASSERT_EQ(20, results["hits"].size());
}
TEST_F(CollectionSpecificMoreTest, PrefixExpansionOnMultiField) {
Collection *coll1;
std::vector<field> fields = {field("location", field_types::STRING, false),
field("name", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::string> names = {
"John Stewart", "John Smith", "John Scott", "John Stone", "John Romero", "John Oliver", "John Adams"
};
std::vector<std::string> locations = {
"Switzerland", "Seoul", "Sydney", "Surat", "Stockholm", "Salem", "Sevilla"
};
for(size_t i = 0; i < names.size(); i++) {
nlohmann::json doc;
doc["location"] = locations[i];
doc["name"] = names[i];
doc["points"] = i;
coll1->add(doc.dump());
}
auto results = coll1->search("john s", {"location", "name"}, "", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
0, spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "title", 20, {}, {}, {}, 0, "<mark>", "</mark>", {}, 1000, true, false,
true, "", false, 6000*1000, 4, 7, off, 4).get();
// tokens are ordered by max_score and prefix continuation on the same field is prioritized
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][3]["document"]["id"].get<std::string>());
    // when more than 4 candidates are requested, "s" matches from the other field are returned as well
results = coll1->search("john s", {"location", "name"}, "", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
0, spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "title", 20, {}, {}, {}, 0, "<mark>", "</mark>", {}, 1000, true, false,
true, "", false, 6000*1000, 4, 7, off, 10).get();
ASSERT_EQ(7, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][3]["document"]["id"].get<std::string>());
ASSERT_EQ("6", results["hits"][4]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, ArrayElementMatchShouldBeMoreImportantThanTotalMatch) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("author", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Harry Potter and the Prisoner of Azkaban";
doc1["author"] = "Rowling";
doc1["tags"] = {"harry", ""};
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Fantastic beasts and where to find them";
doc2["author"] = "Rowling";
doc2["tags"] = {"harry", "potter", "prisoner", "azkaban", "beasts", "guide", "rowling"};
nlohmann::json doc3;
doc3["id"] = "2";
doc3["title"] = "Fantastic beasts and where to find them";
doc3["author"] = "Rowling";
doc3["tags"] = {"harry potter", "prisoner azkaban", "beasts", "guide", "rowling"};
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
auto results = coll1->search("harry potter rowling prisoner azkaban", {"title", "author", "tags"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 5, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, ArrayMatchAcrossElementsMustNotMatter) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("author", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Por do sol immateur";
doc1["author"] = "Vermelho";
doc1["tags"] = {"por do sol", "immateur", "gemsor", "praia", "sol", "vermelho", "suyay"};
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Sunset Rising";
doc2["author"] = "Vermelho";
doc2["tags"] = {"sunset", "por do sol", "praia", "somao", "vermelho"};
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("praia por sol vermelho", {"title", "author", "tags"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 5, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, MatchedSegmentMoreImportantThanTotalMatches) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("author", field_types::STRING, false)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "One Two Three Four Five Six Seven Eight Nine Ten Eleven Twelve Thirteen Fourteen";
doc1["author"] = "Rowling";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "One Four Five Six Seven Eight Nine Ten Eleven Twelve Thirteen Fourteen Three Rowling";
doc2["author"] = "Two";
nlohmann::json doc3;
doc3["id"] = "2";
doc3["title"] = "One Three Four Five Six Seven Eight Nine Ten Eleven Twelve Thirteen Fourteen Two Rowling";
doc3["author"] = "Foo";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
auto results = coll1->search("one two three rowling", {"title", "author"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 5, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, VerbatimMatchNotOnPartialTokenMatch) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Thirteen Fourteen";
doc1["tags"] = {"foo", "bar", "Hundred", "Thirteen Fourteen"};
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "One Eleven Thirteen Fourteen Three";
doc2["tags"] = {"foo", "bar", "Hundred", "One Eleven Thirteen Fourteen Three"};
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("hundred thirteen fourteen", {"tags"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 5, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
}
TEST_F(CollectionSpecificMoreTest, SortByStringEmptyValuesConfigFirstField) {
std::vector<field> fields = {field("points", field_types::INT32, false, true),
field("points2", field_types::INT32, false, true),
field("points3", field_types::INT32, false, true)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
for(size_t i = 0; i < 4; i++) {
nlohmann::json doc;
if(i == 2) {
doc["points"] = nullptr;
} else {
doc["points"] = i;
}
doc["points2"] = 100;
doc["points3"] = 100;
coll1->add(doc.dump());
}
// without any order config: missing integers always end up last
sort_fields = {sort_by("points", "asc"),};
auto results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
sort_fields = {sort_by("points", "desc"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// ascending
sort_fields = {sort_by("points(missing_values: first)", "ASC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {sort_by("points(missing_values: last)", "ASC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// descending
sort_fields = {sort_by("points(missing_values: first)", "DESC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {sort_by("points(missing_values: last)", "DESC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// bad syntax
sort_fields = {sort_by("points(foo: bar)", "desc"),};
auto res_op = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Bad syntax for sorting field `points`", res_op.error());
sort_fields = {sort_by("points(missing_values: bar)", "desc"),};
res_op = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Bad syntax for sorting field `points`", res_op.error());
}
TEST_F(CollectionSpecificMoreTest, SortByStringEmptyValuesConfigSecondField) {
std::vector<field> fields = {field("points", field_types::INT32, false, true),
field("points2", field_types::INT32, false, true),
field("points3", field_types::INT32, false, true)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
for(size_t i = 0; i < 4; i++) {
nlohmann::json doc;
if(i == 2) {
doc["points"] = nullptr;
} else {
doc["points"] = i;
}
doc["points2"] = 100;
doc["points3"] = 100;
coll1->add(doc.dump());
}
// without any order config: missing integers always end up last
sort_fields = {sort_by("points2", "asc"),sort_by("points", "asc")};
auto results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
sort_fields = {sort_by("points2", "asc"),sort_by("points", "desc"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// ascending
sort_fields = {sort_by("points2", "asc"),sort_by("points(missing_values: first)", "ASC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {sort_by("points2", "asc"),sort_by("points(missing_values: last)", "ASC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// descending
sort_fields = {sort_by("points2", "asc"),sort_by("points(missing_values: first)", "DESC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {sort_by("points2", "asc"),sort_by("points(missing_values: last)", "DESC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, SortByStringEmptyValuesConfigThirdField) {
std::vector<field> fields = {field("points", field_types::INT32, false, true),
field("points2", field_types::INT32, false, true),
field("points3", field_types::INT32, false, true)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
for(size_t i = 0; i < 4; i++) {
nlohmann::json doc;
if(i == 2) {
doc["points"] = nullptr;
} else {
doc["points"] = i;
}
doc["points2"] = 100;
doc["points3"] = 100;
coll1->add(doc.dump());
}
// without any order config: missing integers always end up last
sort_fields = {sort_by("points2", "asc"),sort_by("points3", "asc"),sort_by("points", "asc")};
auto results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
sort_fields = {sort_by("points2", "asc"),sort_by("points3", "asc"),sort_by("points", "desc"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// ascending
sort_fields = {sort_by("points2", "asc"),sort_by("points3", "asc"),sort_by("points(missing_values: first)", "ASC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {sort_by("points2", "asc"),sort_by("points3", "asc"),sort_by("points(missing_values: last)", "ASC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
// descending
sort_fields = {sort_by("points2", "asc"),sort_by("points3", "asc"),sort_by("points(missing_values: first)", "DESC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
sort_fields = {sort_by("points2", "asc"),sort_by("points3", "asc"),sort_by("points(missing_values: last)", "DESC"),};
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, MAX_SCORE, {true}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("2", results["hits"][3]["document"]["id"].get<std::string>());
}
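// a typo-tolerant search for "earrings" must not match the unrelated token "arvin"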
TEST_F(CollectionSpecificMoreTest, WrongTypoCorrection) {
std::vector<field> fields = {field("title", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Gold plated arvin";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("earrings", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 5, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
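// the trailing boolean search argument toggles token-position ranking: when enabled, docs where the matched token occurs earlier rank higher; when disabled, ranking falls back to the points field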
TEST_F(CollectionSpecificMoreTest, PositionalTokenRanking) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::string> tokens = {
"Alpha Beta Gamma", "Omega Alpha Theta", "Omega Theta Alpha", "Indigo Omega Theta Alpha"
};
for(size_t i = 0; i < tokens.size(); i++) {
std::string title = tokens[i];
nlohmann::json doc;
doc["title"] = title;
doc["points"] = i;
coll1->add(doc.dump());
}
auto results = coll1->search("alpha", {"title"}, "", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ("3", results["hits"][3]["document"]["id"].get<std::string>());
results = coll1->search("alpha", {"title"}, "", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, false).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][3]["document"]["id"].get<std::string>());
results = coll1->search("theta alpha", {"title"}, "", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, false).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][2]["document"]["id"].get<std::string>());
results = coll1->search("theta alpha", {"title"}, "", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("3", results["hits"][2]["document"]["id"].get<std::string>());
}
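// token-position ranking on array fields: with the flag enabled, the doc whose array element contains the token earliest ranks first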
TEST_F(CollectionSpecificMoreTest, PositionalTokenRankingWithArray) {
Collection *coll1;
std::vector<field> fields = {field("tags", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["tags"] = {"alpha foo", "gamma", "beta alpha"};
doc1["points"] = 100;
nlohmann::json doc2;
doc2["tags"] = {"omega", "omega beta alpha"};
doc2["points"] = 200;
coll1->add(doc1.dump());
coll1->add(doc2.dump());
auto results = coll1->search("alpha", {"tags"}, "", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, false).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
results = coll1->search("alpha", {"tags"}, "", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
}
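// an exact (:=) filter on a string array must match a whole array element; supersets and prefixes of an element must not match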
TEST_F(CollectionSpecificMoreTest, ExactFilteringOnArray) {
Collection *coll1;
std::vector<field> fields = {field("tags", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
nlohmann::json doc1;
doc1["tags"] = {"§ 23",
"§ 34d EStG",
"§ 23 Satz EStG"};
doc1["points"] = 100;
coll1->add(doc1.dump());
auto results = coll1->search("*", {"tags"}, "tags:=§ 23 EStG", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, false).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("*", {"tags"}, "tags:=§ 23", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, false).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("*", {"tags"}, "tags:=§ 23 Satz", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, false).get();
ASSERT_EQ(0, results["hits"].size());
}
TEST_F(CollectionSpecificMoreTest, ExactFilteringOnArray2) {
auto schema = R"({
"name": "coll1",
"fields": [
{"name": "capability", "type": "string[]", "facet": true}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc1;
doc1["capability"] = {"Encoding capabilities for network communications",
"Obfuscation capabilities"};
coll1->add(doc1.dump());
auto results = coll1->search("*", {}, "capability:=Encoding capabilities", {}, {}, {0}, 100, 1, MAX_SCORE, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, false).get();
ASSERT_EQ(0, results["hits"].size());
}
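// a concatenated query token ("paulaschoice") should still match when its split parts occur in a different field than the other query token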
TEST_F(CollectionSpecificMoreTest, SplitTokensCrossFieldMatching) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("brand", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Vitamin C1";
doc1["brand"] = "Paulas Choice";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("paulaschoice c1", {"name", "brand"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificMoreTest, PrefixSearchOnSpecificFields) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("brand", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
// at least 4 tokens that begin with "girl" are needed to trigger this regression
std::vector<std::string> names = {
"Jungle Girl", "Jungle Girlz", "Jam Foo1", "Jam Foo2", "Jam Foo3", "Jam Foo4", "Jam Foo"
};
std::vector<std::string> brands = {
"Foobar", "Foobar2", "Girlx", "Girly", "Girlz", "Girlz", "Girlzz"
};
for(size_t i = 0; i < names.size(); i++) {
nlohmann::json doc1;
doc1["name"] = names[i];
doc1["brand"] = brands[i];
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
}
auto results = coll1->search("jungle girl", {"name", "brand"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false, true},
0).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("jam foo", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {true},
0).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ("6", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("jam foo", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {false},
0).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificMoreTest, OrderWithThreeSortFields) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("type", field_types::INT32, false),
field("valid_from", field_types::INT64, false),
field("created_at", field_types::INT64, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["name"] = "should be 1st";
doc1["type"] = 2;
doc1["valid_from"] = 1655741107972;
doc1["created_at"] = 1655741107724;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
doc1["name"] = "should be 2nd";
doc1["type"] = 1;
doc1["valid_from"] = 1656309617303;
doc1["created_at"] = 1656309617194;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
doc1["name"] = "should be 3rd";
doc1["type"] = 0;
doc1["valid_from"] = 0;
doc1["created_at"] = 1656309677131;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
sort_fields = {sort_by("type", "desc"), sort_by("valid_from", "desc"), sort_by("created_at", "desc")};
auto results = coll1->search("s", {"name"},
"", {}, sort_fields, {2}, 10,
1, FREQUENCY, {true},
0).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][2]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificMoreTest, LongString) {
std::vector<field> fields = {field("name", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::string name;
for(size_t i = 0; i < 100; i++) {
name += "foo" + std::to_string(i) + " ";
}
nlohmann::json doc1;
doc1["name"] = name;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search(name, {"name"},
"", {}, sort_fields, {2}, 10,
1, FREQUENCY, {true},
0).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
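// with query_by weights {3, 2, 1}, docs matching the query in more fields must rank higher; text_match_info exposes the component scores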
TEST_F(CollectionSpecificMoreTest, RelevanceConsiderAllFields) {
std::vector<field> fields = {field("f1", field_types::STRING, false),
field("f2", field_types::STRING, false),
field("f3", field_types::STRING, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["f1"] = "alpha";
doc1["f2"] = "alpha";
doc1["f3"] = "alpha";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
doc1["f1"] = "alpha";
doc1["f2"] = "alpha";
doc1["f3"] = "beta";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
doc1["f1"] = "alpha";
doc1["f2"] = "beta";
doc1["f3"] = "gamma";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("alpha", {"f1", "f2", "f3"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {3, 2, 1}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][2]["document"]["id"].get<std::string>());
// verify match score component values
ASSERT_EQ("578730123365711899", results["hits"][0]["text_match_info"]["score"].get<std::string>());
ASSERT_EQ(3, results["hits"][0]["text_match_info"]["fields_matched"].get<size_t>());
ASSERT_EQ(2, results["hits"][1]["text_match_info"]["fields_matched"].get<size_t>());
ASSERT_EQ(1, results["hits"][2]["text_match_info"]["fields_matched"].get<size_t>());
ASSERT_EQ(1, results["hits"][0]["text_match_info"]["tokens_matched"].get<size_t>());
ASSERT_EQ(1, results["hits"][1]["text_match_info"]["tokens_matched"].get<size_t>());
ASSERT_EQ(1, results["hits"][2]["text_match_info"]["tokens_matched"].get<size_t>());
ASSERT_EQ("1108091339008", results["hits"][0]["text_match_info"]["best_field_score"].get<std::string>());
ASSERT_EQ("1108091339008", results["hits"][1]["text_match_info"]["best_field_score"].get<std::string>());
ASSERT_EQ("1108091339008", results["hits"][2]["text_match_info"]["best_field_score"].get<std::string>());
ASSERT_EQ(3, results["hits"][0]["text_match_info"]["best_field_weight"].get<size_t>());
ASSERT_EQ(3, results["hits"][1]["text_match_info"]["best_field_weight"].get<size_t>());
ASSERT_EQ(3, results["hits"][2]["text_match_info"]["best_field_weight"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificMoreTest, CrossFieldWeightIsNotAugmentated) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("type", field_types::STRING, false)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
nlohmann::json doc1;
doc1["title"] = "Nike Shoerack";
doc1["type"] = "shoe_rack";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
doc1["title"] = "Nike Air Force 1";
doc1["type"] = "shoe";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("nike shoe", {"type", "title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {5, 1}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificMoreTest, HighlightWithAccentedChars) {
std::vector<field> fields = {field(".*", field_types::AUTO, false)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO, {}, {}, true).get();
auto nested_doc = R"({
"title": "Rāpeti Early Learning Centre",
"companies": [
{"title": "Rāpeti Early Learning Centre"}
]
})"_json;
ASSERT_TRUE(coll1->add(nested_doc.dump()).ok());
auto results = coll1->search("rap", {"title", "companies"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("<mark>Rāp</mark>eti Early Learning Centre", results["hits"][0]["highlights"][0]["snippet"].get<std::string>());
auto highlight_doc = R"({
"companies": [
{
"title": {
"matched_tokens": [
"Rāp"
],
"snippet": "<mark>Rāp</mark>eti Early Learning Centre"
}
}
],
"title": {
"matched_tokens": [
"Rāp"
],
"snippet": "<mark>Rāp</mark>eti Early Learning Centre"
}
})"_json;
ASSERT_EQ(highlight_doc.dump(), results["hits"][0]["highlight"].dump());
}
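// process_search_field_weights must normalize arbitrary user weights into the 0..15 range, preserve their relative order, and cap fields beyond the 15th at weight 0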
TEST_F(CollectionSpecificMoreTest, FieldWeightNormalization) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("brand", field_types::STRING, false),
field("type", field_types::STRING, false)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields).get();
std::vector<search_field_t> raw_search_fields = {
search_field_t("title", "title", 110, 2, true, off),
search_field_t("brand", "brand", 25, 2, true, off),
search_field_t("type", "type", 55, 2, true, off),
};
std::vector<uint32_t> query_by_weights = {110, 25, 55};
std::vector<search_field_t> weighted_search_fields;
coll1->process_search_field_weights(raw_search_fields, query_by_weights, weighted_search_fields);
ASSERT_EQ(3, weighted_search_fields.size());
ASSERT_EQ("title", weighted_search_fields[0].name);
ASSERT_EQ("type", weighted_search_fields[1].name);
ASSERT_EQ("brand", weighted_search_fields[2].name);
ASSERT_EQ(15, weighted_search_fields[0].weight);
ASSERT_EQ(14, weighted_search_fields[1].weight);
ASSERT_EQ(13, weighted_search_fields[2].weight);
// same weights
weighted_search_fields.clear();
query_by_weights = {15, 15, 15};
raw_search_fields = {
search_field_t{"title", "title", 15, 2, true, off},
search_field_t{"brand", "brand", 15, 2, true, off},
search_field_t{"type", "type", 15, 2, true, off},
};
coll1->process_search_field_weights(raw_search_fields, query_by_weights, weighted_search_fields);
ASSERT_EQ("title", weighted_search_fields[0].name);
ASSERT_EQ("brand", weighted_search_fields[1].name);
ASSERT_EQ("type", weighted_search_fields[2].name);
ASSERT_EQ(15, weighted_search_fields[0].weight);
ASSERT_EQ(15, weighted_search_fields[1].weight);
ASSERT_EQ(15, weighted_search_fields[2].weight);
// same weights, large values
weighted_search_fields.clear();
query_by_weights = {800, 800, 800};
raw_search_fields = {
search_field_t{"title", "title", 800, 2, true, off},
search_field_t{"brand", "brand", 800, 2, true, off},
search_field_t{"type", "type", 800, 2, true, off},
};
coll1->process_search_field_weights(raw_search_fields, query_by_weights, weighted_search_fields);
ASSERT_EQ("title", weighted_search_fields[0].name);
ASSERT_EQ("brand", weighted_search_fields[1].name);
ASSERT_EQ("type", weighted_search_fields[2].name);
ASSERT_EQ(15, weighted_search_fields[0].weight);
ASSERT_EQ(15, weighted_search_fields[1].weight);
ASSERT_EQ(15, weighted_search_fields[2].weight);
// weights in descending order but exceeding the max allowed weight
weighted_search_fields.clear();
query_by_weights = {603, 602, 601};
raw_search_fields = {
search_field_t{"title", "title", 603, 2, true, off},
search_field_t{"brand", "brand", 602, 2, true, off},
search_field_t{"type", "type", 601, 2, true, off},
};
coll1->process_search_field_weights(raw_search_fields, query_by_weights, weighted_search_fields);
ASSERT_EQ("title", weighted_search_fields[0].name);
ASSERT_EQ("brand", weighted_search_fields[1].name);
ASSERT_EQ("type", weighted_search_fields[2].name);
ASSERT_EQ(15, weighted_search_fields[0].weight);
ASSERT_EQ(14, weighted_search_fields[1].weight);
ASSERT_EQ(13, weighted_search_fields[2].weight);
// number of fields > 15 (must cap least important fields to weight 0)
raw_search_fields.clear();
weighted_search_fields.clear();
query_by_weights.clear();
for(size_t i = 0; i < 17; i++) {
auto fname = "field" + std::to_string(17 - i);
raw_search_fields.push_back(search_field_t{fname, fname, 17 - i, 2, true, off});
query_by_weights.push_back(17 - i);
}
coll1->process_search_field_weights(raw_search_fields, query_by_weights, weighted_search_fields);
ASSERT_EQ("field3", weighted_search_fields[14].name);
ASSERT_EQ("field2", weighted_search_fields[15].name);
ASSERT_EQ("field1", weighted_search_fields[16].name);
ASSERT_EQ(1, weighted_search_fields[14].weight);
ASSERT_EQ(0, weighted_search_fields[15].weight);
ASSERT_EQ(0, weighted_search_fields[16].weight);
// when weights are not given
raw_search_fields.clear();
weighted_search_fields.clear();
query_by_weights.clear();
for(size_t i = 0; i < 17; i++) {
auto field_name = "field" + std::to_string(17 - i);
raw_search_fields.push_back(search_field_t{"field" + std::to_string(17 - i),
field_name, 0, 2, true, off});
}
coll1->process_search_field_weights(raw_search_fields, query_by_weights, weighted_search_fields);
ASSERT_EQ("field3", weighted_search_fields[14].name);
ASSERT_EQ("field2", weighted_search_fields[15].name);
ASSERT_EQ("field1", weighted_search_fields[16].name);
ASSERT_EQ(1, weighted_search_fields[14].weight);
ASSERT_EQ(0, weighted_search_fields[15].weight);
ASSERT_EQ(0, weighted_search_fields[16].weight);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSpecificMoreTest, SearchingForMinusCharacter) {
// when the minus character is part of symbols_to_index, it should not be treated as the exclusion operator
std::vector<field> fields = {field("name", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "points", 0, "", {"-"}, {}
).get();
nlohmann::json doc1;
doc1["name"] = "y = -x + 3 + 2 * x";
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
doc1["name"] = "foo bar";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("-x + 3", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {true},
0).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("-", {"name"},
"", {}, {}, {0}, 10,
1, FREQUENCY, {true},
0).get();
ASSERT_EQ(1, results["hits"].size());
}
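// upsert replaces the whole document (so the dropped title1 field must be de-indexed), while update and emplace accept partial docs and must keep existing field indices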
TEST_F(CollectionSpecificMoreTest, UpsertUpdateEmplaceShouldAllRemoveIndex) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title1", "type": "string", "optional": true},
{"name": "title2", "type": "string", "optional": true},
{"name": "title3", "type": "string", "optional": true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"title1": "Foo",
"title2": "Bar",
"title3": "Baz",
"data": "abcdefghijk"
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
// via upsert
auto doc_update = R"({
"id": "0",
"title2": "Bar",
"title3": "Baz"
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPSERT).ok());
auto results = coll1->search("foo", {"title1"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
results = coll1->search("bar", {"title2"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"][0]["document"].size());
// via update, existing index should not be removed because update can send partial doc
doc_update = R"({
"id": "0",
"title3": "Baz"
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPDATE).ok());
results = coll1->search("bar", {"title2"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// via emplace, existing index should not be removed because emplace could send partial doc
doc_update = R"({
"id": "0"
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), EMPLACE).ok());
results = coll1->search("baz", {"title3"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionSpecificMoreTest, UpdateWithEmptyArray) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "tags", "type": "string[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"tags": ["alpha", "beta", "gamma"]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto doc2 = R"({
"id": "1",
"tags": ["one", "two"]
})"_json;
ASSERT_TRUE(coll1->add(doc2.dump(), CREATE).ok());
// via update
auto doc_update = R"({
"id": "0",
"tags": []
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPDATE).ok());
auto results = coll1->search("alpha", {"tags"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// via upsert
doc_update = R"({
"id": "1",
"tags": []
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPSERT).ok());
results = coll1->search("one", {"tags"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
}
TEST_F(CollectionSpecificMoreTest, UpdateArrayWithNullValue) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "tags", "type": "string[]", "optional": true}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"tags": ["alpha", "beta", "gamma"]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto doc2 = R"({
"id": "1",
"tags": ["one", "two"]
})"_json;
ASSERT_TRUE(coll1->add(doc2.dump(), CREATE).ok());
// via update
auto doc_update = R"({
"id": "0",
"tags": null
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPDATE).ok());
auto results = coll1->search("alpha", {"tags"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// update a document that has no value for the optional field, using a null value
auto doc3 = R"({
"id": "2"
})"_json;
ASSERT_TRUE(coll1->add(doc3.dump(), CREATE).ok());
results = coll1->search("alpha", {"tags"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
doc_update = R"({
"id": "2",
"tags": null
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPDATE).ok());
// via upsert
doc_update = R"({
"id": "1",
"tags": null
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPSERT).ok());
results = coll1->search("one", {"tags"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
}
TEST_F(CollectionSpecificMoreTest, ReplaceArrayElement) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "tags", "type": "string[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"id": "0",
"tags": ["alpha", "beta", "gamma"]
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto doc2 = R"({
"id": "1",
"tags": ["one", "two", "three"]
})"_json;
ASSERT_TRUE(coll1->add(doc2.dump(), CREATE).ok());
// via update
auto doc_update = R"({
"id": "0",
"tags": ["alpha", "gamma"]
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPDATE).ok());
auto results = coll1->search("beta", {"tags"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// via upsert
doc_update = R"({
"id": "1",
"tags": ["one", "three"]
})"_json;
ASSERT_TRUE(coll1->add(doc_update.dump(), UPSERT).ok());
results = coll1->search("two", {"tags"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
}
TEST_F(CollectionSpecificMoreTest, UnorderedWeightingOfFields) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "brand", "type": "string"},
{"name": "sku", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "42f05db9-373a-4372-9bd0-ff4b5aaba28d";
doc["brand"] = "brand";
doc["sku"] = "rgx761";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// with num_typos
auto res_op = coll1->search("rg0761", {"title","brand","sku"}, "", {}, {}, {2,2,0}, 10, 1,
FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {10, 7, 10});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(0, res_op.get()["hits"].size());
// with prefix
res_op = coll1->search("rgx", {"title","brand","sku"}, "", {}, {}, {2,2,0}, 10, 1,
FREQUENCY, {true, true, false},
0, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {10, 7, 10});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(0, res_op.get()["hits"].size());
}
TEST_F(CollectionSpecificMoreTest, IncludeFieldsOnlyId) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "Sample Title";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res_op = coll1->search("*", {}, "", {}, {}, {2}, 10, 1,
FREQUENCY, {true}, 0, {"id"});
ASSERT_TRUE(res_op.ok());
auto res = res_op.get();
ASSERT_EQ(1, res["hits"][0]["document"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, QueryWithOnlySpecialChars) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "Sample Title";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res_op = coll1->search("--", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
auto res = res_op.get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, HandleStringFieldWithObjectValueEarlier) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
// index a "bad" document with title as an object field
nlohmann::json doc;
doc["id"] = "12345";
doc["title"] = R"({"id": 12345})"_json;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// now add another document where `title` is a string
doc["id"] = "12346";
doc["title"] = "Title 2";
add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// try to update the former document
doc["id"] = "12345";
doc["title"] = "Title 1";
add_op = coll1->add(doc.dump(), UPSERT);
ASSERT_TRUE(add_op.ok());
}
TEST_F(CollectionSpecificMoreTest, CopyDocHelper) {
std::vector<highlight_field_t> highlight_items = {
highlight_field_t("foo.bar", false, false, true),
highlight_field_t("baz", false, false, true),
highlight_field_t("not-found", false, false, true),
};
nlohmann::json src = R"({
"baz": {"name": "John"},
"foo.bar": 12345
})"_json;
nlohmann::json dst;
Collection::copy_highlight_doc(highlight_items, true, src, dst);
ASSERT_EQ(2, dst.size());
ASSERT_EQ(1, dst.count("baz"));
ASSERT_EQ(1, dst.count("foo.bar"));
// when both nested & flattened forms are present, copy only the flat form for a collection without nesting enabled
src = R"({
"baz": {"name": "John"},
"baz.name": "John"
})"_json;
dst.clear();
highlight_items = {
highlight_field_t("baz.name", false, false, true),
};
Collection::copy_highlight_doc(highlight_items, false, src, dst);
ASSERT_EQ(1, dst.size());
ASSERT_EQ(1, dst.count("baz.name"));
}
TEST_F(CollectionSpecificMoreTest, HighlightFieldWithBothFlatAndNestedForm) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "name.first", "type": "string"}
]
})"_json;
Collection *coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["name.first"] = "John";
doc["name"]["first"] = "John";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("john", {"name.first"}, "", {}, {}, {2}, 10, 1,
FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("<mark>John</mark>", res["hits"][0]["highlight"]["name.first"]["snippet"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, HighlightWordWithSymbols) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection *coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "var(--icon-secondary-neutral); For components with";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("favicon", {"title"}, "", {}, {}, {2}, 10, 1,
FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "locations.address",
20, {}, {}, {}, 0, "<mark>", "</mark>", {}, 1000, true, false, true,
"title").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("<mark>var(--icon</mark>-secondary-neutral); For components with",
res["hits"][0]["highlight"]["title"]["snippet"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, HighlightObjectShouldBeEmptyWhenNoHighlightFieldFound) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "brand", "type": "string"},
{"name": "sku", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "42f05db9-373a-4372-9bd0-ff4b5aaba28d";
doc["brand"] = "brand";
doc["sku"] = "rgx761";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res_op = coll1->search("brand", {"title", "brand", "sku"}, "", {}, {}, {2, 2, 0}, 10, 1,
FREQUENCY, {true},
10, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "locations.address",
20, {}, {}, {}, 0, "<mark>", "</mark>", {}, 1000, true, false, true,
"title");
ASSERT_TRUE(res_op.ok());
auto res = res_op.get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_TRUE(res["hits"][0]["highlight"]["snippet"].empty());
}
TEST_F(CollectionSpecificMoreTest, WildcardSearchWithNoSortingField) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
// search on empty collection
auto res_op = coll1->search("*", {}, "", {}, {}, {2}, 10, 1,
FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
auto res = res_op.get();
ASSERT_EQ(0, res["hits"].size());
ASSERT_EQ(0, res["found"].get<size_t>());
nlohmann::json doc;
doc["title"] = "Sample Title 1";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "Sample Title 2";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
res_op = coll1->search("*", {}, "", {}, {}, {2}, 10, 1,
FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
res = res_op.get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ(2, res["found"].get<size_t>());
ASSERT_EQ("1", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", res["hits"][1]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, AutoSchemaWithObjectValueAsFirstDoc) {
// when a value is an `object` initially and an integer later, updating the original document should not cause errors
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Sample Title 1";
doc["num"] = nlohmann::json::object();
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "Sample Title 2";
doc["num"] = 42;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// now try updating first doc
doc["id"] = "0";
doc["title"] = "Sample Title 1";
doc["num"] = 100;
ASSERT_TRUE(coll1->add(doc.dump(), UPSERT).ok());
auto res = coll1->search("*", {}, "num:100", {}, {}, {2}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, res["hits"].size());
}
TEST_F(CollectionSpecificMoreTest, VerifyDeletionOfFacetStringIndex) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string", "facet": true},
{"name": "i32", "type": "int32", "facet": true},
{"name": "float", "type": "float", "facet": true},
{"name": "i64", "type": "int64", "facet": true},
{"name": "i32arr", "type": "int32[]", "facet": true},
{"name": "floatarr", "type": "float[]", "facet": true},
{"name": "i64arr", "type": "int64[]", "facet": true}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Title";
doc["i32"] = 100;
doc["float"] = 2.40;
doc["i64"] = 10000;
doc["i32arr"] = {100};
doc["floatarr"] = {2.50};
doc["i64arr"] = {10000};
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto search_index = coll1->_get_index()->_get_search_index();
ASSERT_EQ(7, search_index.size());
for(const auto& kv: search_index) {
ASSERT_EQ(1, kv.second->size);
}
coll1->remove("0");
ASSERT_EQ(7, search_index.size());
for(const auto& kv: search_index) {
ASSERT_EQ(0, kv.second->size);
}
}
TEST_F(CollectionSpecificMoreTest, MustExcludeOutOf) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "Sample Title 1";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
spp::sparse_hash_set<std::string> include_fields;
auto res_op = coll1->search("*", {}, "", {}, {}, {2}, 10, 1,
FREQUENCY, {true}, 0, include_fields, {"out_of"});
ASSERT_TRUE(res_op.ok());
auto res = res_op.get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(0, res.count("out_of"));
}
TEST_F(CollectionSpecificMoreTest, ValidateQueryById) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "doc-1";
doc["title"] = "Sample Title 1";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res_op = coll1->search("doc-1", {"id"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Cannot use `id` as a query by field.", res_op.error());
}
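// when a query token has to be dropped, the doc matching more of the remaining tokens should still rank first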
TEST_F(CollectionSpecificMoreTest, ConsiderDroppedTokensDuringTextMatchScoring) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "name", "type": "string"},
{"name": "brand", "type": "string"}
]
})"_json;
Collection *coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["brand"] = "Neutrogena";
doc["name"] = "Neutrogena Ultra Sheer Oil-Free Face Serum With Vitamin E + SPF 60";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["brand"] = "Neutrogena";
doc["name"] = "Neutrogena Ultra Sheer Liquid Sunscreen SPF 70";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("Neutrogena Ultra Sheer Moisturizing Face Serum", {"brand", "name"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3, 2}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_weight).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", res["hits"][1]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, ConsiderDroppedTokensDuringTextMatchScoring2) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "name", "type": "string"}
]
})"_json;
Collection *coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "Elizabeth Arden 5th Avenue Eau de Parfum 125ml";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "Avène Sun Very High Protection Mineral Cream SPF50+ 50ml";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("avène eau mineral", {"name"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_weight).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ("1", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", res["hits"][1]["document"]["id"].get<std::string>());
}
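// the trailing boolean search argument controls whether the number of matched fields contributes to scoring: disabled yields equal scores, enabled ranks the doc matching both fields higher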
TEST_F(CollectionSpecificMoreTest, DisableFieldCountForScoring) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "name", "type": "string"},
{"name": "brand", "type": "string"}
]
})"_json;
Collection *coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "Alpha beta gamma";
doc["brand"] = "Alpha beta gamma";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "Alpha beta gamma";
doc["brand"] = "Theta";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res_op = coll1->search("beta", {"name", "brand"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3,3}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_score,
100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", true);
auto res = coll1->search("beta", {"name", "brand"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3,3}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_score,
100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", false).get();
size_t score1 = std::stoul(res["hits"][0]["text_match_info"]["score"].get<std::string>());
size_t score2 = std::stoul(res["hits"][1]["text_match_info"]["score"].get<std::string>());
ASSERT_EQ(score1, score2);
res = coll1->search("beta", {"name", "brand"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {3,3}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_score,
100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", true).get();
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", res["hits"][1]["document"]["id"].get<std::string>());
score1 = std::stoul(res["hits"][0]["text_match_info"]["score"].get<std::string>());
score2 = std::stoul(res["hits"][1]["text_match_info"]["score"].get<std::string>());
ASSERT_GT(score1, score2);
}
TEST_F(CollectionSpecificMoreTest, NonNestedFieldNameWithDot) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "category", "type": "string"},
{"name": "category.lvl0", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["category"] = "Shoes";
doc["category.lvl0"] = "Shoes";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["category"] = "Mens";
doc["category.lvl0"] = "Shoes";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("shoes", {"category"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "category", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {1}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, IncludeExcludeUnIndexedField) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Sample Title 1";
doc["src"] = "Internet";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("sample", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0,
{"src"}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(1, res["hits"][0]["document"].size());
ASSERT_EQ("Internet", res["hits"][0]["document"]["src"].get<std::string>());
// check exclusion of an indexed field
spp::sparse_hash_set<std::string> include_fields;
res = coll1->search("sample", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0,
include_fields, {"src"}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(2, res["hits"][0]["document"].size());
ASSERT_EQ("Sample Title 1", res["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, WildcardIncludeExclude) {
nlohmann::json schema = R"({
"name": "posts",
"enable_nested_fields": true,
"fields": [
{"name": "username", "type": "string", "facet": true},
{"name": "user.rank", "type": "int32", "facet": true},
{"name": "user.bio", "type": "string"},
{"name": "likes", "type": "int32"},
{"name": "content", "type": "object"}
],
"default_sorting_field": "likes"
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
std::vector<std::string> json_lines = {
R"({"id": "124","username": "user_a","user": {"rank": 100,"bio": "Hi! I'm user_a"},"likes": 5215,"content": {"title": "title 1","body": "body 1"}})",
R"({"id": "125","username": "user_b","user": {"rank": 50,"bio": "user_b here, nice to meet you!"},"likes": 5215,"content": {"title": "title 2","body": "body 2"}})"
};
for (auto const& json: json_lines){
auto add_op = coll->add(json);
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
// include test: user* matches username, user.bio and user.rank
auto result = coll->search("user_a", {"username"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, {"user*"}).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ(0, result["hits"][0]["document"].count("id"));
ASSERT_EQ(0, result["hits"][0]["document"].count("likes"));
ASSERT_EQ(0, result["hits"][0]["document"].count("content"));
ASSERT_EQ(1, result["hits"][0]["document"].count("user"));
ASSERT_EQ(1, result["hits"][0]["document"]["user"].count("bio"));
ASSERT_EQ(1, result["hits"][0]["document"]["user"].count("rank"));
ASSERT_EQ(1, result["hits"][0]["document"].count("username"));
spp::sparse_hash_set<std::string> include_fields;
// exclude test: user.* matches user.rank and user.bio
result = coll->search("user_a", {"username"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, include_fields, {"user.*"}).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ(1, result["hits"][0]["document"].count("id"));
ASSERT_EQ(1, result["hits"][0]["document"].count("likes"));
ASSERT_EQ(1, result["hits"][0]["document"].count("content"));
ASSERT_EQ(1, result["hits"][0]["document"]["content"].count("title"));
ASSERT_EQ(1, result["hits"][0]["document"]["content"].count("body"));
ASSERT_EQ(0, result["hits"][0]["document"].count("user"));
ASSERT_EQ(1, result["hits"][0]["document"].count("username"));
// No matching field for include_fields/exclude_fields
result = coll->search("user_a", {"username"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, {"foo.*"}).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ(0, result["hits"][0]["document"].size());
result = coll->search("user_a", {"username"}, "", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, include_fields, {"foo.*"}).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ(1, result["hits"][0]["document"].count("id"));
ASSERT_EQ(1, result["hits"][0]["document"].count("likes"));
ASSERT_EQ(1, result["hits"][0]["document"].count("content"));
ASSERT_EQ(1, result["hits"][0]["document"]["content"].count("title"));
ASSERT_EQ(1, result["hits"][0]["document"]["content"].count("body"));
ASSERT_EQ(1, result["hits"][0]["document"].count("user"));
ASSERT_EQ(1, result["hits"][0]["document"]["user"].count("bio"));
ASSERT_EQ(1, result["hits"][0]["document"]["user"].count("rank"));
ASSERT_EQ(1, result["hits"][0]["document"].count("username"));
}
TEST_F(CollectionSpecificMoreTest, EmplaceWithNullValue) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "is_valid", "type": "bool", "optional": true}
]
})"_json;
Collection *coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["is_valid"] = nullptr;
ASSERT_TRUE(coll1->add(doc.dump(), EMPLACE).ok());
}
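// phrase queries containing repeated tokens must only match when the exact token sequence occurs in the document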
TEST_F(CollectionSpecificMoreTest, PhraseMatchRepeatingTokens) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Super easy super fast product";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "The really easy really fast product really";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search(R"("super easy super fast")", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
res = coll1->search(R"("super easy super")", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
res = coll1->search(R"("the really easy really fast product really")", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("1", res["hits"][0]["document"]["id"].get<std::string>());
// these should not match
res = coll1->search(R"("the easy really really product fast really")", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, res["hits"].size());
res = coll1->search(R"("really the easy really fast product really")", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, res["hits"].size());
res = coll1->search(R"("super super easy fast")", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, res["hits"].size());
res = coll1->search(R"("super super easy")", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, res["hits"].size());
res = coll1->search(R"("product fast")", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(0, res["hits"].size());
}
TEST_F(CollectionSpecificMoreTest, PhraseMatchMultipleFields) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "author", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "A Walk to the Tide Pools";
doc["author"] = "Nok Nok";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "Random Title";
doc["author"] = "Tide Pools";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search(R"("tide pools")", {"title", "author"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", res["hits"][1]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, PhraseMatchAcrossArrayElements) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "texts", "type": "string[]"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["texts"] = {"state of the", "of the art"};
ASSERT_TRUE(coll1->add(doc.dump()).ok());
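// an unterminated phrase quote appears to be treated as a regular search and matches, while a properly closed phrase must not match across array elements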
auto res = coll1->search(R"("state of the art)", {"texts"}, "", {}, {}, {0}, 10, 1,
FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, res["hits"].size());
res = coll1->search(R"("state of the art")", {"texts"}, "", {}, {}, {0}, 10, 1,
FREQUENCY, {true}, 10, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(0, res["hits"].size());
}
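// the weight of the matched field takes precedence: doc 0 matches via the higher-weighted brand field and outranks doc 1, which matches both tokens only in the lower-weighted title field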
TEST_F(CollectionSpecificMoreTest, WeightTakingPrecendeceOverMatch) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "brand", "type": "string"},
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Healthy Mayo";
doc["brand"] = "Light Plus";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["title"] = "Healthy Light Mayo";
doc["brand"] = "Vegabond";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("light mayo", {"brand", "title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 0, 0, 0, 2, false, "", true, 0, max_weight).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", res["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1108091338752", res["hits"][0]["text_match_info"]["best_field_score"].get<std::string>());
ASSERT_EQ(15, res["hits"][0]["text_match_info"]["best_field_weight"].get<size_t>());
ASSERT_EQ(2, res["hits"][0]["text_match_info"]["fields_matched"].get<size_t>());
ASSERT_EQ(2, res["hits"][0]["text_match_info"]["tokens_matched"].get<size_t>());
ASSERT_EQ("2211897868288", res["hits"][1]["text_match_info"]["best_field_score"].get<std::string>());
ASSERT_EQ(14, res["hits"][1]["text_match_info"]["best_field_weight"].get<size_t>());
ASSERT_EQ(1, res["hits"][1]["text_match_info"]["fields_matched"].get<size_t>());
ASSERT_EQ(2, res["hits"][1]["text_match_info"]["tokens_matched"].get<size_t>());
}
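// the $operations.increment directive should initialize the counter on first write and add to it on subsequent upsert / emplace / update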
TEST_F(CollectionSpecificMoreTest, IncrementingCount) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "count", "type": "int32"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
// brand new document: create + upsert + emplace should work
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Foo";
doc["$operations"]["increment"]["count"] = 1;
ASSERT_TRUE(coll1->add(doc.dump(), CREATE).ok());
doc.clear();
doc["id"] = "1";
doc["title"] = "Bar";
doc["$operations"]["increment"]["count"] = 1;
ASSERT_TRUE(coll1->add(doc.dump(), EMPLACE).ok());
doc.clear();
doc["id"] = "2";
doc["title"] = "Taz";
doc["$operations"]["increment"]["count"] = 1;
ASSERT_TRUE(coll1->add(doc.dump(), UPSERT).ok());
auto res = coll1->search("*", {}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(3, res["hits"].size());
ASSERT_EQ(1, res["hits"][0]["document"]["count"].get<size_t>());
ASSERT_EQ(1, res["hits"][1]["document"]["count"].get<size_t>());
ASSERT_EQ(1, res["hits"][2]["document"]["count"].get<size_t>());
// should support updates
doc.clear();
doc["id"] = "0";
doc["title"] = "Foo";
doc["$operations"]["increment"]["count"] = 3;
ASSERT_TRUE(coll1->add(doc.dump(), UPSERT).ok());
doc.clear();
doc["id"] = "1";
doc["title"] = "Bar";
doc["$operations"]["increment"]["count"] = 3;
ASSERT_TRUE(coll1->add(doc.dump(), EMPLACE).ok());
doc.clear();
doc["id"] = "2";
doc["title"] = "Bar";
doc["$operations"]["increment"]["count"] = 3;
ASSERT_TRUE(coll1->add(doc.dump(), UPDATE).ok());
res = coll1->search("*", {}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(3, res["hits"].size());
ASSERT_EQ(4, res["hits"][0]["document"]["count"].get<size_t>());
ASSERT_EQ(4, res["hits"][1]["document"]["count"].get<size_t>());
ASSERT_EQ(4, res["hits"][2]["document"]["count"].get<size_t>());
}
TEST_F(CollectionSpecificMoreTest, HighlightOnFieldNameWithDot) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "org.title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["org.title"] = "Infinity Inc.";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("infinity", {"org.title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(1, res["hits"][0]["highlights"].size());
ASSERT_EQ("<mark>Infinity</mark> Inc.", res["hits"][0]["highlights"][0]["snippet"].get<std::string>());
nlohmann::json highlight = R"({"org.title":{"matched_tokens":["Infinity"],"snippet":"<mark>Infinity</mark> Inc."}})"_json;
ASSERT_EQ(highlight.dump(), res["hits"][0]["highlight"].dump());
// even with nested fields enabled, plain field names containing dots should work fine
schema = R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": "org.title", "type": "string"}
]
})"_json;
Collection* coll2 = collectionManager.create_collection(schema).get();
ASSERT_TRUE(coll2->add(doc.dump()).ok());
res = coll2->search("infinity", {"org.title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(0, res["hits"][0]["highlights"].size());
highlight = R"({"org.title":{"matched_tokens":["Infinity"],"snippet":"<mark>Infinity</mark> Inc."}})"_json;
ASSERT_EQ(highlight.dump(), res["hits"][0]["highlight"].dump());
}
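// With the search time limit dialled down to 1 ms, scanning 70K documents should trip the
// cutoff and report `search_cutoff: true` in the response.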
TEST_F(CollectionSpecificMoreTest, SearchCutoffTest) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "desc", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
for(size_t i = 0; i < 70 * 1000; i++) {
nlohmann::json doc;
doc["title"] = "foobarbaz1";
doc["desc"] = "2";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto coll_op = coll1->search("foobarbar1 2", {"title", "desc"}, "", {}, {}, {2}, 3, 1, FREQUENCY, {false}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 1);
ASSERT_TRUE(coll_op.ok());
ASSERT_TRUE(coll_op.get()["search_cutoff"]);
}
TEST_F(CollectionSpecificMoreTest, ExhaustiveSearchWithoutExplicitDropTokens) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "alpha beta gamma";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "alpha";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
bool exhaustive_search = true;
size_t drop_tokens_threshold = 1;
auto res = coll1->search("alpha beta", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {false}, drop_tokens_threshold,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", exhaustive_search).get();
ASSERT_EQ(2, res["hits"].size());
}
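// Exercises the drop-tokens direction parameter: left_to_right drops leading query tokens,
// right_to_left drops trailing ones, both_sides:<n> tries both, and malformed values error out.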
TEST_F(CollectionSpecificMoreTest, DropTokensLeftToRightFirst) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "alpha beta";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "beta gamma";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
bool exhaustive_search = false;
size_t drop_tokens_threshold = 1;
auto res = coll1->search("alpha beta gamma", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {false}, drop_tokens_threshold,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "left_to_right").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("1", res["hits"][0]["document"]["id"].get<std::string>());
res = coll1->search("alpha beta gamma", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {false}, drop_tokens_threshold,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
// search on both sides
res = coll1->search("alpha gamma", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {false}, drop_tokens_threshold,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "both_sides:3").get();
ASSERT_EQ(2, res["hits"].size());
// but must respect the token limit
res = coll1->search("alpha gamma", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {false}, drop_tokens_threshold,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "both_sides:1").get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
// validation checks
auto res_op = coll1->search("alpha gamma", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {false}, drop_tokens_threshold,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "all_sides");
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Invalid format for drop tokens mode.", res_op.error());
res_op = coll1->search("alpha gamma", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {false}, drop_tokens_threshold,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "both_sides:x");
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Invalid format for drop tokens mode.", res_op.error());
}
TEST_F(CollectionSpecificMoreTest, DoNotHighlightFieldsForSpecialCharacterQuery) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "description", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "alpha beta gamma";
doc["description"] = "alpha beta gamma";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("'", {"title", "description"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {false}, 1,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(0, res["hits"][0]["highlight"].size());
ASSERT_EQ(0, res["hits"][0]["highlights"].size());
}
TEST_F(CollectionSpecificMoreTest, SearchForURL) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "url", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["url"] = "https://www.cpf.gov.sg/member/infohub/cpf-clarifies/policy-faqs/"
"why-interest-earned-on-cpf-life-premium-not-paid-to-beneficiaries";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("https://www.cpf.gov.sg/member/infohub/cpf-clarifies/policy-faqs/"
"why-interest-earned-on-cpf-life-premium-not-paid-to-beneficiaries", {"url"}, "",
{}, {}, {2}, 3, 1,
FREQUENCY, {true}).get();
ASSERT_EQ(1, res["hits"].size());
}
TEST_F(CollectionSpecificMoreTest, CrossFieldTypoAndPrefixWithWeights) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "color", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Cool trousers";
doc["color"] = "blue";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("trouzers", {"title", "color"}, "", {}, {}, {2, 0}, 10, 1, FREQUENCY, {true}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {2, 3}).get();
ASSERT_EQ(1, res["hits"].size());
res = coll1->search("trou", {"title", "color"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true, false}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {2, 3}).get();
ASSERT_EQ(1, res["hits"].size());
}
TEST_F(CollectionSpecificMoreTest, AnalyticsFullFirstQuery) {
Config::get_instance().set_enable_search_analytics(true);
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "color", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Cool cotton trousers";
doc["color"] = "blue";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res = coll1->search("co", {"title", "color"}, "", {}, {}, {2, 0}, 10, 1, FREQUENCY, {true}, 0,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {2, 3}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("cool", res["request_params"]["first_q"].get<std::string>());
res = coll1->search("cool pants", {"title", "color"}, "", {}, {}, {2, 0}, 10, 1, FREQUENCY, {true}, 1,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 40, {}, {}, {}, 0,
"<mark>", "</mark>", {2, 3}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("cool pants", res["request_params"]["first_q"].get<std::string>());
Config::get_instance().set_enable_search_analytics(false);
}
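// truncate_after_top_k should retain only the k documents with the highest `points` values
// and remove the rest from the collection.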
TEST_F(CollectionSpecificMoreTest, TruncateAfterTopK) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
for(auto i = -10; i < 5; i++) {
nlohmann::json doc;
doc["title"] = std::to_string(i);
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
for(auto i = 0; i < 5; i++) {
nlohmann::json doc;
doc["title"] = std::to_string(10 + i);
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
/* Values Doc ids
-10 0
-9 1
-8 2
-7 3
-6 4
-5 5
-4 6
-3 7
-2 8
-1 9
0 10, 15
1 11, 16
2 12, 17
3 13, 18
4 14, 19
*/
auto results = coll1->search("*", {"*"}, "", {}, {}, {0}).get();
ASSERT_EQ(20, results["found"]);
coll1->truncate_after_top_k("points", 15);
results = coll1->search("*", {"*"}, "", {}, {}, {0}).get();
ASSERT_EQ(15, results["found"]);
std::vector<std::string> ids = {"19", "18", "17", "16", "15", "14", "13", "12", "11", "10", "9", "8", "7", "6", "5"};
for(size_t i = 0; i < results["hits"].size(); i++) {
ASSERT_EQ(ids[i], results["hits"][i]["document"]["id"]);
}
coll1->truncate_after_top_k("points", 11);
results = coll1->search("*", {"*"}, "", {}, {}, {0}).get();
ASSERT_EQ(11, results["found"]);
ids = {"19", "18", "17", "16", "15", "14", "13", "12", "11", "10", "9"};
for(size_t i = 0; i < results["hits"].size(); i++) {
ASSERT_EQ(ids[i], results["hits"][i]["document"]["id"]);
}
coll1->truncate_after_top_k("points", 5);
results = coll1->search("*", {"*"}, "", {}, {}, {0}).get();
ASSERT_EQ(5, results["found"]);
ids = {"19", "18", "14", "13", "12"};
for(size_t i = 0; i < results["hits"].size(); i++) {
ASSERT_EQ(ids[i], results["hits"][i]["document"]["id"]);
}
}
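// In a hybrid search where only the vector component matches, text_match_info should report
// a zero score and zero matched fields/tokens for every hit.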
TEST_F(CollectionSpecificMoreTest, HybridSearchTextMatchInfo) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string", "infix": true},
{"name": "product_description", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_description"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair."
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients."
})"_json
};
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
auto coll1 = collection_create_op.get();
auto results = coll1->search("natural products", {"product_name", "embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(2, results["hits"].size());
// It's a hybrid search with only vector match
ASSERT_EQ("0", results["hits"][0]["text_match_info"]["score"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["text_match_info"]["score"].get<std::string>());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["fields_matched"].get<size_t>());
ASSERT_EQ(0, results["hits"][1]["text_match_info"]["fields_matched"].get<size_t>());
ASSERT_EQ(0, results["hits"][0]["text_match_info"]["tokens_matched"].get<size_t>());
ASSERT_EQ(0, results["hits"][1]["text_match_info"]["tokens_matched"].get<size_t>());
}
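// With typo tolerance disabled for numerical tokens, "XYZ-12345678" must not fuzzy-match
// "XYZ-22345678"; the plain search afterwards matches both documents.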
TEST_F(CollectionSpecificMoreTest, DisableTyposForNumericalTokens) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
],
"token_separators": ["-"]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "XYZ-12345678";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "XYZ-22345678";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// with typo tolerance disabled for numerical tokens
auto res_op = coll1->search("XYZ-12345678", {"title"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", false);
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["hits"].size());
res_op = coll1->search("XYZ-12345678", {"title"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 400);
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(2, res_op.get()["hits"].size());
}
TEST_F(CollectionSpecificMoreTest, DisableHighlightForLongFields) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "description", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::string description;
for(size_t i = 0; i < 70*1000; i++) {
description += StringUtils::randstring(4) + " ";
}
description += "foobar";
nlohmann::json doc;
doc["description"] = description;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto res_op = coll1->search("foobar", {"description"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "");
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["hits"].size());
ASSERT_EQ(0, res_op.get()["hits"][0]["highlight"].size());
// if token is found within first 64K offsets, we will highlight
description = "";
for(size_t i = 0; i < 1000; i++) {
description += StringUtils::randstring(4) + " ";
}
description += " bazinga ";
for(size_t i = 0; i < 70*1000; i++) {
description += StringUtils::randstring(4) + " ";
}
doc["description"] = description;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
res_op = coll1->search("bazinga", {"description"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "");
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["hits"].size());
ASSERT_EQ(1, res_op.get()["hits"][0]["highlight"].size());
}
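// With `stem: true` on the field, querying "run" matches "running"; without stemming it does not.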
TEST_F(CollectionSpecificMoreTest, StemmingEnglish) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string", "stem": true}
]
})"_json;
auto coll_stem_res = collectionManager.create_collection(schema);
ASSERT_TRUE(coll_stem_res.ok());
auto coll_stem = coll_stem_res.get();
schema = R"({
"name": "test2",
"fields": [
{"name": "name", "type": "string", "stem": false}
]
})"_json;
auto coll_no_stem_res = collectionManager.create_collection(schema);
ASSERT_TRUE(coll_no_stem_res.ok());
auto coll_no_stem = coll_no_stem_res.get();
nlohmann::json doc;
doc["name"] = "running";
ASSERT_TRUE(coll_stem->add(doc.dump()).ok());
ASSERT_TRUE(coll_no_stem->add(doc.dump()).ok());
auto stem_res = coll_stem->search("run", {"name"}, {}, {}, {}, {0}, 10, 1, FREQUENCY, {false}, 1);
ASSERT_TRUE(stem_res.ok());
ASSERT_EQ(1, stem_res.get()["hits"].size());
ASSERT_EQ("running", stem_res.get()["hits"][0]["highlight"]["name"]["matched_tokens"][0].get<std::string>());
ASSERT_EQ("<mark>running</mark>", stem_res.get()["hits"][0]["highlight"]["name"]["snippet"].get<std::string>());
auto no_stem_res = coll_no_stem->search("run", {"name"}, {}, {}, {}, {0}, 10, 1, FREQUENCY, {false}, 1);
ASSERT_TRUE(no_stem_res.ok());
ASSERT_EQ(0, no_stem_res.get()["hits"].size());
}
TEST_F(CollectionSpecificMoreTest, StemmingEnglishWithCaps) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{
"facet": false,
"index": true,
"infix": true,
"locale": "",
"name": "name",
"optional": false,
"sort": false,
"stem": false,
"store": true,
"type": "string"
},
{
"facet": true,
"index": true,
"infix": true,
"locale": "",
"name": "subClass",
"optional": true,
"sort": false,
"stem": true,
"store": true,
"type": "string"
}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "Onion Coo Usa";
doc["subClass"] = "ONIONS";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "Mccormick Onion Dip Mix";
doc["subClass"] = "GRAVY/SAUCE PACKETS";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
std::vector<sort_by> sort_fields = {};
auto res = coll1->search("onions", {"subClass","name"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_STREQ("0", res["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", res["hits"][1]["document"]["id"].get<std::string>().c_str());
}
TEST_F(CollectionSpecificMoreTest, StemmingEnglishPrefixHighlight) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{
"facet": false,
"index": true,
"infix": true,
"locale": "",
"name": "name",
"optional": false,
"sort": false,
"stem": false,
"store": true,
"type": "string"
},
{
"facet": true,
"index": true,
"infix": true,
"locale": "",
"name": "subClass",
"optional": true,
"sort": false,
"stem": true,
"store": true,
"type": "string"
}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "Generic Red Onions";
doc["subClass"] = "ONIONS";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
std::vector<sort_by> sort_fields = {};
auto res = coll1->search("onions", {"subClass","name"}, "", {}, sort_fields, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("Generic Red <mark>Onions</mark>", res["hits"][0]["highlight"]["name"]["snippet"].get<std::string>());
ASSERT_EQ("<mark>ONIONS</mark>", res["hits"][0]["highlight"]["subClass"]["snippet"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, StemmingEnglishHighlights) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string", "stem": true}
]
})"_json;
auto coll_stem_res = collectionManager.create_collection(schema);
ASSERT_TRUE(coll_stem_res.ok());
auto coll_stem = coll_stem_res.get();
nlohmann::json doc;
doc["name"] = "Running runs";
ASSERT_TRUE(coll_stem->add(doc.dump()).ok());
auto res = coll_stem->search("run", {"name"}, {}, {}, {}, {0}, 10, 1, FREQUENCY, {false}, 1).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(2, res["hits"][0]["highlight"]["name"]["matched_tokens"].size());
ASSERT_EQ("Running", res["hits"][0]["highlight"]["name"]["matched_tokens"][0].get<std::string>());
ASSERT_EQ("runs", res["hits"][0]["highlight"]["name"]["matched_tokens"][1].get<std::string>());
ASSERT_EQ("<mark>Running</mark> <mark>runs</mark>", res["hits"][0]["highlight"]["name"]["snippet"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, StemmingCyrillic) {
nlohmann::json schema = R"({
"name": "words",
"fields": [
{"name": "word", "type": "string", "stem": true, "locale": "ru"}
]
})"_json;
auto coll_stem_res = collectionManager.create_collection(schema);
ASSERT_TRUE(coll_stem_res.ok());
auto coll_stem = coll_stem_res.get();
ASSERT_TRUE(coll_stem->add(R"({"word": "доверенное"})"_json.dump()).ok());
ASSERT_TRUE(coll_stem->add(R"({"word": "доверенные"})"_json.dump()).ok());
auto res = coll_stem->search("доверенное", {"word"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, res["hits"].size());
}
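// text_match_info.num_tokens_dropped should report how many query tokens were dropped to
// produce each hit.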
TEST_F(CollectionSpecificMoreTest, NumDroppedTokensTest) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection *coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "alpha beta";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "beta gamma";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "gamma delta";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "delta epsilon";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "epsilon alpha";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
bool exhaustive_search = false;
size_t drop_tokens_threshold = 5;
auto res = coll1->search("alpha zeta gamma", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true},
drop_tokens_threshold).get();
ASSERT_EQ(4, res["hits"].size());
ASSERT_EQ("4", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("epsilon alpha", res["hits"][0]["document"]["title"]);
ASSERT_EQ(2, res["hits"][0]["text_match_info"]["num_tokens_dropped"]);
ASSERT_EQ("2", res["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("gamma delta", res["hits"][1]["document"]["title"]);
ASSERT_EQ(2, res["hits"][1]["text_match_info"]["num_tokens_dropped"]);
ASSERT_EQ("1", res["hits"][2]["document"]["id"].get<std::string>());
ASSERT_EQ("beta gamma", res["hits"][2]["document"]["title"]);
ASSERT_EQ(2, res["hits"][2]["text_match_info"]["num_tokens_dropped"]);
ASSERT_EQ("0", res["hits"][3]["document"]["id"].get<std::string>());
ASSERT_EQ("alpha beta", res["hits"][3]["document"]["title"]);
ASSERT_EQ(2, res["hits"][3]["text_match_info"]["num_tokens_dropped"]);
res = coll1->search("zeta theta epsilon", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true},
drop_tokens_threshold).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ("4", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("epsilon alpha", res["hits"][0]["document"]["title"]);
ASSERT_EQ(2, res["hits"][0]["text_match_info"]["num_tokens_dropped"]);
ASSERT_EQ("3", res["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("delta epsilon", res["hits"][1]["document"]["title"]);
ASSERT_EQ(2, res["hits"][1]["text_match_info"]["num_tokens_dropped"]);
drop_tokens_threshold = 1;
res = coll1->search("alpha beta gamma", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true},
drop_tokens_threshold).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("alpha beta", res["hits"][0]["document"]["title"]);
ASSERT_EQ(1, res["hits"][0]["text_match_info"]["num_tokens_dropped"]);
}
TEST_F(CollectionSpecificMoreTest, TestStemming2) {
nlohmann::json schema = R"({
"name": "words",
"fields": [
{"name": "word", "type": "string", "stem": true }
]
})"_json;
auto coll_stem_res = collectionManager.create_collection(schema);
ASSERT_TRUE(coll_stem_res.ok());
auto coll_stem = coll_stem_res.get();
ASSERT_TRUE(coll_stem->add(R"({"word": "Walk"})"_json.dump()).ok());
ASSERT_TRUE(coll_stem->add(R"({"word": "Walks"})"_json.dump()).ok());
ASSERT_TRUE(coll_stem->add(R"({"word": "Walked"})"_json.dump()).ok());
ASSERT_TRUE(coll_stem->add(R"({"word": "Walking"})"_json.dump()).ok());
ASSERT_TRUE(coll_stem->add(R"({"word": "Walkings"})"_json.dump()).ok());
ASSERT_TRUE(coll_stem->add(R"({"word": "Walker"})"_json.dump()).ok());
ASSERT_TRUE(coll_stem->add(R"({"word": "Walkers"})"_json.dump()).ok());
auto res = coll_stem->search("Walking", {"word"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(7, res["hits"].size());
}
TEST_F(CollectionSpecificMoreTest, TestStemmingWithSynonym) {
nlohmann::json schema = R"({
"name": "words",
"fields": [
{"name": "word", "type": "string", "stem": true }
]
})"_json;
auto coll_stem_res = collectionManager.create_collection(schema);
ASSERT_TRUE(coll_stem_res.ok());
auto coll_stem = coll_stem_res.get();
nlohmann::json synonym_json = R"(
{
"id": "",
"synonyms": ["making", "foobar"]
}
)"_json;
LOG(INFO) << "Adding synonym...";
auto synonym_op = coll_stem->add_synonym(synonym_json);
LOG(INFO) << "Synonym added...";
ASSERT_TRUE(synonym_op.ok());
ASSERT_TRUE(coll_stem->add(R"({"word": "foobar"})"_json.dump()).ok());
auto res = coll_stem->search("making", {"word"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("foobar", res["hits"][0]["document"]["word"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, TestFieldStore) {
nlohmann::json schema = R"({
"name": "words",
"fields": [
{"name": "word_to_store", "type": "string", "store": true },
{"name": "word_not_to_store", "type": "string", "store": false }
]
})"_json;
auto coll_store_res = collectionManager.create_collection(schema);
ASSERT_TRUE(coll_store_res.ok());
auto coll_store = coll_store_res.get();
nlohmann::json doc;
doc["word_to_store"] = "store";
doc["word_not_to_store"] = "not store";
ASSERT_TRUE(coll_store->add(doc.dump()).ok());
auto res = coll_store->search("*", {}, {}, {}, {}, {0}, 10, 1, FREQUENCY, {false}, 1);
ASSERT_TRUE(res.ok());
ASSERT_EQ(1, res.get()["hits"].size());
ASSERT_EQ("store", res.get()["hits"][0]["document"]["word_to_store"].get<std::string>());
ASSERT_TRUE(res.get()["hits"][0]["document"].count("word_not_to_store") == 0);
}
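// enable_typos_for_alpha_numerical_tokens=false limits fuzzy matching for tokens that mix
// letters, digits and symbols; enabling it brings in the looser matches as well.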
TEST_F(CollectionSpecificMoreTest, EnableTyposForAlphaNumericalTokens) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
],
"symbols_to_index":["/"]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "c-136/14";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "13/14";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "(136)214";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "c136/14";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["title"] = "A-136/14";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
bool enable_typos_for_alpha_numerical_tokens = false;
auto res = coll1->search("c-136/14", {"title"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", true, true, false, 0, true,
enable_typos_for_alpha_numerical_tokens).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ("c136/14", res["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ("c-136/14", res["hits"][1]["document"]["title"].get<std::string>());
enable_typos_for_alpha_numerical_tokens = true;
res = coll1->search("c-136/14", {"title"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", true, true, false, 0, true,
enable_typos_for_alpha_numerical_tokens).get();
ASSERT_EQ(5, res["hits"].size());
ASSERT_EQ("c136/14", res["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ("c-136/14", res["hits"][1]["document"]["title"].get<std::string>());
ASSERT_EQ("A-136/14", res["hits"][2]["document"]["title"].get<std::string>());
ASSERT_EQ("(136)214", res["hits"][3]["document"]["title"].get<std::string>());
ASSERT_EQ("13/14", res["hits"][4]["document"]["title"].get<std::string>());
}
TEST_F(CollectionSpecificMoreTest, PopulateIncludeExcludeFields) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string", "infix": true},
{"name": "product_description", "type": "string"},
{"name": "product_embedding", "type":"float[]", "embed":{"from": ["product_description"], "model_config": {"model_name": "ts/e5-small"}}},
{"name": "rating", "type": "int32"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair.",
"rating": "2"
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients.",
"rating": "4"
})"_json
};
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
auto products_coll = collection_create_op.get();
spp::sparse_hash_set<std::string> input_include_fields;
spp::sparse_hash_set<std::string> input_exclude_fields;
tsl::htrie_set<char> output_include_fields;
tsl::htrie_set<char> output_exclude_fields;
input_include_fields = {"product_*"};
products_coll->populate_include_exclude_fields_lk(input_include_fields, input_exclude_fields, output_include_fields,
output_exclude_fields);
std::vector<std::string> expected = {"product_id", "product_name", "product_description"};
for (const auto& item: expected) {
ASSERT_EQ(1, output_include_fields.count(item));
}
input_include_fields = {"product_*"};
products_coll->populate_include_exclude_fields_lk(input_include_fields, input_exclude_fields, output_include_fields,
output_exclude_fields);
expected = {"product_id", "product_name", "product_description", "product_embedding"};
for (const auto& item: expected) {
ASSERT_EQ(1, output_include_fields.count(item));
}
}
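// With validate_field_names=true an unknown query_by field is rejected; with it disabled the
// search proceeds and simply returns zero hits.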
TEST_F(CollectionSpecificMoreTest, IgnoreMissingQueryByFields) {
nlohmann::json schema = R"({
"name": "test",
"enable_nested_fields": true,
"fields": [
{
"name": "parts",
"type": "object"
}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"parts": {
"1": {"id": "foo", "price": 10},
"2": {"id": "bar", "price": 15},
"3": {"id": "zip", "price": 20}
}
})"_json.dump());
ASSERT_TRUE(add_op.ok());
bool validate_field_names = true;
auto res_op = coll->search("foo", {"parts.10"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
{"embedding"}, 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", true, true, false, 0, true,
true, DEFAULT_FILTER_BY_CANDIDATES, false, validate_field_names);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `parts.10` in the schema.", res_op.error());
validate_field_names = false;
res_op = coll->search("foo", {"parts.10"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
{"embedding"}, 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", true, true, false, 0, true,
true, DEFAULT_FILTER_BY_CANDIDATES, false, validate_field_names);
ASSERT_TRUE(res_op.ok());
auto res = res_op.get();
ASSERT_EQ(0, res["hits"].size());
ASSERT_EQ(0, res["found"].get<size_t>());
}
| 135,792 | C++ | .cpp | 2,703 | 40.012949 | 175 | 0.521852 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,726 | collection_synonyms_test.cpp | typesense_typesense/test/collection_synonyms_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionSynonymsTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
Collection *coll_mul_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_override";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("starring", field_types::STRING, true),
field("cast", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false)
};
coll_mul_fields = collectionManager.get_collection("coll_mul_fields").get();
if(coll_mul_fields == nullptr) {
coll_mul_fields = collectionManager.create_collection("coll_mul_fields", 4, fields, "points").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_mul_fields->add(json_line);
}
infile.close();
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.drop_collection("coll_mul_fields");
collectionManager.dispose();
delete store;
}
};
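// Covers synonym_t::parse: valid one-way and multi-way definitions, symbols_to_index
// round-tripping, and the error message returned for each malformed input.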
TEST_F(CollectionSynonymsTest, SynonymParsingFromJson) {
nlohmann::json syn_json = {
{"id", "syn-1"},
{"root", "Ocean"},
{"synonyms", {"Sea"} }
};
synonym_t synonym;
auto syn_op = synonym_t::parse(syn_json, synonym);
ASSERT_TRUE(syn_op.ok());
ASSERT_STREQ("syn-1", synonym.id.c_str());
ASSERT_STREQ("ocean", synonym.root[0].c_str());
ASSERT_STREQ("sea", synonym.synonyms[0][0].c_str());
// should accept without root
nlohmann::json syn_json_without_root = {
{"id", "syn-1"},
{"synonyms", {"Sea", "ocean"} }
};
syn_op = synonym_t::parse(syn_json_without_root, synonym);
ASSERT_TRUE(syn_op.ok());
// should preserve symbols
nlohmann::json syn_plus_json = {
{"id", "syn-plus"},
{"root", "+"},
{"synonyms", {"plus", "#"} },
{"symbols_to_index", {"+", "#"}},
};
synonym_t synonym_plus;
syn_op = synonym_t::parse(syn_plus_json, synonym_plus);
ASSERT_TRUE(syn_op.ok());
ASSERT_STREQ("syn-plus", synonym_plus.id.c_str());
ASSERT_STREQ("+", synonym_plus.root[0].c_str());
ASSERT_STREQ("plus", synonym_plus.synonyms[0][0].c_str());
ASSERT_STREQ("#", synonym_plus.synonyms[1][0].c_str());
nlohmann::json view_json = synonym_plus.to_view_json();
ASSERT_EQ(2, view_json["symbols_to_index"].size());
ASSERT_EQ("+", view_json["symbols_to_index"][0].get<std::string>());
ASSERT_EQ("#", view_json["symbols_to_index"][1].get<std::string>());
// when `id` is not given
nlohmann::json syn_json_without_id = {
{"root", "Ocean"},
{"synonyms", {"Sea"} }
};
syn_op = synonym_t::parse(syn_json_without_id, synonym);
ASSERT_FALSE(syn_op.ok());
ASSERT_STREQ("Missing `id` field.", syn_op.error().c_str());
// synonyms missing
nlohmann::json syn_json_without_synonyms = {
{"id", "syn-1"},
{"root", "Ocean"}
};
syn_op = synonym_t::parse(syn_json_without_synonyms, synonym);
ASSERT_FALSE(syn_op.ok());
ASSERT_STREQ("Could not find an array of `synonyms`", syn_op.error().c_str());
// synonyms bad type
nlohmann::json syn_json_bad_type1 = R"({
"id": "syn-1",
"root": "Ocean",
"synonyms": [["Sea", 1]]
})"_json;
syn_op = synonym_t::parse(syn_json_bad_type1, synonym);
ASSERT_FALSE(syn_op.ok());
ASSERT_STREQ("Could not find a valid string array of `synonyms`", syn_op.error().c_str());
nlohmann::json syn_json_bad_type3 = {
{"id", "syn-1"},
{"root", "Ocean"},
{"synonyms", {} }
};
syn_op = synonym_t::parse(syn_json_bad_type3, synonym);
ASSERT_FALSE(syn_op.ok());
ASSERT_STREQ("Could not find an array of `synonyms`", syn_op.error().c_str());
// empty string in synonym list
nlohmann::json syn_json_bad_type4 = R"({
"id": "syn-1",
"root": "Ocean",
"synonyms": [["Foo", ""]]
})"_json;
syn_op = synonym_t::parse(syn_json_bad_type4, synonym);
ASSERT_FALSE(syn_op.ok());
ASSERT_STREQ("Could not find a valid string array of `synonyms`", syn_op.error().c_str());
// root bad type
nlohmann::json syn_json_root_bad_type = {
{"id", "syn-1"},
{"root", 120},
{"synonyms", {"Sea"} }
};
syn_op = synonym_t::parse(syn_json_root_bad_type, synonym);
ASSERT_FALSE(syn_op.ok());
ASSERT_STREQ("Key `root` should be a string.", syn_op.error().c_str());
// bad symbols to index
nlohmann::json syn_json_bad_symbols = {
{"id", "syn-1"},
{"root", "Ocean"},
{"synonyms", {"Sea"} },
{"symbols_to_index", {}}
};
syn_op = synonym_t::parse(syn_json_bad_symbols, synonym);
ASSERT_FALSE(syn_op.ok());
ASSERT_STREQ("Synonym `symbols_to_index` should be an array of strings.", syn_op.error().c_str());
syn_json_bad_symbols = {
{"id", "syn-1"},
{"root", "Ocean"},
{"synonyms", {"Sea"} },
{"symbols_to_index", {"%^"}}
};
syn_op = synonym_t::parse(syn_json_bad_symbols, synonym);
ASSERT_FALSE(syn_op.ok());
ASSERT_STREQ("Synonym `symbols_to_index` should be an array of single character symbols.", syn_op.error().c_str());
}
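// One-way synonyms rewrite the root token sequence into its synonyms; a compression rule and
// an expansion rule can coexist without undoing each other.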
TEST_F(CollectionSynonymsTest, SynonymReductionOneWay) {
std::vector<std::vector<std::string>> results;
nlohmann::json synonym1 = R"({
"id": "nyc-expansion",
"root": "nyc",
"synonyms": ["new york"]
})"_json;
coll_mul_fields->add_synonym(synonym1);
results.clear();
coll_mul_fields->synonym_reduction({"red", "nyc", "tshirt"}, "", results);
ASSERT_EQ(1, results.size());
ASSERT_EQ(4, results[0].size());
std::vector<std::string> red_new_york_tshirts = {"red", "new", "york", "tshirt"};
for(size_t i=0; i<red_new_york_tshirts.size(); i++) {
ASSERT_STREQ(red_new_york_tshirts[i].c_str(), results[0][i].c_str());
}
// when no synonym matches the query tokens, reduction should return nothing
results.clear();
coll_mul_fields->synonym_reduction({"foo", "bar", "baz"}, "", results);
ASSERT_EQ(0, results.size());
// compression; also ensure that it does not revert back to the expansion rule
results.clear();
nlohmann::json synonym2 = R"({
"id": "new-york-compression",
"root": "new york",
"synonyms": ["nyc"]
})"_json;
coll_mul_fields->add_synonym(synonym2);
coll_mul_fields->synonym_reduction({"red", "new", "york", "tshirt"}, "", results);
ASSERT_EQ(1, results.size());
ASSERT_EQ(3, results[0].size());
std::vector<std::string> red_nyc_tshirts = {"red", "nyc", "tshirt"};
for(size_t i=0; i<red_nyc_tshirts.size(); i++) {
ASSERT_STREQ(red_nyc_tshirts[i].c_str(), results[0][i].c_str());
}
// apply two synonym substitutions whose roots have the same length
results.clear();
nlohmann::json synonym3 = R"({
"id": "t-shirt-compression",
"root": "t shirt",
"synonyms": ["tshirt"]
})"_json;
coll_mul_fields->add_synonym(synonym3);
coll_mul_fields->synonym_reduction({"new", "york", "t", "shirt"}, "", results);
ASSERT_EQ(1, results.size());
ASSERT_EQ(2, results[0].size());
std::vector<std::string> nyc_tshirt = {"nyc", "tshirt"};
for(size_t i=0; i<nyc_tshirt.size(); i++) {
ASSERT_STREQ(nyc_tshirt[i].c_str(), results[0][i].c_str());
}
// apply two synonym substitutions whose roots have different lengths
results.clear();
nlohmann::json synonym4 = R"({
"id": "red-crimson",
"root": "red",
"synonyms": ["crimson"]
})"_json;
coll_mul_fields->add_synonym(synonym4);
coll_mul_fields->synonym_reduction({"red", "new", "york", "cap"}, "", results);
ASSERT_EQ(1, results.size());
ASSERT_EQ(3, results[0].size());
std::vector<std::string> crimson_nyc_cap = {"crimson", "nyc", "cap"};
for(size_t i=0; i<crimson_nyc_cap.size(); i++) {
ASSERT_STREQ(crimson_nyc_cap[i].c_str(), results[0][i].c_str());
}
}
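// Multi-way synonyms: each member of the set is rewritten into every other member, including
// multi-token phrases.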
TEST_F(CollectionSynonymsTest, SynonymReductionMultiWay) {
nlohmann::json synonym1 = R"({
"id": "ipod-synonyms",
"synonyms": ["ipod", "i pod", "pod"]
})"_json;
auto op = coll_mul_fields->add_synonym(synonym1);
std::vector<std::vector<std::string>> results;
coll_mul_fields->synonym_reduction({"ipod"}, "", results);
ASSERT_EQ(2, results.size());
ASSERT_EQ(2, results[0].size());
ASSERT_EQ(1, results[1].size());
std::vector<std::string> i_pod = {"i", "pod"};
for(size_t i=0; i<i_pod.size(); i++) {
ASSERT_STREQ(i_pod[i].c_str(), results[0][i].c_str());
}
ASSERT_STREQ("pod", results[1][0].c_str());
// multiple tokens
results.clear();
coll_mul_fields->synonym_reduction({"i", "pod"}, "", results);
ASSERT_EQ(2, results.size());
ASSERT_EQ(1, results[0].size());
ASSERT_EQ(1, results[1].size());
ASSERT_STREQ("ipod", results[0][0].c_str());
ASSERT_STREQ("pod", results[1][0].c_str());
// multi-token synonym + multi-token synonym definitions
nlohmann::json synonym2 = R"({
"id": "usa-synonyms",
"synonyms": ["usa", "united states", "us", "united states of america", "states"]
})"_json;
coll_mul_fields->add_synonym(synonym2);
results.clear();
coll_mul_fields->synonym_reduction({"united", "states"}, "", results);
ASSERT_EQ(4, results.size());
ASSERT_EQ(1, results[0].size());
ASSERT_EQ(1, results[1].size());
ASSERT_EQ(4, results[2].size());
ASSERT_EQ(1, results[3].size());
ASSERT_STREQ("usa", results[0][0].c_str());
ASSERT_STREQ("us", results[1][0].c_str());
std::vector<std::string> red_new_york_tshirts = {"united", "states", "of", "america"};
for(size_t i=0; i<red_new_york_tshirts.size(); i++) {
ASSERT_STREQ(red_new_york_tshirts[i].c_str(), results[2][i].c_str());
}
ASSERT_STREQ("states", results[3][0].c_str());
}
TEST_F(CollectionSynonymsTest, SynonymBelongingToMultipleSets) {
nlohmann::json synonym1 = R"({
"id": "iphone-synonyms",
"synonyms": ["i phone", "smart phone"]
})"_json;
nlohmann::json synonym2 = R"({
"id": "samsung-synonyms",
"synonyms": ["smart phone", "galaxy phone", "samsung phone"]
})"_json;
coll_mul_fields->add_synonym(synonym1);
coll_mul_fields->add_synonym(synonym2);
std::vector<std::vector<std::string>> results;
coll_mul_fields->synonym_reduction({"smart", "phone"}, "", results);
ASSERT_EQ(3, results.size());
ASSERT_EQ(2, results[0].size());
ASSERT_EQ(2, results[1].size());
ASSERT_EQ(2, results[2].size());
ASSERT_STREQ("i", results[0][0].c_str());
ASSERT_STREQ("phone", results[0][1].c_str());
ASSERT_STREQ("galaxy", results[1][0].c_str());
ASSERT_STREQ("phone", results[1][1].c_str());
ASSERT_STREQ("samsung", results[2][0].c_str());
ASSERT_STREQ("phone", results[2][1].c_str());
}
TEST_F(CollectionSynonymsTest, OneWaySynonym) {
nlohmann::json syn_json = {
{"id", "syn-1"},
{"root", "Ocean"},
{"synonyms", {"Sea"} }
};
synonym_t synonym;
auto syn_op = synonym_t::parse(syn_json, synonym);
ASSERT_TRUE(syn_op.ok());
// without synonym
auto res = coll_mul_fields->search("ocean", {"title"}, "", {}, {}, {0}, 10).get();
ASSERT_EQ(0, res["hits"].size());
ASSERT_EQ(0, res["found"].get<uint32_t>());
// add synonym and redo search
ASSERT_TRUE(coll_mul_fields->add_synonym(synonym.to_view_json()).ok());
res = coll_mul_fields->search("ocean", {"title"}, "", {}, {}, {0}, 10).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(1, res["found"].get<uint32_t>());
}
TEST_F(CollectionSynonymsTest, SynonymQueryVariantWithDropTokens) {
std::vector<field> fields = {field("category", field_types::STRING_ARRAY, false),
field("location", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json syn_json = {
{"id", "syn-1"},
{"root", "us"},
{"synonyms", {"united states"} }
};
synonym_t synonym;
auto syn_op = synonym_t::parse(syn_json, synonym);
ASSERT_TRUE(syn_op.ok());
coll1->add_synonym(synonym.to_view_json());
nlohmann::json doc1;
doc1["id"] = "0";
doc1["category"].push_back("sneakers");
doc1["category"].push_back("jewellery");
doc1["location"] = "united states";
doc1["points"] = 10;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["category"].push_back("gloves");
doc2["category"].push_back("wallets");
doc2["location"] = "united states";
doc2["points"] = 20;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["category"].push_back("sneakers");
doc3["category"].push_back("jewellery");
doc3["location"] = "england";
doc3["points"] = 30;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
auto res = coll1->search("us sneakers", {"category", "location"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(3, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", res["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("2", res["hits"][2]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSynonymsTest, SynonymsTextMatchSameAsRootQuery) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json syn_json = {
{"id", "syn-1"},
{"root", "ceo"},
{"synonyms", {"chief executive officer"} }
};
synonym_t synonym;
auto syn_op = synonym_t::parse(syn_json, synonym);
ASSERT_TRUE(syn_op.ok());
coll1->add_synonym(synonym.to_view_json());
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Dan Fisher";
doc1["title"] = "Chief Executive Officer";
doc1["points"] = 10;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Jack Sparrow";
doc2["title"] = "CEO";
doc2["points"] = 20;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto res = coll1->search("ceo", {"name", "title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ("1", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", res["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(res["hits"][1]["text_match"].get<size_t>(), res["hits"][0]["text_match"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSynonymsTest, MultiWaySynonym) {
nlohmann::json syn_json = {
{"id", "syn-1"},
{"synonyms", {"Home Land", "Homeland", "homǝland"}}
};
synonym_t synonym;
auto syn_op = synonym_t::parse(syn_json, synonym);
ASSERT_TRUE(syn_op.ok());
// without synonym
auto res = coll_mul_fields->search("homǝland", {"title"}, "", {}, {}, {0}, 10).get();
ASSERT_EQ(0, res["hits"].size());
ASSERT_EQ(0, res["found"].get<uint32_t>());
coll_mul_fields->add_synonym(synonym.to_view_json());
res = coll_mul_fields->search("homǝland", {"title"}, "", {}, {}, {0}, 10).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(1, res["found"].get<uint32_t>());
ASSERT_STREQ("<mark>Homeland</mark> Security", res["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
nlohmann::json syn_json2 = {
{"id", "syn-2"},
{"synonyms", {"Samuel L. Jackson", "Sam Jackson", "Leroy"}}
};
res = coll_mul_fields->search("samuel leroy jackson", {"starring"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(0, res["hits"].size());
coll_mul_fields->add_synonym(syn_json2);
res = coll_mul_fields->search("samuel leroy jackson", {"starring"}, "", {}, {}, {0}, 10).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ(2, res["found"].get<uint32_t>());
ASSERT_STREQ("<mark>Samuel</mark> <mark>L</mark>. <mark>Jackson</mark>", res["hits"][0]["highlights"][0]["snippet"].get<std::string>().c_str());
ASSERT_STREQ("<mark>Samuel</mark> <mark>L</mark>. <mark>Jackson</mark>", res["hits"][1]["highlights"][0]["snippet"].get<std::string>().c_str());
// for now we don't support synonyms on ANY prefix
res = coll_mul_fields->search("ler", {"starring"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(0, res["hits"].size());
ASSERT_EQ(0, res["found"].get<uint32_t>());
}
TEST_F(CollectionSynonymsTest, ExactMatchRankedSameAsSynonymMatch) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Laughing out Loud", "Description 1", "100"},
{"Stop Laughing", "Description 2", "120"},
{"LOL sure", "Laughing out loud sure", "200"},
{"Really ROFL now", "Description 3", "250"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["description"] = records[i][1];
doc["points"] = std::stoi(records[i][2]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
nlohmann::json syn_json = {
{"id", "syn-1"},
{"synonyms", {"Lol", "ROFL", "laughing"}}
};
synonym_t synonym;
auto syn_op = synonym_t::parse(syn_json, synonym);
ASSERT_TRUE(syn_op.ok());
coll1->add_synonym(synonym.to_view_json());
auto res = coll1->search("laughing", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(4, res["hits"].size());
ASSERT_EQ(4, res["found"].get<uint32_t>());
ASSERT_STREQ("3", res["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", res["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", res["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", res["hits"][3]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSynonymsTest, ExactMatchVsSynonymMatchCrossFields) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Head of Marketing", "The Chief Marketing Officer", "100"},
{"VP of Sales", "Preparing marketing and sales materials.", "120"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["description"] = records[i][1];
doc["points"] = std::stoi(records[i][2]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
nlohmann::json syn_json = {
{"id", "syn-1"},
{"synonyms", {"cmo", "Chief Marketing Officer", "VP of Marketing"}}
};
synonym_t synonym;
auto syn_op = synonym_t::parse(syn_json, synonym);
ASSERT_TRUE(syn_op.ok());
coll1->add_synonym(synonym.to_view_json());
auto res = coll1->search("cmo", {"title", "description"}, "", {}, {},
{0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ(2, res["found"].get<uint32_t>());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", res["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSynonymsTest, SynonymFieldOrdering) {
// Synonym match on a field earlier in the fields list should rank above exact match of another field
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"LOL really", "Description 1", "50"},
{"Never stop", "Description 2", "120"},
{"Yes and no", "Laughing out loud sure", "100"},
{"And so on", "Description 3", "250"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["description"] = records[i][1];
doc["points"] = std::stoi(records[i][2]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
nlohmann::json syn_json = {
{"id", "syn-1"},
{"synonyms", {"Lol", "ROFL", "laughing"}}
};
synonym_t synonym;
auto syn_op = synonym_t::parse(syn_json, synonym);
ASSERT_TRUE(syn_op.ok());
coll1->add_synonym(synonym.to_view_json());
auto res = coll1->search("laughing", {"title", "description"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ(2, res["found"].get<uint32_t>());
ASSERT_STREQ("0", res["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", res["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSynonymsTest, DeleteAndUpsertDuplicationOfSynonyms) {
coll_mul_fields->add_synonym(R"({"id": "ipod-synonyms", "synonyms": ["i pod", "Apple Phone"]})"_json);
coll_mul_fields->add_synonym(R"({"id": "case-synonyms", "root": "Cases", "synonyms": ["phone cover", "mobile protector"]})"_json);
coll_mul_fields->add_synonym(R"({"id": "samsung-synonyms", "root": "s3", "synonyms": ["s3 phone", "samsung"]})"_json);
ASSERT_EQ(3, coll_mul_fields->get_synonyms().get().size());
coll_mul_fields->remove_synonym("ipod-synonyms");
coll_mul_fields->remove_synonym("case-synonyms");
auto res_op = coll_mul_fields->search("apple phone", {"starring"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
res_op = coll_mul_fields->search("cases", {"starring"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
auto synonyms = coll_mul_fields->get_synonyms().get();
ASSERT_EQ(1, synonyms.size());
ASSERT_EQ("samsung-synonyms", synonyms.begin()->second->id);
// try to upsert synonym with same ID
auto upsert_op = coll_mul_fields->add_synonym(R"({"id": "samsung-synonyms", "root": "s3 smartphone",
"synonyms": ["s3 phone", "samsung"]})"_json);
ASSERT_TRUE(upsert_op.ok());
ASSERT_EQ(1, coll_mul_fields->get_synonyms().get().size());
synonym_t synonym2_updated;
coll_mul_fields->get_synonym("samsung-synonyms", synonym2_updated);
ASSERT_EQ("s3", synonym2_updated.root[0]);
ASSERT_EQ("smartphone", synonym2_updated.root[1]);
coll_mul_fields->remove_synonym("samsung-synonyms");
ASSERT_EQ(0, coll_mul_fields->get_synonyms().get().size());
}
TEST_F(CollectionSynonymsTest, UpsertAndSearch) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "title", "type": "string", "locale": "da" },
{"name": "points", "type": "int32" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
nlohmann::json doc;
doc["title"] = "Rose gold rosenblade, 500 stk";
doc["points"] = 0;
coll1->add(doc.dump());
coll1->add_synonym(R"({"id":"abcde","locale":"da","root":"",
"synonyms":["rosegold","rosaguld","rosa guld","rose gold","roseguld","rose guld"]})"_json);
ASSERT_EQ(1, coll1->get_synonyms().get().size());
// try to upsert synonym with same ID
auto upsert_op = coll1->add_synonym(R"({"id":"abcde","locale":"da","root":"",
"synonyms":["rosegold","rosaguld","rosa guld","rose gold","roseguld","rose guld"]})"_json);
ASSERT_TRUE(upsert_op.ok());
ASSERT_EQ(1, coll1->get_synonyms().get().size());
// now try searching
auto res = coll1->search("rosa guld", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(1, res["found"].get<uint32_t>());
}
TEST_F(CollectionSynonymsTest, SynonymJsonSerialization) {
synonym_t synonym1;
synonym1.id = "ipod-synonyms";
synonym1.root = {"apple", "ipod"};
synonym1.raw_root = "apple ipod";
synonym1.raw_synonyms = {"ipod", "i pod", "pod"};
synonym1.synonyms.push_back({"ipod"});
synonym1.synonyms.push_back({"i", "pod"});
synonym1.synonyms.push_back({"pod"});
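// to_view_json() should expose the raw (untokenized) root and synonym strings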
nlohmann::json obj = synonym1.to_view_json();
ASSERT_STREQ("ipod-synonyms", obj["id"].get<std::string>().c_str());
ASSERT_STREQ("apple ipod", obj["root"].get<std::string>().c_str());
ASSERT_EQ(3, obj["synonyms"].size());
ASSERT_STREQ("ipod", obj["synonyms"][0].get<std::string>().c_str());
ASSERT_STREQ("i pod", obj["synonyms"][1].get<std::string>().c_str());
ASSERT_STREQ("pod", obj["synonyms"][2].get<std::string>().c_str());
}
TEST_F(CollectionSynonymsTest, SynonymSingleTokenExactMatch) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("description", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Smashed Lemon", "Description 1", "100"},
{"Lulu Guinness", "Description 2", "100"},
{"Lululemon", "Description 3", "100"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["description"] = records[i][1];
doc["points"] = std::stoi(records[i][2]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
coll1->add_synonym(R"({"id": "syn-1", "root": "lulu lemon", "synonyms": ["lululemon"]})"_json);
auto res = coll1->search("lulu lemon", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(1, res["found"].get<uint32_t>());
ASSERT_STREQ("2", res["hits"][0]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSynonymsTest, SynonymExpansionAndCompressionRanking) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Smashed Lemon", "100"},
{"Lulu Lemon", "100"},
{"Lululemon", "200"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = std::stoi(records[i][1]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
coll1->add_synonym(R"({"id": "syn-1", "root": "lululemon", "synonyms": ["lulu lemon"]})"_json);
auto res = coll1->search("lululemon", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ(2, res["found"].get<uint32_t>());
// Even though "lulu lemon" is a two-token synonym match, it should have the same text match score as "lululemon"
// and hence must be tied and then ranked on "points"
ASSERT_EQ("2", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", res["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(res["hits"][0]["text_match"].get<size_t>(), res["hits"][1]["text_match"].get<size_t>());
// now with a compression synonym (multi-token root compressed into a single token)
coll1->add_synonym(R"({"id": "syn-1", "root": "lulu lemon", "synonyms": ["lululemon"]})"_json);
res = coll1->search("lulu lemon", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ(2, res["found"].get<uint32_t>());
// Even though "lululemon" is a single-token synonym match, it should have the same text match score as "lulu lemon"
// and hence must be tied and then ranked on "points"
ASSERT_EQ("2", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("1", res["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ(res["hits"][0]["text_match"].get<size_t>(), res["hits"][1]["text_match"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSynonymsTest, SynonymQueriesMustHavePrefixEnabled) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Nonstick Cookware", "100"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = std::stoi(records[i][1]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
coll1->add_synonym(R"({"id": "syn-1", "root": "ns", "synonyms": ["nonstick"]})"_json);
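// "ns" expands to "nonstick" via the synonym; "cook" can only match "Cookware" when prefix search is enabled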
auto res = coll1->search("ns cook", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(1, res["found"].get<uint32_t>());
res = coll1->search("ns cook", {"title"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {false}, 0).get();
ASSERT_EQ(0, res["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSynonymsTest, HandleSpecialSymbols) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points",
0, "", {"+"}, {"."}).get();
}
std::vector<std::vector<std::string>> records = {
{"+", "100"},
{"example.com", "100"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = std::stoi(records[i][1]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
nlohmann::json syn_plus_json = {
{"id", "syn-1"},
{"root", "plus"},
{"synonyms", {"+"} },
{"symbols_to_index", {"+"}}
};
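// "symbols_to_index" keeps "+" as an indexable token for this synonym, so "plus" can resolve to the "+" document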
ASSERT_TRUE(coll1->add_synonym(syn_plus_json).ok());
auto res = coll1->search("plus", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSynonymsTest, SynonymForNonAsciiLanguage) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points",
0, "", {"+"}, {"."}).get();
}
std::vector<std::vector<std::string>> records = {
{"அனைவருக்கும் வணக்கம்", "100"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = std::stoi(records[i][1]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
nlohmann::json syn_plus_json = {
{"id", "syn-1"},
{"root", "எல்லோருக்கும்"},
{"synonyms", {"அனைவருக்கும்"} }
};
ASSERT_TRUE(coll1->add_synonym(syn_plus_json).ok());
auto res = coll1->search("எல்லோருக்கும்", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
}
TEST_F(CollectionSynonymsTest, SynonymForKorean) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string", "locale": "ko"},
{"name": "points", "type": "int32" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
std::vector<std::vector<std::string>> records = {
{"도쿄구울", "100"},
{"도쿄 구울", "100"},
{"구울", "100"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = std::stoi(records[i][1]);
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
nlohmann::json synonym1 = R"({
"id": "syn-1",
"root": "",
"synonyms": ["도쿄구울", "도쿄 구울", "구울"],
"locale": "ko"
})"_json;
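// with the "ko" locale, all three spellings should expand to each other, so each query matches all three docs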
ASSERT_TRUE(coll1->add_synonym(synonym1).ok());
auto res = coll1->search("도쿄구울", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(3, res["hits"].size());
res = coll1->search("도쿄 구울", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(3, res["hits"].size());
res = coll1->search("구울", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(3, res["hits"].size());
}
TEST_F(CollectionSynonymsTest, SynonymWithLocaleMatch) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title_en", "type": "string"},
{"name": "title_es", "type": "string", "locale": "es"},
{"name": "title_de", "type": "string", "locale": "de"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
std::vector<std::vector<std::string>> records = {
{"Brun New Shoe", "Zapato nuevo / Sandalen", "Nagelneuer Schuh"},
{"Marrones socks", "Calcetines marrones / Schuh", "Braune Socken"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title_en"] = records[i][0];
doc["title_es"] = records[i][1];
doc["title_de"] = records[i][2];
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
nlohmann::json synonym1 = R"({
"id": "syn-1",
"root": "",
"synonyms": ["marrones", "brun"],
"locale": "es"
})"_json;
ASSERT_TRUE(coll1->add_synonym(synonym1).ok());
nlohmann::json synonym2 = R"({
"id": "syn-2",
"root": "",
"synonyms": ["schuh", "sandalen"],
"locale": "de"
})"_json;
ASSERT_TRUE(coll1->add_synonym(synonym2).ok());
// the "es" synonym should NOT be applied to the en field, which has no locale set
auto res = coll1->search("brun", {"title_en"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"]);
// the "de" synonym should not work for "es"
res = coll1->search("schuh", {"title_es"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("1", res["hits"][0]["document"]["id"]);
}
TEST_F(CollectionSynonymsTest, MultipleSynonymSubstitution) {
nlohmann::json schema = R"({
"name": "coll2",
"fields": [
{"name": "title", "type": "string"},
{"name": "gender", "type": "string"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
std::vector<std::vector<std::string>> records = {
{"Beautiful Blazer", "Male"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["gender"] = records[i][1];
auto add_op = coll2->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
nlohmann::json synonym1 = R"({
"id": "foobar",
"synonyms": ["blazer", "suit"]
})"_json;
nlohmann::json synonym2 = R"({
"id": "foobar2",
"synonyms": ["male", "man"]
})"_json;
ASSERT_TRUE(coll2->add_synonym(synonym1).ok());
ASSERT_TRUE(coll2->add_synonym(synonym2).ok());
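// both synonym groups apply independently, so every combination of "blazer"/"suit" and "male"/"man" should match the single document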
auto res = coll2->search("blazer male", {"title", "gender"}, "", {},
{}, {0}, 10, 1, FREQUENCY, {true},0).get();
ASSERT_EQ(1, res["hits"].size());
res = coll2->search("blazer man", {"title", "gender"}, "", {},
{}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
res = coll2->search("suit male", {"title", "gender"}, "", {},
{}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
res = coll2->search("suit man", {"title", "gender"}, "", {},
{}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
}
TEST_F(CollectionSynonymsTest, EnableSynonymFlag) {
nlohmann::json schema = R"({
"name": "coll2",
"fields": [
{"name": "title", "type": "string"},
{"name": "gender", "type": "string"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
std::vector<std::vector<std::string>> records = {
{"Beautiful Blazer", "Male"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["gender"] = records[i][1];
auto add_op = coll2->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
nlohmann::json synonym1 = R"({
"id": "foobar",
"synonyms": ["blazer", "suit"]
})"_json;
nlohmann::json synonym2 = R"({
"id": "foobar2",
"synonyms": ["male", "man"]
})"_json;
ASSERT_TRUE(coll2->add_synonym(synonym1).ok());
ASSERT_TRUE(coll2->add_synonym(synonym2).ok());
bool enable_synonyms = true;
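// the long positional argument list below spells out the full Collection::search signature;
// the trailing enable_synonyms flag toggles synonym expansion for this query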
auto res = coll2->search("suit man", {"title", "gender"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", false, enable_synonyms).get();
ASSERT_EQ(1, res["hits"].size());
enable_synonyms = false;
res = coll2->search("suit man", {"title", "gender"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", false, enable_synonyms).get();
ASSERT_EQ(0, res["hits"].size());
}
TEST_F(CollectionSynonymsTest, SynonymTypos) {
nlohmann::json schema = R"({
"name": "coll3",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll3 = op.get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Cool Trousers";
auto add_op = coll3->add(doc.dump());
ASSERT_TRUE(add_op.ok());
nlohmann::json synonym1 = R"({
"id": "foobar",
"synonyms": ["trousers", "pants"]
})"_json;
ASSERT_TRUE(coll3->add_synonym(synonym1).ok());
auto res = coll3->search("trousers", {"title"}, "", {},
{}, {0}, 10, 1, FREQUENCY, {true},0).get();
ASSERT_EQ(1, res["hits"].size());
res = coll3->search("pants", {"title"}, "", {},
{}, {0}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(1, res["hits"].size());
// try with typos
uint32_t synonym_num_typos = 0;
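// synonym_num_typos controls how many typos are tolerated when matching query tokens against synonym definitions (max 2)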
res = coll3->search("patns", {"title"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", false, true, false, synonym_num_typos).get();
ASSERT_EQ(0, res["hits"].size());
synonym_num_typos = 2;
res = coll3->search("patns", {"title"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", false, true, false, synonym_num_typos).get();
ASSERT_EQ(1, res["hits"].size());
// max 2 typos supported
synonym_num_typos = 3;
auto search_op = coll3->search("trosuers", {"title"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", false, true, false, synonym_num_typos);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Value of `synonym_num_typos` must not be greater than 2.",search_op.error());
}
TEST_F(CollectionSynonymsTest, SynonymPrefix) {
nlohmann::json schema = R"({
"name": "coll3",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection *coll3 = op.get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Cool Trousers";
auto add_op = coll3->add(doc.dump());
ASSERT_TRUE(add_op.ok());
doc["id"] = "1";
doc["title"] = "Cool Pants";
add_op = coll3->add(doc.dump());
ASSERT_TRUE(add_op.ok());
nlohmann::json synonym1 = R"({
"id": "foobar",
"synonyms": ["trousers", "pants"]
})"_json;
ASSERT_TRUE(coll3->add_synonym(synonym1).ok());
bool synonym_prefix = false;
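// synonym_prefix controls whether a query token may prefix-match a synonym definition ("pan" -> "pants")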
auto res = coll3->search("pan", {"title"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {false},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", false, true, synonym_prefix).get();
ASSERT_EQ(0, res["hits"].size());
synonym_prefix = true;
res = coll3->search("pan", {"title"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {false},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", false, true, synonym_prefix).get();
ASSERT_EQ(2, res["hits"].size());
}
TEST_F(CollectionSynonymsTest, SynonymsPagination) {
Collection *coll3;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll3 = collectionManager.get_collection("coll3").get();
if (coll3 == nullptr) {
coll3 = collectionManager.create_collection("coll3", 1, fields, "points").get();
}
for (int i = 0; i < 5; ++i) {
nlohmann::json synonym_json = R"(
{
"id": "foobar",
"synonyms": ["blazer", "suit"]
})"_json;
synonym_json["id"] = synonym_json["id"].get<std::string>() + std::to_string(i + 1);
coll3->add_synonym(synonym_json);
}
uint32_t limit = 0, offset = 0;
// limit the number of returned synonyms to 2
limit = 2;
auto synonym_op = coll3->get_synonyms(limit);
auto synonym_map = synonym_op.get();
auto it = synonym_map.begin();
ASSERT_EQ(2, synonym_map.size());
ASSERT_EQ("foobar1", it->second->id); it++;
ASSERT_EQ("foobar2", it->second->id);
// get 2 synonyms from offset 3
offset = 3;
synonym_op = coll3->get_synonyms(limit, offset);
synonym_map = synonym_op.get();
it = synonym_map.begin();
ASSERT_EQ(2, synonym_map.size());
ASSERT_EQ("foobar4", it->second->id); it++;
ASSERT_EQ("foobar5", it->second->id);
// get all synonyms except the first
offset = 1;
limit = 0;
synonym_op = coll3->get_synonyms(limit, offset);
synonym_map = synonym_op.get();
it = synonym_map.begin();
ASSERT_EQ(4, synonym_map.size());
ASSERT_EQ("foobar2", it->second->id); it++;
ASSERT_EQ("foobar3", it->second->id); it++;
ASSERT_EQ("foobar4", it->second->id); it++;
ASSERT_EQ("foobar5", it->second->id); it++;
// get the last synonym
offset = 4, limit = 1;
synonym_op = coll3->get_synonyms(limit, offset);
synonym_map = synonym_op.get();
it = synonym_map.begin();
ASSERT_EQ(1, synonym_map.size());
ASSERT_EQ("foobar5", it->second->id);
// if limit is greater than the number of synonyms, return all synonyms from the offset
offset = 0;
limit = 8;
synonym_op = coll3->get_synonyms(limit, offset);
synonym_map = synonym_op.get();
it = synonym_map.begin();
ASSERT_EQ(5, synonym_map.size());
ASSERT_EQ("foobar1", it->second->id); it++;
ASSERT_EQ("foobar2", it->second->id); it++;
ASSERT_EQ("foobar3", it->second->id); it++;
ASSERT_EQ("foobar4", it->second->id); it++;
ASSERT_EQ("foobar5", it->second->id); it++;
offset = 3;
limit = 4;
synonym_op = coll3->get_synonyms(limit, offset);
synonym_map = synonym_op.get();
it = synonym_map.begin();
ASSERT_EQ(2, synonym_map.size());
ASSERT_EQ("foobar4", it->second->id); it++;
ASSERT_EQ("foobar5", it->second->id);
// invalid offset
offset = 6;
limit = 0;
synonym_op = coll3->get_synonyms(limit, offset);
ASSERT_FALSE(synonym_op.ok());
ASSERT_EQ("Invalid offset param.", synonym_op.error());
}
TEST_F(CollectionSynonymsTest, SynonymWithStemming) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "name", "type": "string", "stem": true}
]
})"_json;
auto coll1 = collectionManager.create_collection(schema).get();
std::vector<std::string> records = {"k8s", "kubernetes"};
for(size_t i = 0; i < records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["name"] = records[i];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
coll1->add_synonym(R"({"id": "syn-1", "synonyms": ["k8s", "kubernetes"]})"_json);
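// even with stemming enabled on the field, the synonym should expand so that both the "k8s" and "kubernetes" docs match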
auto res = coll1->search("k8s", {"name"}, "", {}, {}, {2}, 10, 1, FREQUENCY, {true}, 0).get();
ASSERT_EQ(2, res["hits"].size());
ASSERT_EQ(2, res["found"].get<uint32_t>());
collectionManager.drop_collection("coll1");
}
| 54,331 | C++ | .cpp | 1,206 | 36.077944 | 148 | 0.539216 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

3,727 | collection_all_fields_test.cpp | typesense_typesense/test/collection_all_fields_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <filesystem>
#include <collection_manager.h>
#include "collection.h"
#include "embedder_manager.h"
#include "http_client.h"
class CollectionAllFieldsTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_all_fields";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
system("mkdir -p /tmp/typesense_test/models");
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionAllFieldsTest, IndexDocsWithoutSchema) {
Collection *coll1;
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::vector<field> fields = {};
std::vector<sort_by> sort_fields = { sort_by("points", "DESC") };
// try to create collection with random fallback field type
auto bad_coll_op = collectionManager.create_collection("coll_bad", 1, fields, "", 0, "blah");
ASSERT_FALSE(bad_coll_op.ok());
ASSERT_EQ("Field `.*` has an invalid type.", bad_coll_op.error());
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
auto coll_op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO);
coll1 = coll_op.get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
nlohmann::json document = nlohmann::json::parse(json_line);
Option<nlohmann::json> add_op = coll1->add(document.dump());
ASSERT_TRUE(add_op.ok());
}
infile.close();
query_fields = {"starring"};
std::vector<std::string> facets;
// check the default when no specific dirty_values option is sent, for a collection that has schema detection enabled
std::string dirty_values;
ASSERT_EQ(DIRTY_VALUES::COERCE_OR_REJECT, coll1->parse_dirty_values_option(dirty_values));
dirty_values = "coerce_or_reject";
ASSERT_EQ(DIRTY_VALUES::COERCE_OR_REJECT, coll1->parse_dirty_values_option(dirty_values));
dirty_values = "COERCE_OR_DROP";
ASSERT_EQ(DIRTY_VALUES::COERCE_OR_DROP, coll1->parse_dirty_values_option(dirty_values));
dirty_values = "reject";
ASSERT_EQ(DIRTY_VALUES::REJECT, coll1->parse_dirty_values_option(dirty_values));
dirty_values = "DROP";
ASSERT_EQ(DIRTY_VALUES::DROP, coll1->parse_dirty_values_option(dirty_values));
// the same search should succeed when a verbatim filter is made
auto results = coll1->search("will", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
results = coll1->search("chris", {"cast"}, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_STREQ("6", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("1", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("7", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// reject a field whose type differs from the already inferred type
// default for `index_all_fields` is `DIRTY_FIELD_COERCE_IGNORE`
// unable to coerce
auto doc_json = R"({"cast":"William Barnes","points":63,"starring":"Will Ferrell",
"starring_facet":"Will Ferrell","title":"Anchorman 2: The Legend Continues"})";
Option<nlohmann::json> add_op = coll1->add(doc_json);
ASSERT_FALSE(add_op.ok());
ASSERT_STREQ("Field `cast` must be an array.", add_op.error().c_str());
// coerce integer to string
doc_json = R"({"cast": ["William Barnes"],"points": 63, "starring":"Will Ferrell",
"starring_facet":"Will Ferrell","title": 300})";
add_op = coll1->add(doc_json);
ASSERT_TRUE(add_op.ok());
results = coll1->search("300", {"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("300", results["hits"][0]["document"]["title"].get<std::string>().c_str());
// with dirty values set to `COERCE_OR_DROP`
// the `cast` field cannot be coerced, so it should be dropped and not stored
doc_json = R"({"cast":"William Barnes","points":63,"starring":"Will Ferrell",
"starring_facet":"Will Ferrell","title":"With bad cast field."})";
add_op = coll1->add(doc_json, CREATE, "", DIRTY_VALUES::COERCE_OR_DROP);
ASSERT_TRUE(add_op.ok());
results = coll1->search("With bad cast field", {"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("With bad cast field.", results["hits"][0]["document"]["title"].get<std::string>().c_str());
ASSERT_EQ(0, results["hits"][0]["document"].count("cast"));
// with dirty values set to `DROP`
// no coercion should happen: the `title` field will just be dropped, but the record will still be indexed
doc_json = R"({"cast": ["Jeremy Livingston"],"points":63,"starring":"Will Ferrell",
"starring_facet":"Will Ferrell","title": 1200 })";
add_op = coll1->add(doc_json, CREATE, "", DIRTY_VALUES::DROP);
ASSERT_TRUE(add_op.ok());
results = coll1->search("1200", {"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("Jeremy Livingston", {"cast"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(0, results["hits"][0]["document"].count("title"));
// with dirty values set to `REJECT`
doc_json = R"({"cast": ["Jeremy Livingston"],"points":63,"starring":"Will Ferrell",
"starring_facet":"Will Ferrell","title": 1200 })";
add_op = coll1->add(doc_json, CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_STREQ("Field `title` must be a string.", add_op.error().c_str());
// try querying using a non-existing sort field
sort_fields = { sort_by("not-found", "DESC") };
auto res_op = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `not-found` in the schema for sorting.", res_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, CoerceDynamicStringField) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field(".*_name", "string", true, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0, "").get();
}
std::string dirty_values;
ASSERT_EQ(DIRTY_VALUES::COERCE_OR_REJECT, coll1->parse_dirty_values_option(dirty_values));
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, HandleArrayTypes) {
Collection *coll1;
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, {}, "", 0, field_types::AUTO).get();
}
nlohmann::json doc;
doc["title"] = "FIRST";
doc["int_values"] = {1, 2};
Option<nlohmann::json> add_op = coll1->add(doc.dump(), CREATE, "0");
ASSERT_TRUE(add_op.ok());
// coercion of string -> int
doc["int_values"] = {"3"};
add_op = coll1->add(doc.dump(), UPDATE, "0");
ASSERT_TRUE(add_op.ok());
// bad array type value should be dropped when stored
doc["title"] = "SECOND";
doc["int_values"] = {{3}};
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::DROP);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("second", {"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
// check that the "bad" value does not exist in the stored document
ASSERT_EQ(1, results["hits"][0]["document"].count("int_values"));
ASSERT_EQ(0, results["hits"][0]["document"]["int_values"].size());
// bad array type should follow coercion rules
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `int_values` must be an array of int64.", add_op.error());
// a non-array field should be handled as per the coercion rule
doc["title"] = "THIRD";
doc["int_values"] = 3;
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `int_values` must be an array.", add_op.error());
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_DROP);
ASSERT_TRUE(add_op.ok());
results = coll1->search("third", {"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(0, results["hits"][0]["document"].count("int_values"));
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, NonOptionalFieldShouldNotBeDropped) {
Collection *coll1;
std::vector<field> fields = {
field("points", field_types::INT32, false, false)
};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0).get();
}
nlohmann::json doc;
doc["title"] = "FIRST";
doc["points"] = {100};
Option<nlohmann::json> add_op = coll1->add(doc.dump(), CREATE, "0", DIRTY_VALUES::DROP);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `points` must be an int32.", add_op.error());
add_op = coll1->add(doc.dump(), CREATE, "0", DIRTY_VALUES::COERCE_OR_DROP);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `points` must be an int32.", add_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, ShouldBeAbleToUpdateSchemaDetectedDocs) {
Collection *coll1;
std::vector<field> fields = {
};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "", 0, field_types::AUTO).get();
}
nlohmann::json doc;
doc["title"] = "FIRST";
doc["scores"] = {100, 200, 300};
Option<nlohmann::json> add_op = coll1->add(doc.dump(), CREATE, "0", DIRTY_VALUES::REJECT);
ASSERT_TRUE(add_op.ok());
// now update both values and reinsert
doc["title"] = "SECOND";
doc["scores"] = {100, 250, "300", 400};
add_op = coll1->add(doc.dump(), UPDATE, "0", DIRTY_VALUES::COERCE_OR_DROP);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("second", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("SECOND", results["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(4, results["hits"][0]["document"]["scores"].size());
ASSERT_EQ(100, results["hits"][0]["document"]["scores"][0].get<size_t>());
ASSERT_EQ(250, results["hits"][0]["document"]["scores"][1].get<size_t>());
ASSERT_EQ(300, results["hits"][0]["document"]["scores"][2].get<size_t>());
ASSERT_EQ(400, results["hits"][0]["document"]["scores"][3].get<size_t>());
// insert multiple docs at the same time
const size_t NUM_DOCS = 20;
std::vector<std::string> json_lines;
for(size_t i = 0; i < NUM_DOCS; i++) {
const std::string &i_str = std::to_string(i);
doc["title"] = std::string("upserted ") + std::to_string(StringUtils::hash_wy(i_str.c_str(), i_str.size()));
doc["scores"] = {i};
doc["max"] = i;
doc["id"] = std::to_string(i+10);
json_lines.push_back(doc.dump());
}
nlohmann::json insert_doc;
auto res = coll1->add_many(json_lines, insert_doc, UPSERT);
ASSERT_TRUE(res["success"].get<bool>());
// now replace all `max` values with the same value and assert that the updates are applied
json_lines.clear();
insert_doc.clear();
for(size_t i = 0; i < NUM_DOCS; i++) {
const std::string &i_str = std::to_string(i);
doc.clear();
doc["title"] = std::string("updated ") + std::to_string(StringUtils::hash_wy(i_str.c_str(), i_str.size()));
doc["scores"] = {1000, 2000};
doc["max"] = 2000;
doc["id"] = std::to_string(i+10);
json_lines.push_back(doc.dump());
}
res = coll1->add_many(json_lines, insert_doc, UPDATE);
ASSERT_TRUE(res["success"].get<bool>());
results = coll1->search("updated", {"title"}, "", {}, {}, {0}, 50, 1, FREQUENCY, {false}).get();
ASSERT_EQ(20, results["hits"].size());
for(auto& hit: results["hits"]) {
ASSERT_EQ(2000, hit["document"]["max"].get<int>());
ASSERT_EQ(2, hit["document"]["scores"].size());
ASSERT_EQ(1000, hit["document"]["scores"][0].get<int>());
ASSERT_EQ(2000, hit["document"]["scores"][1].get<int>());
}
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, StringifyAllValues) {
Collection *coll1;
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, {}, "", 0, "string*").get();
}
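// with the "string*" fallback type, every incoming value should be coerced to a string (or string array)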
nlohmann::json doc;
doc["title"] = "FIRST";
doc["int_values"] = {1, 2};
Option<nlohmann::json> add_op = coll1->add(doc.dump(), CREATE, "0");
ASSERT_TRUE(add_op.ok());
auto added_doc = add_op.get();
auto schema = coll1->get_fields();
ASSERT_EQ("int_values", schema[0].name);
ASSERT_EQ(field_types::STRING_ARRAY, schema[0].type);
ASSERT_EQ("title", schema[1].name);
ASSERT_EQ(field_types::STRING, schema[1].type);
ASSERT_EQ("1", added_doc["int_values"][0].get<std::string>());
ASSERT_EQ("2", added_doc["int_values"][1].get<std::string>());
auto results = coll1->search("first", {"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("FIRST", results["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(1, results["hits"][0]["document"].count("int_values"));
ASSERT_EQ(2, results["hits"][0]["document"]["int_values"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["int_values"][0].get<std::string>());
ASSERT_EQ("2", results["hits"][0]["document"]["int_values"][1].get<std::string>());
// try with DROP
doc["title"] = "SECOND";
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::DROP);
ASSERT_TRUE(add_op.ok());
results = coll1->search("second", {"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("SECOND", results["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(1, results["hits"][0]["document"].count("int_values"));
ASSERT_EQ(0, results["hits"][0]["document"]["int_values"].size()); // since both array values are dropped
// try with REJECT
doc["title"] = "THIRD";
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `int_values` must be an array of string.", add_op.error());
// singular field coercion
doc["int_values"] = {"100"};
doc["single_int"] = 100;
doc["title"] = "FOURTH";
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `single_int` must be a string.", add_op.error());
// try with empty array
doc["title"] = "FIFTH";
doc["int_values"] = {"100"};
doc["int_values_2"] = nlohmann::json::array();
doc["single_int"] = "200";
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_TRUE(add_op.ok());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, IntegerAllValues) {
Collection *coll1;
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, {}, "", 0, "int32").get();
}
nlohmann::json doc;
doc["age"] = 100;
doc["year"] = 2000;
Option<nlohmann::json> add_op = coll1->add(doc.dump(), CREATE, "0");
ASSERT_TRUE(add_op.ok());
auto added_doc = add_op.get();
auto results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
// try with DROP
doc["age"] = "SECOND";
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::DROP);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
// try with REJECT
doc["age"] = "THIRD";
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `age` must be an int32.", add_op.error());
// try with coerce_or_reject
add_op = coll1->add(doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `age` must be an int32.", add_op.error());
// try with coerce_or_drop
doc["age"] = "FOURTH";
add_op = coll1->add(doc.dump(), CREATE, "66", DIRTY_VALUES::COERCE_OR_DROP);
ASSERT_TRUE(add_op.ok());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, SearchStringifiedField) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("department", "string*", true, true),
field(".*_name", "string*", true, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
const Option<Collection*> &coll_op = collectionManager.create_collection("coll1", 1, fields, "", 0, "");
ASSERT_TRUE(coll_op.ok());
coll1 = coll_op.get();
}
nlohmann::json doc;
doc["title"] = "FIRST";
doc["department"] = "ENGINEERING";
doc["company_name"] = "Stark Inc.";
Option<nlohmann::json> add_op = coll1->add(doc.dump(), CREATE, "0");
ASSERT_TRUE(add_op.ok());
// department field's type must be "solidified" to an actual type
auto schema = coll1->get_fields();
ASSERT_EQ("department", schema[4].name);
ASSERT_EQ(field_types::STRING, schema[4].type);
auto results_op = coll1->search("stark", {"company_name"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_TRUE(results_op.ok());
ASSERT_EQ(1, results_op.get()["hits"].size());
results_op = coll1->search("engineering", {"department"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_TRUE(results_op.ok());
ASSERT_EQ(1, results_op.get()["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, StringSingularAllValues) {
Collection *coll1;
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, {}, "", 0, "string").get();
}
nlohmann::json doc;
doc["title"] = "FIRST";
doc["int_values"] = {1, 2};
Option<nlohmann::json> add_op = coll1->add(doc.dump(), CREATE, "0");
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `int_values` must be a string.", add_op.error());
doc["int_values"] = 123;
add_op = coll1->add(doc.dump(), CREATE, "0");
ASSERT_TRUE(add_op.ok());
auto added_doc = add_op.get();
ASSERT_EQ("FIRST", added_doc["title"].get<std::string>());
ASSERT_EQ("123", added_doc["int_values"].get<std::string>());
auto results = coll1->search("first", {"title"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("FIRST", results["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ("123", results["hits"][0]["document"]["int_values"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, UpdateOfDocumentsInAutoMode) {
Collection *coll1;
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, {}, "", 0, field_types::AUTO).get();
}
nlohmann::json doc;
doc["title"] = "FIRST";
doc["single_float"] = 50.50;
auto add_op = coll1->add(doc.dump(), CREATE, "0", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_TRUE(add_op.ok());
// try updating a value
nlohmann::json update_doc;
update_doc["single_float"] = "123";
add_op = coll1->add(update_doc.dump(), UPDATE, "0", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_TRUE(add_op.ok());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, NormalFieldWithAutoType) {
Collection *coll1;
std::vector<field> fields = {
field("city", field_types::AUTO, true, true),
field("publication_year", field_types::AUTO, true, true),
};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto coll_op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO);
ASSERT_TRUE(coll_op.ok());
coll1 = coll_op.get();
}
nlohmann::json doc;
doc["title"] = "FIRST";
doc["city"] = "Austin";
doc["publication_year"] = 2010;
auto add_op = coll1->add(doc.dump(), CREATE, "0", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_TRUE(add_op.ok());
auto res_op = coll1->search("austin", {"city"}, "publication_year: 2010", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_TRUE(res_op.ok());
auto results = res_op.get();
ASSERT_EQ(1, results["hits"].size());
auto schema = coll1->get_fields();
ASSERT_EQ("city", schema[2].name);
ASSERT_EQ(field_types::STRING, schema[2].type);
ASSERT_EQ("publication_year", schema[3].name);
ASSERT_EQ(field_types::INT64, schema[3].type);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, JsonFieldsToFieldsConversion) {
nlohmann::json fields_json = nlohmann::json::array();
nlohmann::json all_field;
all_field[fields::name] = ".*";
all_field[fields::type] = "string*";
fields_json.emplace_back(all_field);
std::string fallback_field_type;
std::vector<field> fields;
auto parse_op = field::json_fields_to_fields(false, fields_json, fallback_field_type, fields);
ASSERT_TRUE(parse_op.ok());
ASSERT_EQ(1, fields.size());
ASSERT_EQ("string*", fallback_field_type);
ASSERT_EQ(true, fields[0].optional);
ASSERT_EQ(false, fields[0].facet);
ASSERT_EQ(".*", fields[0].name);
ASSERT_EQ("string*", fields[0].type);
// non-wildcard string* field should be treated as optional by default
fields_json = nlohmann::json::array();
nlohmann::json string_star_field;
string_star_field[fields::name] = "title";
string_star_field[fields::type] = "string*";
fields_json.emplace_back(string_star_field);
fields.clear();
parse_op = field::json_fields_to_fields(false, fields_json, fallback_field_type, fields);
ASSERT_TRUE(parse_op.ok());
ASSERT_EQ(true, fields[0].optional);
fields_json = nlohmann::json::array();
fields_json.emplace_back(all_field);
// reject when you try to set optional to false or facet to true
fields_json[0][fields::optional] = false;
parse_op = field::json_fields_to_fields(false, fields_json, fallback_field_type, fields);
ASSERT_FALSE(parse_op.ok());
ASSERT_EQ("Field `.*` must be an optional field.", parse_op.error());
fields_json[0][fields::optional] = true;
fields_json[0][fields::facet] = true;
parse_op = field::json_fields_to_fields(false, fields_json, fallback_field_type, fields);
ASSERT_FALSE(parse_op.ok());
ASSERT_EQ("Field `.*` cannot be a facet field.", parse_op.error());
fields_json[0][fields::facet] = false;
// can have only one ".*" field
fields_json.emplace_back(all_field);
parse_op = field::json_fields_to_fields(false, fields_json, fallback_field_type, fields);
ASSERT_FALSE(parse_op.ok());
ASSERT_EQ("There can be only one field named `.*`.", parse_op.error());
// try with the `auto` type
fields_json.clear();
fields.clear();
all_field[fields::type] = "auto";
fields_json.emplace_back(all_field);
parse_op = field::json_fields_to_fields(false, fields_json, fallback_field_type, fields);
ASSERT_TRUE(parse_op.ok());
ASSERT_EQ("auto", fields[0].type);
// try with locale on a regular field
fields_json.clear();
fields.clear();
all_field[fields::type] = "string";
all_field[fields::name] = "title";
all_field[fields::locale] = "ja";
fields_json.emplace_back(all_field);
parse_op = field::json_fields_to_fields(false, fields_json, fallback_field_type, fields);
ASSERT_TRUE(parse_op.ok());
ASSERT_EQ("ja", fields[0].locale);
// try with locale on fallback field
fields_json.clear();
fields.clear();
all_field[fields::type] = "string";
all_field[fields::name] = ".*";
all_field[fields::locale] = "ko";
fields_json.emplace_back(all_field);
parse_op = field::json_fields_to_fields(false, fields_json, fallback_field_type, fields);
ASSERT_TRUE(parse_op.ok());
ASSERT_EQ("ko", fields[0].locale);
}
TEST_F(CollectionAllFieldsTest, WildcardFacetFieldsOnAutoSchema) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field(".*_name", field_types::STRING, true, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO).get();
}
nlohmann::json doc;
doc["title"] = "Org";
doc["org_name"] = "Amazon";
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["title"] = "Org";
doc["org_name"] = "Walmart";
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
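// the dynamically created org_name field should be facetable, since .*_name is declared as a facet field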
auto results = coll1->search("org", {"title"}, "", {"org_name"}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("Walmart", results["hits"][0]["document"]["org_name"].get<std::string>());
ASSERT_EQ("Amazon", results["hits"][1]["document"]["org_name"].get<std::string>());
ASSERT_EQ("Amazon", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("Walmart", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
// add another type of .*_name field
doc.clear();
doc["title"] = "Company";
doc["company_name"] = "Stark";
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {"title"}, "", {"company_name", "org_name"}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("company_name", results["facet_counts"][0]["field_name"].get<std::string>());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Stark", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("org_name", results["facet_counts"][1]["field_name"].get<std::string>());
ASSERT_EQ(2, results["facet_counts"][1]["counts"].size());
ASSERT_EQ("Amazon", results["facet_counts"][1]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][1]["counts"][0]["count"]);
ASSERT_EQ("Walmart", results["facet_counts"][1]["counts"][1]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][1]["counts"][1]["count"]);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, WildcardFacetFieldsWithAutoFacetFieldType) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field(".*_name", field_types::AUTO, true, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO).get();
}
nlohmann::json doc;
doc["title"] = "Org";
doc["org_name"] = "Amazon";
doc["year_name"] = 1990;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["title"] = "Org";
doc["org_name"] = "Walmart";
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("org", {"title"}, "", {"org_name"}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("Walmart", results["hits"][0]["document"]["org_name"].get<std::string>());
ASSERT_EQ("Amazon", results["hits"][1]["document"]["org_name"].get<std::string>());
ASSERT_EQ("Amazon", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("Walmart", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, WildcardFacetFieldsWithoutAutoSchema) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field(".*_name", field_types::STRING, true, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0).get();
}
nlohmann::json doc;
doc["title"] = "Org";
doc["org_name"] = "Amazon";
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["title"] = "Org";
doc["org_name"] = "Walmart";
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("org", {"title"}, "", {"org_name"}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("Walmart", results["hits"][0]["document"]["org_name"].get<std::string>());
ASSERT_EQ("Amazon", results["hits"][1]["document"]["org_name"].get<std::string>());
ASSERT_EQ("Amazon", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("Walmart", results["facet_counts"][0]["counts"][1]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][1]["count"]);
// add another type of .*_name field
doc.clear();
doc["title"] = "Company";
doc["company_name"] = "Stark";
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {"title"}, "", {"company_name", "org_name"}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ("company_name", results["facet_counts"][0]["field_name"].get<std::string>());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("Stark", results["facet_counts"][0]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
ASSERT_EQ("org_name", results["facet_counts"][1]["field_name"].get<std::string>());
ASSERT_EQ(2, results["facet_counts"][1]["counts"].size());
ASSERT_EQ("Amazon", results["facet_counts"][1]["counts"][0]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][1]["counts"][0]["count"]);
ASSERT_EQ("Walmart", results["facet_counts"][1]["counts"][1]["value"].get<std::string>());
ASSERT_EQ(1, (int) results["facet_counts"][1]["counts"][1]["count"]);
// Don't allow auto detection of schema when AUTO mode is not chosen
doc["description"] = "Stark company.";
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto res_op = coll1->search("*", {"description"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `description` in the schema.", res_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, RegexpExplicitFieldTypeCoercion) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("i.*", field_types::INT32, false, true),
field("s.*", field_types::STRING, false, true),
field("a.*", field_types::STRING_ARRAY, false, true),
field("nullsa.*", field_types::STRING_ARRAY, false, true),
field("num.*", "string*", false, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0).get();
}
nlohmann::json doc;
doc["title"] = "Rand Building";
doc["i_age"] = "28";
doc["s_name"] = nullptr;
doc["a_name"] = {};
doc["nullsa"] = nullptr;
doc["num_employees"] = 28;
// should coerce while retaining expected type
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto schema = coll1->get_fields();
ASSERT_EQ("a_name", schema[6].name);
ASSERT_EQ(field_types::STRING_ARRAY, schema[6].type);
ASSERT_EQ("i_age", schema[7].name);
ASSERT_EQ(field_types::INT32, schema[7].type);
ASSERT_EQ("nullsa", schema[8].name);
ASSERT_EQ(field_types::STRING_ARRAY, schema[8].type);
// num_employees field's type must be "solidified" to an actual type
ASSERT_EQ("num_employees", schema[9].name);
ASSERT_EQ(field_types::STRING, schema[9].type);
ASSERT_EQ("s_name", schema[10].name);
ASSERT_EQ(field_types::STRING, schema[10].type);
auto results = coll1->search("rand", {"title"}, "i_age: 28", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, DynamicFieldsMustOnlyBeOptional) {
Collection *coll1;
std::vector<field> bad_fields = {field("title", field_types::STRING, true),
field(".*_name", field_types::STRING, true, false),};
auto op = collectionManager.create_collection("coll1", 1, bad_fields, "", 0);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Field `.*_name` must be an optional field.", op.error());
// string* fields should only be optional
std::vector<field> bad_fields2 = {field("title", field_types::STRING, true),
field("name", "string*", true, false),};
op = collectionManager.create_collection("coll1", 1, bad_fields2, "", 0);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Field `name` must be an optional field.", op.error());
std::vector<field> fields = {field("title", field_types::STRING, true),
field(".*_name", field_types::STRING, true, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
op = collectionManager.create_collection("coll1", 1, fields, "", 0);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
ASSERT_TRUE(coll1->get_dynamic_fields()[".*_name"].optional);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, AutoAndStringStarFieldsShouldAcceptNullValues) {
Collection *coll1;
std::vector<field> fields = {
field("foo", "string*", true, true),
field("buzz", "auto", true, true),
field("bar.*", "string*", true, true),
field("baz.*", "auto", true, true),
};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto coll_op = collectionManager.create_collection("coll1", 1, fields, "", 0);
ASSERT_TRUE(coll_op.ok());
coll1 = coll_op.get();
}
nlohmann::json doc;
doc["foo"] = nullptr;
doc["buzz"] = nullptr;
doc["bar_one"] = nullptr;
doc["baz_one"] = nullptr;
// should allow indexing of null values since all are optional
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto res = coll1->search("*", {}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(1, res["hits"][0]["document"].size());
auto schema = coll1->get_fields();
ASSERT_EQ(4, schema.size());
doc["foo"] = {"hello", "world"};
doc["buzz"] = 123;
doc["bar_one"] = "hello";
doc["baz_one"] = true;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
schema = coll1->get_fields();
ASSERT_EQ(8, schema.size());
ASSERT_EQ("bar_one", schema[4].name);
ASSERT_EQ(field_types::STRING, schema[4].type);
ASSERT_EQ("baz_one", schema[5].name);
ASSERT_EQ(field_types::BOOL, schema[5].type);
ASSERT_EQ("buzz", schema[6].name);
ASSERT_EQ(field_types::INT64, schema[6].type);
ASSERT_EQ("foo", schema[7].name);
ASSERT_EQ(field_types::STRING_ARRAY, schema[7].type);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, BothFallbackAndDynamicFields) {
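// a schema can combine dynamic field patterns with a catch-all fallback (.*) field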
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field(".*_name", field_types::STRING, false, true),
field(".*_year", field_types::INT32, true, true),
field(".*", field_types::AUTO, false, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
ASSERT_EQ(4, coll1->get_fields().size());
ASSERT_EQ(2, coll1->get_dynamic_fields().size());
ASSERT_TRUE(coll1->get_dynamic_fields().count(".*_name") != 0);
ASSERT_TRUE(coll1->get_dynamic_fields()[".*_name"].optional);
ASSERT_FALSE(coll1->get_dynamic_fields()[".*_name"].facet);
ASSERT_TRUE(coll1->get_dynamic_fields().count(".*_year") != 0);
ASSERT_TRUE(coll1->get_dynamic_fields()[".*_year"].optional);
ASSERT_TRUE(coll1->get_dynamic_fields()[".*_year"].facet);
nlohmann::json doc;
doc["title"] = "Amazon Inc.";
doc["org_name"] = "Amazon";
doc["org_year"] = 1994;
doc["rand_int"] = 42;
doc["rand_str"] = "fizzbuzz";
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// org_year should be of type int32
auto schema = coll1->get_fields();
ASSERT_EQ("org_year", schema[5].name);
ASSERT_EQ(field_types::INT32, schema[5].type);
auto res_op = coll1->search("Amazon", {"org_name"}, "", {"org_name"}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a facet field named `org_name` in the schema.", res_op.error());
auto results = coll1->search("Amazon", {"org_name"}, "", {"org_year"}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
res_op = coll1->search("fizzbuzz", {"rand_str"}, "", {"rand_str"}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_EQ("Could not find a facet field named `rand_str` in the schema.", res_op.error());
results = coll1->search("fizzbuzz", {"rand_str"}, "", {"org_year"}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, WildcardFieldAndDictionaryField) {
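// with nested fields enabled, object keys should be flattened (e.g. kinds.CGXX) and filterable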
Collection *coll1;
std::vector<field> fields = {field(".*", field_types::AUTO, false, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO, {}, {}, true);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
nlohmann::json doc;
doc["year"] = 2000;
doc["kinds"] = nlohmann::json::object();
doc["kinds"]["CGXX"] = 13;
doc["kinds"]["ZBXX"] = 24;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("*", {}, "year: 2000", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
auto schema = coll1->get_fields();
ASSERT_EQ(5, schema.size());
ASSERT_EQ(".*", schema[0].name);
ASSERT_EQ("kinds", schema[1].name);
ASSERT_EQ("year", schema[2].name);
ASSERT_EQ("kinds.ZBXX", schema[3].name);
ASSERT_EQ("kinds.CGXX", schema[4].name);
// filter on object key
results = coll1->search("*", {}, "kinds.CGXX: 13", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, DynamicFieldAndDictionaryField) {
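// an object value must be rejected when it matches a dynamic field that expects a string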
Collection *coll1;
std::vector<field> fields = {field("k.*", field_types::STRING, false, true),
field(".*", field_types::AUTO, false, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
nlohmann::json doc;
doc["kinds"] = nlohmann::json::object();
doc["kinds"]["CGXX"] = 13;
doc["kinds"]["ZBXX"] = 24;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `kinds` must be a string.", add_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, RegexpIntFieldWithFallbackStringType) {
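// dynamic field patterns should take precedence over the string fallback when resolving field types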
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, true),
field("n.*", field_types::INT32, false, true),
field("s.*", "string*", false, true),
field(".*", field_types::STRING, false, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::STRING);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
nlohmann::json doc;
doc["title"] = "Amazon Inc.";
doc["n_age"] = 32;
doc["s_tags"] = {"shopping"};
doc["rand_str"] = "fizzbuzz";
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
// n_age should be of type int32
auto schema = coll1->get_fields();
ASSERT_EQ("n_age", schema[4].name);
ASSERT_EQ(field_types::INT32, schema[4].type);
ASSERT_EQ("rand_str", schema[5].name);
ASSERT_EQ(field_types::STRING, schema[5].type);
ASSERT_EQ("s_tags", schema[6].name);
ASSERT_EQ(field_types::STRING_ARRAY, schema[6].type);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, ContainingWildcardOnlyField) {
Collection *coll1;
std::vector<field> fields = {field("company_name", field_types::STRING, false),
field("num_employees", field_types::INT32, false),
field(".*", field_types::BOOL, true, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::BOOL);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
nlohmann::json doc;
doc["company_name"] = "Amazon Inc.";
doc["num_employees"] = 2000;
doc["country"] = "USA";
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `country` must be a bool.", add_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, DoNotIndexFieldMarkedAsNonIndex) {
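// fields marked with index: false should be stored in documents but excluded from searching and faceting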
Collection *coll1;
std::vector<field> fields = {field("company_name", field_types::STRING, false),
field("num_employees", field_types::INT32, false),
field("post", field_types::STRING, false, true, false),
field(".*_txt", field_types::STRING, false, true, false),
field(".*", field_types::AUTO, false, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
nlohmann::json doc;
doc["company_name"] = "Amazon Inc.";
doc["num_employees"] = 2000;
doc["post"] = "Some post.";
doc["description_txt"] = "Rome was not built in a day.";
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().count("post"));
auto res_op = coll1->search("Amazon", {"description_txt"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `description_txt` in the schema.", res_op.error());
res_op = coll1->search("Amazon", {"post"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Field `post` is marked as a non-indexed field in the schema.", res_op.error());
// wildcard field pattern should exclude non-indexed fields while searching
res_op = coll1->search("Amazon", {"*"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["hits"].size());
// try updating a document with a non-indexable field
doc["post"] = "Some post updated.";
auto update_op = coll1->add(doc.dump(), UPDATE, "0");
ASSERT_TRUE(update_op.ok());
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().count("post"));
auto res = coll1->search("Amazon", {"company_name"}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ("Some post updated.", res["hits"][0]["document"]["post"].get<std::string>());
// try to delete doc with non-indexable field
auto del_op = coll1->remove("0");
ASSERT_TRUE(del_op.ok());
// facet search should also be disabled
auto fs_op = coll1->search("Amazon", {"company_name"}, "", {"description_txt"}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(fs_op.ok());
ASSERT_EQ("Could not find a facet field named `description_txt` in the schema.", fs_op.error());
fields = {field("company_name", field_types::STRING, false),
field("num_employees", field_types::INT32, false),
field("post", field_types::STRING, false, false, false),
field(".*_txt", field_types::STRING, true, true, false),
field(".*", field_types::AUTO, false, true)};
auto op = collectionManager.create_collection("coll2", 1, fields, "", 0, field_types::AUTO);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Field `.*_txt` cannot be a facet since it's marked as non-indexable.", op.error());
fields = {field("company_name", field_types::STRING, false),
field("num_employees", field_types::INT32, false),
field("post", field_types::STRING, false, true, false),
field(".*_txt", field_types::STRING, true, false, false),
field(".*", field_types::AUTO, false, true)};
op = collectionManager.create_collection("coll2", 1, fields, "", 0, field_types::AUTO);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Field `.*_txt` must be an optional field.", op.error());
// don't allow the catch-all field to be non-indexable
fields = {field("company_name", field_types::STRING, false),
field("num_employees", field_types::INT32, false),
field(".*_txt", field_types::STRING, false, true, false),
field(".*", field_types::AUTO, false, true, false)};
op = collectionManager.create_collection("coll2", 1, fields, "", 0, field_types::AUTO);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Field `.*` cannot be marked as non-indexable.", op.error());
// allow auto field to be non-indexable
fields = {field("company_name", field_types::STRING, false),
field("num_employees", field_types::INT32, false),
field("noidx_.*", field_types::AUTO, false, true, false)};
op = collectionManager.create_collection("coll3", 1, fields, "", 0, field_types::AUTO);
ASSERT_TRUE(op.ok());
// don't allow facet to be true when index is false
fields = {field("company_name", field_types::STRING, false),
field("num_employees", field_types::INT32, false),
field("facet_noindex", field_types::STRING, true, true, false)};
op = collectionManager.create_collection("coll4", 1, fields, "", 0, field_types::AUTO);
ASSERT_FALSE(op.ok());
ASSERT_EQ("Field `facet_noindex` cannot be a facet since it's marked as non-indexable.", op.error());
collectionManager.drop_collection("coll1");
collectionManager.drop_collection("coll2");
collectionManager.drop_collection("coll3");
collectionManager.drop_collection("coll4");
}
TEST_F(CollectionAllFieldsTest, NullValueUpdate) {
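// updating fields to null should remove them from the stored document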
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false, true),
field(".*_name", field_types::STRING, true, true),
field("unindexed", field_types::STRING, false, true, false),
field(".*", field_types::STRING, false, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::STRING);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Running Shoes";
doc["company_name"] = "Nike";
doc["country"] = "USA";
doc["unindexed"] = "Hello";
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["title"] = nullptr;
doc["company_name"] = nullptr;
doc["country"] = nullptr;
add_op = coll1->add(doc.dump(), UPDATE);
ASSERT_TRUE(add_op.ok());
// try updating the doc with null value again
add_op = coll1->add(doc.dump(), UPDATE);
ASSERT_TRUE(add_op.ok());
// ensure that the fields are removed from the document
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(2, results["hits"][0]["document"].size());
ASSERT_EQ("Hello", results["hits"][0]["document"]["unindexed"].get<std::string>());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, NullValueArrayUpdate) {
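// a null array removes the field, but null values inside an array must be rejected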
Collection *coll1;
std::vector<field> fields = {field("titles", field_types::STRING_ARRAY, false, true),
field(".*_names", field_types::STRING_ARRAY, true, true),
field("unindexed", field_types::STRING, false, true, false),
field(".*", field_types::STRING_ARRAY, false, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::STRING_ARRAY);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
nlohmann::json doc;
doc["id"] = "0";
doc["titles"] = {"Running Shoes"};
doc["company_names"] = {"Nike"};
doc["countries"] = {"USA", nullptr};
doc["unindexed"] = "Hello";
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `countries` must be an array of string.", add_op.error());
doc["countries"] = {nullptr};
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `countries` must be an array of string.", add_op.error());
doc["countries"] = {"USA"};
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
ASSERT_EQ(1, coll1->get_num_documents());
ASSERT_EQ(1, coll1->_get_index()->num_seq_ids());
doc["titles"] = nullptr;
doc["company_names"] = nullptr;
doc["countries"] = nullptr;
add_op = coll1->add(doc.dump(), UPDATE);
ASSERT_TRUE(add_op.ok());
// try updating the doc with null value again
add_op = coll1->add(doc.dump(), UPDATE);
ASSERT_TRUE(add_op.ok());
ASSERT_EQ(1, coll1->get_num_documents());
// ensure that the fields are removed from the document
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(2, results["hits"][0]["document"].size());
ASSERT_EQ("Hello", results["hits"][0]["document"]["unindexed"].get<std::string>());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// update with null values inside array
doc["countries"] = {nullptr};
add_op = coll1->add(doc.dump(), UPDATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `countries` must be an array of string.", add_op.error());
doc["countries"] = {"USA", nullptr};
add_op = coll1->add(doc.dump(), UPDATE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `countries` must be an array of string.", add_op.error());
ASSERT_EQ(1, coll1->get_num_documents());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, EmptyArrayShouldBeAcceptedAsFirstValueOfAutoField) {
Collection *coll1;
std::vector<field> fields = {field(".*", field_types::AUTO, false, true)};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "",
0, field_types::AUTO);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
nlohmann::json doc;
doc["company_name"] = "Amazon Inc.";
doc["tags"] = nlohmann::json::array();
doc["country"] = "USA";
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, DISABLED_SchemaUpdateShouldBeAtomicForAllFields) {
// when a given field in a document is "bad", other fields should not be partially added to schema
Collection *coll1;
std::vector<field> fields = {field(".*", field_types::AUTO, false, true),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
auto op = collectionManager.create_collection("coll1", 1, fields, "",
0, field_types::AUTO);
ASSERT_TRUE(op.ok());
coll1 = op.get();
}
// insert a document where one key ("int_2") holds bad data, surrounded by "good" keys
// this should NOT end up creating schema changes
nlohmann::json doc;
doc["int_2"] = 200;
auto add_op = coll1->add(doc.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
doc["int_1"] = 100;
doc["int_2"] = nlohmann::json::array();
doc["int_2"].push_back(nlohmann::json::object());
doc["int_3"] = 300;
add_op = coll1->add(doc.dump(), CREATE);
ASSERT_FALSE(add_op.ok());
auto f = coll1->get_fields();
ASSERT_EQ(1, coll1->get_fields().size());
ASSERT_EQ(0, coll1->get_sort_fields().size());
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().size());
ASSERT_EQ(0, coll1->_get_index()->_get_numerical_index().size());
// now insert document with just "int_1" key
nlohmann::json doc2;
doc2["int_1"] = 200;
add_op = coll1->add(doc2.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
ASSERT_EQ(2, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_sort_fields().size());
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().size());
ASSERT_EQ(1, coll1->_get_index()->_get_numerical_index().size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionAllFieldsTest, FieldNameMatchingRegexpShouldNotBeIndexed) {
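// document keys that merely look like regex patterns (e.g. "name.*") should not be indexed as real fields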
std::vector<field> fields = {field(".*", field_types::AUTO, false, true),
field("title", field_types::STRING, false),
field("name.*", field_types::STRING, true, true)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "One Two Three";
doc1["name.*"] = "Rowling";
doc1["name.*barbaz"] = "JK";
doc1[".*"] = "foo";
std::vector<std::string> json_lines;
json_lines.push_back(doc1.dump());
coll1->add_many(json_lines, doc1, UPSERT);
json_lines[0] = doc1.dump();
coll1->add_many(json_lines, doc1, UPSERT);
ASSERT_EQ(1, coll1->_get_index()->_get_search_index().size());
ASSERT_EQ(3, coll1->get_fields().size());
auto results = coll1->search("one", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 5, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
}
TEST_F(CollectionAllFieldsTest, AutoFieldValueCoercionRemoval) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "store", "type": "auto", "optional": true}
]
})"_json;
auto coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["store"]["id"] = 123;
coll1->add(doc1.dump(), CREATE);
// string value will be coerced to integer
doc1["id"] = "1";
doc1["store"]["id"] = "124";
coll1->add(doc1.dump(), CREATE);
// removal should work correctly
coll1->remove("1");
auto results = coll1->search("*", {},
"store.id: 124", {}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 5, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(0, results["found"].get<size_t>());
}
TEST_F(CollectionAllFieldsTest, FieldNameMatchingRegexpShouldNotBeIndexedInNonAutoSchema) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("name.*", field_types::STRING, true, true)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0, field_types::AUTO).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "One Two Three";
doc1["name.*"] = "Rowling";
doc1["name.*barbaz"] = "JK";
doc1[".*"] = "foo";
std::vector<std::string> json_lines;
json_lines.push_back(doc1.dump());
coll1->add_many(json_lines, doc1, UPSERT);
json_lines[0] = doc1.dump();
coll1->add_many(json_lines, doc1, UPSERT);
ASSERT_EQ(1, coll1->_get_index()->_get_search_index().size());
ASSERT_EQ(2, coll1->get_fields().size());
auto results = coll1->search("one", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
1, spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 5, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true).get();
ASSERT_EQ(1, results["hits"].size());
}
TEST_F(CollectionAllFieldsTest, EmbedFromFieldJSONInvalidField) {
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
nlohmann::json field_json;
field_json["name"] = "embedding";
field_json["type"] = "float[]";
field_json["embed"] = nlohmann::json::object();
field_json["embed"]["from"] = {"name"};
field_json["embed"]["model_config"] = nlohmann::json::object();
field_json["embed"]["model_config"]["model_name"] = "ts/e5-small";
std::vector<field> fields;
std::string fallback_field_type;
auto arr = nlohmann::json::array();
arr.push_back(field_json);
auto field_op = field::json_fields_to_fields(false, arr, fallback_field_type, fields);
ASSERT_FALSE(field_op.ok());
ASSERT_EQ("Property `embed.from` can only refer to string, string array or image (for supported models) fields.", field_op.error());
}
TEST_F(CollectionAllFieldsTest, EmbedFromNotArray) {
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
nlohmann::json field_json;
field_json["name"] = "embedding";
field_json["type"] = "float[]";
field_json["embed"] = nlohmann::json::object();
field_json["embed"]["from"] = "name";
field_json["embed"]["model_config"] = nlohmann::json::object();
field_json["embed"]["model_config"]["model_name"] = "ts/e5-small";
std::vector<field> fields;
std::string fallback_field_type;
auto arr = nlohmann::json::array();
arr.push_back(field_json);
auto field_op = field::json_fields_to_fields(false, arr, fallback_field_type, fields);
ASSERT_FALSE(field_op.ok());
ASSERT_EQ("Property `embed.from` must be an array.", field_op.error());
}
TEST_F(CollectionAllFieldsTest, ModelParametersWithoutEmbedFrom) {
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
nlohmann::json field_json;
field_json["name"] = "embedding";
field_json["type"] = "float[]";
field_json["embed"]["model_config"] = nlohmann::json::object();
field_json["embed"]["model_config"]["model_name"] = "ts/e5-small";
std::vector<field> fields;
std::string fallback_field_type;
auto arr = nlohmann::json::array();
arr.push_back(field_json);
auto field_op = field::json_fields_to_fields(false, arr, fallback_field_type, fields);
ASSERT_FALSE(field_op.ok());
ASSERT_EQ("Property `embed` must contain a `from` property.", field_op.error());
}
TEST_F(CollectionAllFieldsTest, EmbedFromBasicValid) {
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
nlohmann::json schema = R"({
"name": "obj_coll",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"],
"model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
auto obj_coll_op = collectionManager.create_collection(schema);
ASSERT_TRUE(obj_coll_op.ok());
Collection* obj_coll = obj_coll_op.get();
nlohmann::json doc1;
doc1["name"] = "One Two Three";
auto add_res = obj_coll->add(doc1.dump());
ASSERT_TRUE(add_res.ok());
ASSERT_TRUE(add_res.get()["name"].is_string());
ASSERT_TRUE(add_res.get()["embedding"].is_array());
ASSERT_EQ(384, add_res.get()["embedding"].size());
}
TEST_F(CollectionAllFieldsTest, WrongDataTypeForEmbedFrom) {
nlohmann::json schema = R"({
"name": "obj_coll",
"fields": [
{"name": "age", "type": "int32"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["age"],
"model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
auto obj_coll_op = collectionManager.create_collection(schema);
ASSERT_FALSE(obj_coll_op.ok());
ASSERT_EQ("Property `embed.from` can only refer to string, string array or image (for supported models) fields.", obj_coll_op.error());
}
TEST_F(CollectionAllFieldsTest, StoreInvalidInput) {
nlohmann::json schema = R"({
"name": "obj_coll",
"fields": [
{"name": "age", "type": "int32", "store": "qwerty"}
]
})"_json;
auto obj_coll_op = collectionManager.create_collection(schema);
ASSERT_FALSE(obj_coll_op.ok());
ASSERT_EQ("The `store` property of the field `age` should be a boolean.", obj_coll_op.error());
}
TEST_F(CollectionAllFieldsTest, InvalidstemValue) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string", "stem": "qwerty"}
]
})"_json;
auto obj_coll_op = collectionManager.create_collection(schema);
ASSERT_FALSE(obj_coll_op.ok());
ASSERT_EQ("The `stem` property of the field `name` should be a boolean.", obj_coll_op.error());
schema = R"({
"name": "test",
"fields": [
{"name": "name", "type": "int32", "stem": true}
]
})"_json;
obj_coll_op = collectionManager.create_collection(schema);
ASSERT_FALSE(obj_coll_op.ok());
ASSERT_EQ("The `stem` property is only allowed for string and string[] fields.", obj_coll_op.error());
}
TEST_F(CollectionAllFieldsTest, GeopointSortValue) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{"name": "geo", "type": "geopoint", "sort": false}
]
})"_json;
auto create_op = collectionManager.create_collection(schema);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("The `sort` property of the field `geo` having `geopoint` type cannot be `false`."
" The sort index is used during GeoSearch.", create_op.error());
schema = R"({
"name": "test",
"fields": [
{"name": "geo_array", "type": "geopoint[]", "sort": false}
]
})"_json;
create_op = collectionManager.create_collection(schema);
ASSERT_FALSE(create_op.ok());
ASSERT_EQ("The `sort` property of the field `geo_array` having `geopoint[]` type cannot be `false`."
" The sort index is used during GeoSearch.", create_op.error());
}
| 68,025 | C++ | .cpp | 1,356 | 43.315634 | 139 | 0.60756 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,728 | collection_schema_change_test.cpp | typesense_typesense/test/collection_schema_change_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionSchemaChangeTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_schema_change";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionSchemaChangeTest, AddNewFieldsToCollection) {
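// alter should support adding regular, dynamic, auto and non-indexed fields, and persist them across restarts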
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The quick brown fox was too fast.";
doc["tags"] = {"experimental", "news"};
doc["category"] = "animals";
doc["quantity"] = 100;
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("fox",
{"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
auto schema_changes = R"({
"fields": [
{"name": "tags", "type": "string[]", "infix": true},
{"name": "category", "type": "string", "sort": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
results = coll1->search("eriment",
{"tags"}, "", {}, {}, {0}, 3, 1, FREQUENCY,
{true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
sort_fields = { sort_by("category", "DESC") };
results = coll1->search("*",
{}, "", {}, sort_fields, {0}, 3, 1, FREQUENCY,
{true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
schema_changes = R"({
"fields": [
{"name": "quantity", "type": "int32", "facet": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
results = coll1->search("*",
{}, "quantity: 100", {"quantity"}, {}, {0}, 3, 1, FREQUENCY,
{true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_STREQ("100", results["facet_counts"][0]["counts"][0]["value"].get<std::string>().c_str());
ASSERT_EQ(1, (int) results["facet_counts"][0]["counts"][0]["count"]);
// add a dynamic field
schema_changes = R"({
"fields": [
{"name": ".*_bool", "type": "bool"},
{"name": "age", "type": "auto", "optional": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
auto coll_fields = coll1->get_fields();
ASSERT_EQ(7, coll_fields.size());
ASSERT_EQ(".*_bool", coll_fields[5].name);
ASSERT_EQ("age", coll_fields[6].name);
doc["id"] = "1";
doc["title"] = "The one";
doc["tags"] = {"sports", "news"};
doc["category"] = "things";
doc["quantity"] = 200;
doc["points"] = 100;
doc["on_sale_bool"] = true;
doc["age"] = 45;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
results = coll1->search("*",
{}, "on_sale_bool: true", {}, {}, {0}, 3, 1, FREQUENCY,
{true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*",
{}, "age: 45", {}, {}, {0}, 3, 1, FREQUENCY,
{true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// add auto field
schema_changes = R"({
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
doc["id"] = "2";
doc["title"] = "The two";
doc["tags"] = {"sports", "news"};
doc["category"] = "things";
doc["quantity"] = 200;
doc["points"] = 100;
doc["on_sale_bool"] = false;
doc["foobar"] = 123;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
results = coll1->search("*",
{}, "foobar: 123", {}, {}, {0}, 3, 1, FREQUENCY,
{true}, 5,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {always}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("2", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// try to add auto field again
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("The schema already contains a `.*` field.", alter_op.error());
// try to add a regular field with 2 auto fields
schema_changes = R"({
"fields": [
{"name": "bar", "type": "auto"},
{"name": ".*", "type": "auto"},
{"name": ".*", "type": "auto"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("There can be only one field named `.*`.", alter_op.error());
// add non-index field
schema_changes = R"({
"fields": [
{"name": "raw", "type": "int32", "index": false, "optional": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
// try to add `id` field
schema_changes = R"({
"fields": [
{"name": "id", "type": "int32"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Field `id` cannot be altered.", alter_op.error());
ASSERT_EQ(9, coll1->get_schema().size());
ASSERT_EQ(12, coll1->get_fields().size());
ASSERT_EQ(5, coll1->_get_index()->_get_numerical_index().size());
// fields should also be persisted properly on disk
std::string collection_meta_json;
store->get(Collection::get_meta_key("coll1"), collection_meta_json);
nlohmann::json collection_meta = nlohmann::json::parse(collection_meta_json);
ASSERT_EQ(12, collection_meta["fields"].size());
// try restoring collection from disk: all fields should be preserved
collectionManager.dispose();
delete store;
store = new Store("/tmp/typesense_test/collection_schema_change");
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
coll1 = collectionManager.get_collection("coll1").get();
ASSERT_EQ(9, coll1->get_schema().size());
ASSERT_EQ(12, coll1->get_fields().size());
ASSERT_EQ(5, coll1->_get_index()->_get_numerical_index().size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSchemaChangeTest, DropFieldsFromCollection) {
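// dropping all fields should clear the schema and indices while keeping stored documents retrievable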
std::vector<field> fields = {field(".*", field_types::AUTO, false),
field("title", field_types::STRING, false, false, true, "", 1, 1),
field("location", field_types::GEOPOINT, false),
field("locations", field_types::GEOPOINT_ARRAY, false),
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points", 0, "auto").get();
std::vector<std::vector<double>> lat_lngs;
lat_lngs.push_back({48.85821022164442, 2.294239067890161});
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The quick brown fox was too fast.";
doc["location"] = {48.85821022164442, 2.294239067890161};
doc["locations"] = lat_lngs;
doc["tags"] = {"experimental", "news"};
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*",
{}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
auto schema_changes = R"({
"fields": [
{"name": ".*", "drop": true},
{"name": "title", "drop": true},
{"name": "location", "drop": true},
{"name": "locations", "drop": true},
{"name": "tags", "drop": true},
{"name": "points", "drop": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
auto res_op = coll1->search("quick", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `title` in the schema.", res_op.error());
auto search_schema = coll1->get_schema();
ASSERT_EQ(0, search_schema.size());
auto coll_fields = coll1->get_fields();
ASSERT_EQ(0, coll_fields.size());
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().size());
ASSERT_EQ(0, coll1->_get_index()->_get_numerical_index().size());
ASSERT_EQ(0, coll1->_get_index()->_get_infix_index().size());
ASSERT_EQ(1, coll1->_get_index()->num_seq_ids());
ASSERT_EQ("", coll1->get_fallback_field_type());
ASSERT_EQ("", coll1->get_default_sorting_field());
// try to drop `id` field
schema_changes = R"({
"fields": [
{"name": "id", "drop": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Field `id` cannot be altered.", alter_op.error());
// try restoring collection from disk: all fields should be deleted
collectionManager.dispose();
delete store;
store = new Store("/tmp/typesense_test/collection_schema_change");
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
coll1 = collectionManager.get_collection("coll1").get();
search_schema = coll1->get_schema();
ASSERT_EQ(0, search_schema.size());
coll_fields = coll1->get_fields();
ASSERT_EQ(0, coll_fields.size());
ASSERT_EQ(0, coll1->_get_index()->_get_search_index().size());
ASSERT_EQ(0, coll1->_get_index()->_get_numerical_index().size());
ASSERT_EQ(0, coll1->_get_index()->_get_infix_index().size());
ASSERT_EQ(1, coll1->_get_index()->num_seq_ids());
ASSERT_EQ("", coll1->get_default_sorting_field());
ASSERT_EQ("", coll1->get_fallback_field_type());
results = coll1->search("*", {}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
res_op = coll1->search("quick", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Could not find a field named `title` in the schema.", res_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSchemaChangeTest, AlterValidations) {
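// exercise the validation errors returned for malformed or incompatible schema change requests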
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", 1, 1),
field("location", field_types::GEOPOINT, false),
field("locations", field_types::GEOPOINT_ARRAY, false),
field("tags", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points", 0, "").get();
std::vector<std::vector<double>> lat_lngs;
lat_lngs.push_back({48.85821022164442, 2.294239067890161});
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The quick brown fox was too fast.";
doc["location"] = {48.85821022164442, 2.294239067890161};
doc["locations"] = lat_lngs;
doc["tags"] = {"experimental", "news"};
doc["desc"] = "Story about fox.";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// 1. Modify existing field, which is not supported
auto schema_changes = R"({
"fields": [
{"name": "title", "type": "string[]"}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Field `title` is already part of the schema: To change this field, drop it first before adding it "
"back to the schema.",alter_op.error());
// 2. Bad field format
schema_changes = R"({
"fields": [
{"name": "age", "typezzz": "int32"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Wrong format for `fields`. It should be an array of objects containing `name`, `type`, "
"`optional` and `facet` properties.",alter_op.error());
// 3. Try to drop non-existing field
schema_changes = R"({
"fields": [
{"name": "age", "drop": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Field `age` is not part of collection schema.",alter_op.error());
// 4. Bad value for `drop` parameter
schema_changes = R"({
"fields": [
{"name": "title", "drop": 123}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Field `title` must have a drop value of `true`.", alter_op.error());
// 5. New field schema should match on-disk data
schema_changes = R"({
"fields": [
{"name": "desc", "type": "int32"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Schema change is incompatible with the type of documents already stored in this collection. "
"Existing data for field `desc` cannot be coerced into an int32.", alter_op.error());
// 6. Prevent non-optional field when on-disk data has missing values
doc.clear();
doc["id"] = "1";
doc["title"] = "The brown lion was too slow.";
doc["location"] = {68.85821022164442, 4.294239067890161};
doc["locations"] = lat_lngs;
doc["tags"] = {"lion", "zoo"};
doc["points"] = 200;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
schema_changes = R"({
"fields": [
{"name": "desc", "type": "string", "optional": false}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Field `desc` has been declared in the schema, but is not found in the documents already present "
"in the collection. If you still want to add this field, set it as `optional: true`.", alter_op.error());
// 7. schema JSON missing "fields" property
schema_changes = R"({
"foo": "bar"
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("The `fields` value should be an array of objects containing the field `name` "
"and other properties.", alter_op.error());
// 8. sending full collection schema, like creation body
schema_changes = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Only `fields` and `metadata` can be updated at the moment.",alter_op.error());
// 9. bad datatype in alter
schema_changes = R"({
"fields": [
{"name": "title", "drop": true},
{"name": "title", "type": "foobar"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Field `title` has an invalid data type `foobar`, see docs for supported data types.",alter_op.error());
// add + drop `id` field
schema_changes = R"({
"fields": [
{"name": "id", "drop": true},
{"name": "id", "type": "string"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Field `id` cannot be altered.", alter_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSchemaChangeTest, DropPropertyShouldNotBeAllowedInSchemaCreation) {
nlohmann::json req_json = R"({
"name": "coll1",
"fields": [{"name": "title", "type": "string", "drop": true}]
})"_json;
auto coll1_op = collectionManager.create_collection(req_json);
ASSERT_FALSE(coll1_op.ok());
ASSERT_EQ("Invalid property `drop` on field `title`: it is allowed only during schema update.", coll1_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSchemaChangeTest, AbilityToDropAndReAddIndexAtTheSameTime) {
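// dropping and re-adding a field in a single alter should reindex it with the new properties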
nlohmann::json req_json = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "timestamp", "type": "int32"}
]
})"_json;
auto coll1_op = collectionManager.create_collection(req_json);
ASSERT_TRUE(coll1_op.ok());
auto coll1 = coll1_op.get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Hello";
doc["timestamp"] = 3433232;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// try to alter with a bad type
auto schema_changes = R"({
"fields": [
{"name": "title", "drop": true},
{"name": "title", "type": "int32"}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Schema change is incompatible with the type of documents already stored in this collection. "
"Existing data for field `title` cannot be coerced into an int32.", alter_op.error());
// existing data should not have been touched
auto res = coll1->search("he", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
// drop re-add with facet index
schema_changes = R"({
"fields": [
{"name": "title", "drop": true},
{"name": "title", "type": "string", "facet": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
res = coll1->search("*",
{}, "", {"title"}, {}, {0}, 3, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, res["found"].get<size_t>());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ(1, res["facet_counts"].size());
ASSERT_EQ(4, res["facet_counts"][0].size());
ASSERT_EQ("title", res["facet_counts"][0]["field_name"]);
ASSERT_EQ(1, res["facet_counts"][0]["counts"].size());
ASSERT_EQ("Hello", res["facet_counts"][0]["counts"][0]["value"].get<std::string>());
// migrate int32 to int64
schema_changes = R"({
"fields": [
{"name": "timestamp", "drop": true},
{"name": "timestamp", "type": "int64"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
ASSERT_EQ("int64", coll1->get_schema()["timestamp"].type);
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionSchemaChangeTest, AddAndDropFieldImmediately) {
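// a dynamic field added via alter can later be dropped or dropped + re-added; incompatible on-disk data must abort the alter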
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", 1, 1),
field("points", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points", 0, "").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The quick brown fox was too fast.";
doc["points"] = 100;
doc["quantity_int"] = 1000;
doc["some_txt"] = "foo";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
ASSERT_EQ(2, coll1->get_schema().size());
ASSERT_EQ(0, coll1->get_dynamic_fields().size());
auto results = coll1->search("*",
{}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// add a field via alter which we will try dropping later
auto schema_changes = R"({
"fields": [
{"name": ".*_int", "type": "int32", "optional": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
ASSERT_EQ(3, coll1->get_schema().size());
ASSERT_EQ(4, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
results = coll1->search("*",
{}, "quantity_int: 1000", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// drop + re-add dynamic field
schema_changes = R"({
"fields": [
{"name": ".*_int", "type": "int32", "facet": true},
{"name": ".*_int", "drop": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
ASSERT_EQ(3, coll1->get_schema().size());
ASSERT_EQ(4, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
results = coll1->search("*",
{}, "", {"quantity_int"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_EQ("quantity_int", results["facet_counts"][0]["field_name"].get<std::string>());
schema_changes = R"({
"fields": [
{"name": ".*_int", "drop": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
ASSERT_EQ(2, coll1->get_schema().size());
ASSERT_EQ(2, coll1->get_fields().size());
ASSERT_EQ(0, coll1->get_dynamic_fields().size());
// with bad on-disk data
schema_changes = R"({
"fields": [
{"name": ".*_txt", "type": "int32"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Schema change is incompatible with the type of documents already stored in this collection. "
"Existing data for field `some_txt` cannot be coerced into an int32.", alter_op.error());
ASSERT_EQ(2, coll1->get_schema().size());
ASSERT_EQ(2, coll1->get_fields().size());
ASSERT_EQ(0, coll1->get_dynamic_fields().size());
}
TEST_F(CollectionSchemaChangeTest, DropSpecificDynamicField) {
nlohmann::json req_json = R"({
"name": "coll1",
"fields": [
{"name": ".*_int", "type": "int32", "facet": true}
]
})"_json;
auto coll1_op = collectionManager.create_collection(req_json);
ASSERT_TRUE(coll1_op.ok());
auto coll1 = coll1_op.get();
nlohmann::json doc;
doc["quantity_int"] = 1000;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
ASSERT_EQ(2, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_schema().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
// drop the concrete field (not the dynamic pattern it was created from) via alter
auto schema_changes = R"({
"fields": [
{"name": "quantity_int", "drop": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
ASSERT_EQ(1, coll1->get_fields().size());
ASSERT_EQ(0, coll1->get_schema().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
}
TEST_F(CollectionSchemaChangeTest, AddDynamicFieldMatchingMultipleFields) {
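// a dynamic field added via alter should index every existing document key matching its pattern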
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", 1, 1),
field("points", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points", 0, "").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The quick brown fox was too fast.";
doc["points"] = 100;
doc["quantity_int"] = 1000;
doc["year_int"] = 2020;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
ASSERT_EQ(2, coll1->get_schema().size());
ASSERT_EQ(0, coll1->get_dynamic_fields().size());
// add a dynamic field via alter that will target both _int fields
auto schema_changes = R"({
"fields": [
{"name": ".*_int", "type": "int32", "optional": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
ASSERT_EQ(4, coll1->get_schema().size());
ASSERT_EQ(5, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
auto results = coll1->search("*",
{}, "quantity_int: 1000", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll1->search("*",
{}, "year_int: 2020", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// drop + re-add dynamic field that targets 2 underlying fields
schema_changes = R"({
"fields": [
{"name": ".*_int", "type": "int32", "facet": true},
{"name": ".*_int", "drop": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
ASSERT_EQ(4, coll1->get_schema().size());
ASSERT_EQ(5, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
results = coll1->search("*",
{}, "", {"quantity_int"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_EQ("quantity_int", results["facet_counts"][0]["field_name"].get<std::string>());
results = coll1->search("*",
{}, "", {"year_int"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"][0]["count"].get<size_t>());
ASSERT_EQ("year_int", results["facet_counts"][0]["field_name"].get<std::string>());
schema_changes = R"({
"fields": [
{"name": ".*_int", "drop": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
ASSERT_EQ(2, coll1->get_schema().size());
ASSERT_EQ(2, coll1->get_fields().size());
ASSERT_EQ(0, coll1->get_dynamic_fields().size());
}
TEST_F(CollectionSchemaChangeTest, DropFieldNotExistingInDocuments) {
// optional title field
std::vector<field> fields = {field("title", field_types::STRING, false, true, true, "", 1, 1),
field("points", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points", 0, "").get();
nlohmann::json doc;
doc["id"] = "0";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto schema_changes = R"({
"fields": [
{"name": "title", "drop": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
}
TEST_F(CollectionSchemaChangeTest, ChangeFieldToCoercableTypeIsAllowed) {
// optional title field
std::vector<field> fields = {field("title", field_types::STRING, false, true, true, "", 1, 1),
field("points", field_types::INT32, true),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points", 0, "").get();
nlohmann::json doc;
doc["id"] = "0";
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// coerce field from int to string
auto schema_changes = R"({
"fields": [
{"name": "points", "drop": true},
{"name": "points", "type": "string"}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
}
TEST_F(CollectionSchemaChangeTest, ChangeFromPrimitiveToDynamicField) {
nlohmann::json req_json = R"({
"name": "coll1",
"fields": [
{"name": "tags", "type": "string"}
]
})"_json;
auto coll1_op = collectionManager.create_collection(req_json);
ASSERT_TRUE(coll1_op.ok());
auto coll1 = coll1_op.get();
nlohmann::json doc;
doc["id"] = "0";
doc["tags"] = "123";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
ASSERT_EQ(1, coll1->get_schema().size());
ASSERT_EQ(1, coll1->get_fields().size());
ASSERT_EQ(0, coll1->get_dynamic_fields().size());
// try to alter to string* type
auto schema_changes = R"({
"fields": [
{"name": "tags", "drop": true},
{"name": "tags", "type": "string*", "facet": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
auto results = coll1->search("123", {"tags"}, "", {"tags"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, coll1->get_schema().size());
ASSERT_EQ(2, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
// go back to plain string type
schema_changes = R"({
"fields": [
{"name": "tags", "drop": true},
{"name": "tags", "type": "string", "facet": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
results = coll1->search("123", {"tags"}, "", {"tags"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, coll1->get_schema().size());
ASSERT_EQ(1, coll1->get_fields().size());
ASSERT_EQ(0, coll1->get_dynamic_fields().size());
}
TEST_F(CollectionSchemaChangeTest, ChangeFromPrimitiveToAutoField) {
nlohmann::json req_json = R"({
"name": "coll1",
"fields": [
{"name": "tags", "type": "string"}
]
})"_json;
auto coll1_op = collectionManager.create_collection(req_json);
ASSERT_TRUE(coll1_op.ok());
auto coll1 = coll1_op.get();
nlohmann::json doc;
doc["id"] = "0";
doc["tags"] = "123";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
ASSERT_EQ(1, coll1->get_schema().size());
ASSERT_EQ(1, coll1->get_fields().size());
ASSERT_EQ(0, coll1->get_dynamic_fields().size());
// try to alter to auto type
auto schema_changes = R"({
"fields": [
{"name": "tags", "drop": true},
{"name": "tags", "type": "auto", "facet": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
auto results = coll1->search("123", {"tags"}, "", {"tags"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, coll1->get_schema().size());
ASSERT_EQ(2, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
// go back to plain string type
schema_changes = R"({
"fields": [
{"name": "tags", "drop": true},
{"name": "tags", "type": "string", "facet": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
results = coll1->search("123", {"tags"}, "", {"tags"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, coll1->get_schema().size());
ASSERT_EQ(1, coll1->get_fields().size());
ASSERT_EQ(0, coll1->get_dynamic_fields().size());
}
TEST_F(CollectionSchemaChangeTest, ChangeFromStringStarToAutoField) {
nlohmann::json req_json = R"({
"name": "coll1",
"fields": [
{"name": "tags", "type": "string*"}
]
})"_json;
auto coll1_op = collectionManager.create_collection(req_json);
ASSERT_TRUE(coll1_op.ok());
auto coll1 = coll1_op.get();
nlohmann::json doc;
doc["id"] = "0";
doc["tags"] = "123";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
ASSERT_EQ(1, coll1->get_schema().size());
ASSERT_EQ(2, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
// try to alter to auto type
auto schema_changes = R"({
"fields": [
{"name": "tags", "drop": true},
{"name": "tags", "type": "auto", "facet": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
auto results = coll1->search("123", {"tags"}, "", {"tags"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, coll1->get_schema().size());
ASSERT_EQ(2, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
// go back to string* type
schema_changes = R"({
"fields": [
{"name": "tags", "drop": true},
{"name": "tags", "type": "string*", "facet": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
results = coll1->search("123", {"tags"}, "", {"tags"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, coll1->get_schema().size());
ASSERT_EQ(2, coll1->get_fields().size());
ASSERT_EQ(1, coll1->get_dynamic_fields().size());
}
TEST_F(CollectionSchemaChangeTest, OrderOfDropShouldNotMatter) {
nlohmann::json req_json = R"({
"name": "coll1",
"fields": [
{"name": "loc", "type": "geopoint"}
]
})"_json;
auto coll1_op = collectionManager.create_collection(req_json);
ASSERT_TRUE(coll1_op.ok());
auto coll1 = coll1_op.get();
nlohmann::json doc;
doc["id"] = "0";
doc["loc"] = {1, 2};
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// try to alter to a bad type (int32)
auto schema_changes = R"({
"fields": [
{"name": "loc", "type": "int32"},
{"name": "loc", "drop": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
schema_changes = R"({
"fields": [
{"name": "loc", "drop": true},
{"name": "loc", "type": "int32"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
}
TEST_F(CollectionSchemaChangeTest, IndexFalseToTrue) {
nlohmann::json req_json = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string", "index": false, "facet": false, "optional": true}
]
})"_json;
auto coll1_op = collectionManager.create_collection(req_json);
ASSERT_TRUE(coll1_op.ok());
auto coll1 = coll1_op.get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Typesense";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// make field indexable
auto schema_changes = R"({
"fields": [
{"name": "title", "drop": true},
{"name": "title", "type": "string", "index": true, "facet": true, "optional": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
auto res_op = coll1->search("type", {"title"}, "", {"title"}, {}, {0}, 3, 1, FREQUENCY, {true}, 5);
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
ASSERT_EQ(1, res_op.get()["facet_counts"].size());
}
TEST_F(CollectionSchemaChangeTest, DropGeoPointArrayField) {
    // dropping a geopoint[] field and adding a differently named, optional geopoint[] field should succeed
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "geoloc", "type": "geopoint[]"}
]
})"_json;
auto coll_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(coll_create_op.ok());
Collection* coll1 = coll_create_op.get();
nlohmann::json doc = R"({
"geoloc": [[10, 20]]
})"_json;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto schema_changes = R"({
"fields": [
{"name": "geoloc", "drop": true},
{"name": "_geoloc", "type": "geopoint[]", "optional": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
}
TEST_F(CollectionSchemaChangeTest, AddingFieldWithExistingNullValue) {
// when a value is `null` initially, and is altered, subsequent updates should not fail
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Sample Title 1";
doc["num"] = nullptr;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
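    // `num` is added as an optional int32 so that the already-stored document,
    // which holds a null value for it, stays valid after the alter.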
auto schema_changes = R"({
"fields": [
{"name": "num", "type": "int32", "optional": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
// now try updating the doc
doc["id"] = "0";
doc["title"] = "Sample Title 1";
doc["num"] = 100;
ASSERT_TRUE(coll1->add(doc.dump(), UPSERT).ok());
auto res = coll1->search("*", {}, "num:100", {}, {}, {2}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, res["hits"].size());
}
TEST_F(CollectionSchemaChangeTest, DropIntegerFieldAndAddStringValues) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": ".*", "type": "auto"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
    // index a label field as a string
nlohmann::json doc;
doc["id"] = "0";
doc["label"] = "hello";
doc["title"] = "Foo";
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// drop this field from schema
auto schema_changes = R"({
"fields": [
{"name": "label", "drop": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
    // add new document with an integer label
doc["id"] = "1";
doc["label"] = 1000;
doc["title"] = "Bar";
add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// now we have documents which have both string and integer for the same field :BOOM:
// schema change operation should not be allowed at this point
schema_changes = R"({
"fields": [
{"name": "year", "type": "int32", "optional": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_FALSE(alter_op.ok());
ASSERT_EQ("Schema change is incompatible with the type of documents already stored in this collection. "
"Existing data for field `label` cannot be coerced into an int64.", alter_op.error());
// but should allow the problematic field to be dropped
schema_changes = R"({
"fields": [
{"name": "label", "drop": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
// add document with another field
doc["id"] = "2";
doc["label"] = "xyz";
doc["year"] = 1947;
add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// try searching for string label
auto res_op = coll1->search("xyz", {"label"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5);
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
}
TEST_F(CollectionSchemaChangeTest, NestedFieldExplicitSchemaDropping) {
// Plain object field
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "title", "type": "string"},
{"name": "person", "type": "object"},
{"name": "school.city", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "Test";
doc["person"] = nlohmann::json::object();
doc["person"]["name"] = "Jack";
doc["school"] = nlohmann::json::object();
doc["school"]["city"] = "NYC";
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
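    // 4 fields are expected: title, person, school.city, plus the child field `person.name`
    // that gets auto-detected when the object document is indexed.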
auto fields = coll1->get_fields();
auto schema_map = coll1->get_schema();
ASSERT_EQ(4, fields.size());
ASSERT_EQ(4, schema_map.size());
ASSERT_EQ(2, coll1->get_nested_fields().size());
// drop object field
auto schema_changes = R"({
"fields": [
{"name": "person", "drop": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
fields = coll1->get_fields();
schema_map = coll1->get_schema();
ASSERT_EQ(2, fields.size());
ASSERT_EQ(2, schema_map.size());
ASSERT_EQ(1, coll1->get_nested_fields().size());
// drop primitive nested field
schema_changes = R"({
"fields": [
{"name": "school.city", "drop": true}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
fields = coll1->get_fields();
schema_map = coll1->get_schema();
ASSERT_EQ(1, fields.size());
ASSERT_EQ(1, schema_map.size());
ASSERT_EQ(0, coll1->get_nested_fields().size());
}
TEST_F(CollectionSchemaChangeTest, NestedFieldSchemaAdditions) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "Test";
doc["person"] = nlohmann::json::object();
doc["person"]["name"] = "Jack";
doc["school"] = nlohmann::json::object();
doc["school"]["city"] = "NYC";
doc["school"]["state"] = "NY";
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto fields = coll1->get_fields();
auto schema_map = coll1->get_schema();
ASSERT_EQ(1, fields.size());
ASSERT_EQ(1, schema_map.size());
ASSERT_EQ(0, coll1->get_nested_fields().size());
// add plain object field
auto schema_changes = R"({
"fields": [
{"name": "person", "type": "object"}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
fields = coll1->get_fields();
schema_map = coll1->get_schema();
ASSERT_EQ(3, fields.size());
ASSERT_EQ(3, schema_map.size());
ASSERT_EQ(1, coll1->get_nested_fields().size());
// nested primitive field
schema_changes = R"({
"fields": [
{"name": "school.city", "type": "string"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
fields = coll1->get_fields();
schema_map = coll1->get_schema();
ASSERT_EQ(4, fields.size());
ASSERT_EQ(4, schema_map.size());
ASSERT_EQ(2, coll1->get_nested_fields().size());
// try searching on new fields
auto res_op = coll1->search("jack", {"person.name"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5);
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
res_op = coll1->search("nyc", {"school.city"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5);
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
}
TEST_F(CollectionSchemaChangeTest, DropAndReAddNestedObject) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "title", "type": "string"},
{"name": "person", "type": "object"},
{"name": "school.city", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "Test";
doc["person"] = nlohmann::json::object();
doc["person"]["name"] = "Jack";
doc["school"] = nlohmann::json::object();
doc["school"]["city"] = "NYC";
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto fields = coll1->get_fields();
auto schema_map = coll1->get_schema();
ASSERT_EQ(4, fields.size());
ASSERT_EQ(4, schema_map.size());
// drop + re-add object field
auto schema_changes = R"({
"fields": [
{"name": "person", "drop": true},
{"name": "person", "type": "object"}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
fields = coll1->get_fields();
schema_map = coll1->get_schema();
auto res_op = coll1->search("jack", {"person.name"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5);
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
ASSERT_EQ(4, fields.size());
ASSERT_EQ(4, schema_map.size());
// drop + re-add school
schema_changes = R"({
"fields": [
{"name": "school.city", "drop": true},
{"name": "school.city", "type": "string"}
]
})"_json;
alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
fields = coll1->get_fields();
schema_map = coll1->get_schema();
ASSERT_EQ(4, fields.size());
ASSERT_EQ(4, schema_map.size());
}
TEST_F(CollectionSchemaChangeTest, UpdateAfterNestedNullValue) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "lines", "optional": false, "type": "object[]"},
{"name": "lines.name", "optional": true, "type": "string[]"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc = R"(
{"id": "1", "lines": [{"name": null}]}
)"_json;
auto add_op = coll1->add(doc.dump(), CREATE, "1", DIRTY_VALUES::DROP);
ASSERT_TRUE(add_op.ok());
// add new field
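    // the alter must succeed even though the stored document carries a null `lines.name` value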
auto schema_changes = R"({
"fields": [
{"name": "title", "type": "string", "optional": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
}
TEST_F(CollectionSchemaChangeTest, AlterShouldBeAbleToHandleFieldValueCoercion) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "product", "optional": false, "type": "object"},
{"name": "product.price", "type": "int64"},
{"name": "title", "type": "string"},
{"name": "description", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc = R"(
{"id": "0", "product": {"price": 56.45}, "title": "Title 1", "description": "Description 1"}
)"_json;
auto add_op = coll1->add(doc.dump(), CREATE, "0", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_TRUE(add_op.ok());
// drop a field
auto schema_changes = R"({
"fields": [
{"name": "description", "drop": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
}
TEST_F(CollectionSchemaChangeTest, AlterValidationShouldNotRejectBadValues) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "info", "type": "object"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc = R"(
{"info": {"year": 1999}}
)"_json;
auto add_op = coll1->add(doc.dump(), CREATE, "0", DIRTY_VALUES::COERCE_OR_DROP);
ASSERT_TRUE(add_op.ok());
doc = R"(
{"info": {"year": "2001"}, "description": "test"}
)"_json;
add_op = coll1->add(doc.dump(), CREATE, "1", DIRTY_VALUES::COERCE_OR_DROP);
ASSERT_TRUE(add_op.ok());
// add a new field
auto schema_changes = R"({
"fields": [
{"name": "description", "type": "string", "optional": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
auto res_op = coll1->search("test", {"description"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
}
TEST_F(CollectionSchemaChangeTest, GeoFieldSchemaAddition) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["title"] = "Title 1";
doc["location"] = {22.847641, 89.5405279};
coll1->add(doc.dump());
doc["title"] = "Title 2";
doc["location"] = {22.8951791, 89.5125549};
coll1->add(doc.dump());
// add location field
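    // both stored documents already contain `location` values, so adding the geopoint field
    // should index them and make the radius filter below match both docs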
auto schema_changes = R"({
"fields": [
{"name": "location", "type": "geopoint"}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
// try searching on new fields
auto res_op = coll1->search("*", {}, "location:(22.848641, 89.5406279, 50 km)", {}, {}, {0}, 3, 1, FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(2, res_op.get()["found"].get<size_t>());
}
TEST_F(CollectionSchemaChangeTest, NestedFieldDrop) {
nlohmann::json schema = R"({
"name": "docs",
"enable_nested_fields": true,
"fields": [
{"name": "shops", "type": "object[]", "index": true, "optional": true},
{"name": "shops.is_available", "type": "bool[]", "index": true, "optional": true}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json doc;
doc["shops"][0]["is_available"] = false;
coll->add(doc.dump());
nlohmann::json schema_change = R"({
"fields": [
{"name": "shops.is_available", "drop": true}
]
})"_json;
auto schema_change_op = coll->alter(schema_change);
ASSERT_TRUE(schema_change_op.ok());
auto actual_schema = coll->get_schema();
ASSERT_EQ(1, actual_schema.size());
ASSERT_EQ(1, actual_schema.count("shops"));
// add the field back
schema_change = R"({
"fields": [
{"name": "shops.is_available", "type": "bool[]", "index": true, "optional": true}
]
})"_json;
schema_change_op = coll->alter(schema_change);
ASSERT_TRUE(schema_change_op.ok());
actual_schema = coll->get_schema();
ASSERT_EQ(2, actual_schema.size());
ASSERT_EQ(1, actual_schema.count("shops"));
ASSERT_EQ(1, actual_schema.count("shops.is_available"));
}
TEST_F(CollectionSchemaChangeTest, NestedFieldReIndex) {
nlohmann::json schema = R"({
"name": "docs",
"enable_nested_fields": true,
"fields": [
{"name": "shops", "type": "object[]"},
{"name": "shops.is_available", "type": "bool[]"}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json doc;
doc["shops"][0]["is_available"] = false;
coll->add(doc.dump());
nlohmann::json schema_change = R"({
"fields": [
{"name": "shops.is_available", "drop": true},
{"name": "shops.is_available", "type": "bool[]", "facet": true}
]
})"_json;
auto schema_change_op = coll->alter(schema_change);
ASSERT_TRUE(schema_change_op.ok());
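    // the drop + re-add re-indexes `shops.is_available`, and the updated schema should
    // now carry the facet flag on that nested field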
auto actual_schema = coll->get_schema();
ASSERT_EQ(2, actual_schema.size());
ASSERT_TRUE(actual_schema["shops.is_available"].facet);
}
TEST_F(CollectionSchemaChangeTest, UpdateSchemaWithNewEmbeddingField) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "names", "type": "string[]"}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json update_schema = R"({
"fields": [
{"name": "embedding", "type":"float[]", "embed":{"from": ["names"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
auto res = coll->alter(update_schema);
ASSERT_TRUE(res.ok());
ASSERT_EQ(1, coll->get_embedding_fields().size());
auto search_schema = coll->get_schema();
auto embedding_field_it = search_schema.find("embedding");
    ASSERT_TRUE(embedding_field_it != search_schema.end());
ASSERT_EQ("embedding", embedding_field_it.value().name);
ASSERT_EQ("float[]", embedding_field_it.value().type);
ASSERT_EQ(384, embedding_field_it.value().num_dim);
nlohmann::json doc;
doc["names"] = {"hello", "world"};
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto added_doc = add_op.get();
ASSERT_EQ(384, added_doc["embedding"].get<std::vector<float>>().size());
}
TEST_F(CollectionSchemaChangeTest, DropFieldUsedForEmbedding) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "title", "type": "string"},
{"name": "names", "type": "string[]"},
{"name": "category", "type":"string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["names","category"],
"model_config": {"model_name": "ts/e5-small"}}},
{"name": "embedding2", "type":"float[]", "embed":{"from": ["names"],
"model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
LOG(INFO) << "Created collection";
auto embedding_fields = coll->get_embedding_fields();
ASSERT_EQ(2, embedding_fields.size());
ASSERT_EQ(2, embedding_fields["embedding"].embed[fields::from].get<std::vector<std::string>>().size());
ASSERT_EQ(1, embedding_fields["embedding2"].embed[fields::from].get<std::vector<std::string>>().size());
auto coll_schema = coll->get_schema();
ASSERT_EQ(5, coll_schema.size());
auto the_fields = coll->get_fields();
ASSERT_EQ(5, the_fields.size());
auto schema_changes = R"({
"fields": [
{"name": "names", "drop": true}
]
})"_json;
auto alter_op = coll->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
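    // dropping `names` removes `embedding2` entirely (it was its only source field) and
    // leaves `embedding` depending solely on `category`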
embedding_fields = coll->get_embedding_fields();
ASSERT_EQ(1, embedding_fields.size());
ASSERT_EQ(1, embedding_fields["embedding"].embed[fields::from].get<std::vector<std::string>>().size());
ASSERT_EQ("category", embedding_fields["embedding"].embed[fields::from].get<std::vector<std::string>>()[0]);
schema_changes = R"({
"fields": [
{"name": "category", "drop": true}
]
})"_json;
alter_op = coll->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
embedding_fields = coll->get_embedding_fields();
ASSERT_EQ(0, embedding_fields.size());
ASSERT_EQ(0, coll->_get_index()->_get_vector_index().size());
// only title remains
coll_schema = coll->get_schema();
ASSERT_EQ(1, coll_schema.size());
ASSERT_EQ("title", coll_schema["title"].name);
the_fields = coll->get_fields();
ASSERT_EQ(1, the_fields.size());
ASSERT_EQ("title", the_fields[0].name);
}
TEST_F(CollectionSchemaChangeTest, EmbeddingFieldsMapTest) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
auto embedding_fields_map = coll->get_embedding_fields();
ASSERT_EQ(1, embedding_fields_map.size());
auto embedding_field_it = embedding_fields_map.find("embedding");
ASSERT_TRUE(embedding_field_it != embedding_fields_map.end());
ASSERT_EQ("embedding", embedding_field_it.value().name);
ASSERT_EQ(1, embedding_field_it.value().embed[fields::from].get<std::vector<std::string>>().size());
ASSERT_EQ("name", embedding_field_it.value().embed[fields::from].get<std::vector<std::string>>()[0]);
// drop the embedding field
nlohmann::json schema_without_embedding = R"({
"fields": [
{"name": "embedding", "drop": true}
]
})"_json;
auto update_op = coll->alter(schema_without_embedding);
ASSERT_TRUE(update_op.ok());
embedding_fields_map = coll->get_embedding_fields();
ASSERT_EQ(0, embedding_fields_map.size());
}
TEST_F(CollectionSchemaChangeTest, DropAndReindexEmbeddingField) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(create_op.ok());
auto coll = create_op.get();
// drop the embedding field and reindex
nlohmann::json alter_schema = R"({
"fields": [
{"name": "embedding", "drop": true},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
auto update_op = coll->alter(alter_schema);
ASSERT_TRUE(update_op.ok());
auto embedding_fields_map = coll->get_embedding_fields();
ASSERT_EQ(1, embedding_fields_map.size());
// try adding a document
nlohmann::json doc;
doc["name"] = "hello";
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto added_doc = add_op.get();
ASSERT_EQ(384, added_doc["embedding"].get<std::vector<float>>().size());
// alter with bad schema
alter_schema = R"({
"fields": [
{"name": "embedding", "drop": true},
{"name": "embedding", "type":"float[]", "embed":{"from": ["namez"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
update_op = coll->alter(alter_schema);
ASSERT_FALSE(update_op.ok());
ASSERT_EQ("Property `embed.from` can only refer to string, string array or image (for supported models) fields.", update_op.error());
// alter with bad model name
alter_schema = R"({
"fields": [
{"name": "embedding", "drop": true},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/x5-small"}}}
]
})"_json;
update_op = coll->alter(alter_schema);
ASSERT_FALSE(update_op.ok());
ASSERT_EQ("Model not found", update_op.error());
// should still be able to add doc after aborted alter
add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
added_doc = add_op.get();
ASSERT_EQ(384, added_doc["embedding"].get<std::vector<float>>().size());
}
TEST_F(CollectionSchemaChangeTest, EmbeddingFieldAlterDropTest) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
auto& vec_index = coll->_get_index()->_get_vector_index();
ASSERT_EQ(1, vec_index.size());
ASSERT_EQ(1, vec_index.count("embedding"));
nlohmann::json schema_change = R"({
"fields": [
{"name": "embedding", "drop": true}
]
})"_json;
auto schema_change_op = coll->alter(schema_change);
ASSERT_TRUE(schema_change_op.ok());
ASSERT_EQ(0, vec_index.size());
ASSERT_EQ(0, vec_index.count("embedding"));
}
TEST_F(CollectionSchemaChangeTest, EmbeddingFieldAlterUpdateOldDocs) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "title", "type": "string"},
{"name": "nested", "type": "object"}
],
"enable_nested_fields": true
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json doc;
doc["title"] = "hello";
doc["nested"] = nlohmann::json::object();
doc["nested"]["hello"] = "world";
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
nlohmann::json schema_change = R"({
"fields": [
{"name": "embedding", "type":"float[]", "embed":{"from": ["title"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
auto schema_change_op = coll->alter(schema_change);
ASSERT_TRUE(schema_change_op.ok());
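    // the pre-existing document should get re-embedded by the alter: the hit carries a
    // 384-dim `embedding`, keeps its `nested` object, and exposes no flattened helper keys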
auto search_res = coll->search("*", {}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5);
ASSERT_EQ(1, search_res.get()["found"].get<size_t>());
ASSERT_EQ(384, search_res.get()["hits"][0]["document"]["embedding"].get<std::vector<float>>().size());
ASSERT_EQ(1, search_res.get()["hits"][0]["document"]["nested"].size());
ASSERT_EQ(0, search_res.get()["hits"][0]["document"].count(".flat"));
ASSERT_EQ(0, search_res.get()["hits"][0]["document"].count("nested.hello"));
}
| 66,447 | C++ | .cpp | 1,572 | 34.476463 | 142 | 0.558508 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

3,729 | sorted_array_test.cpp | typesense_typesense/test/sorted_array_test.cpp |
#include <gtest/gtest.h>
#include "sorted_array.h"
#include <vector>
#include <fstream>
TEST(SortedArrayTest, Append) {
sorted_array arr;
const int SIZE = 10 * 1000;
EXPECT_EQ(arr.getLength(), 0);
EXPECT_EQ(arr.indexOf(100), 0); // when not found must be equal to length (0 in this case)
for(uint32_t i=0; i < SIZE; i++) {
size_t appended_index = arr.append(i);
ASSERT_EQ(i, appended_index);
}
EXPECT_EQ(arr.getLength(), SIZE);
for(uint32_t i=0; i < SIZE; i++) {
EXPECT_EQ(arr.at(i), i);
EXPECT_EQ(arr.indexOf(i), i);
EXPECT_EQ(arr.contains(i), true);
}
EXPECT_EQ(arr.contains(SIZE), false);
EXPECT_EQ(arr.indexOf(SIZE), SIZE);
EXPECT_EQ(arr.indexOf(SIZE+1), SIZE);
sorted_array arr_small;
size_t appended_index = arr_small.append(100);
EXPECT_EQ(0, appended_index);
EXPECT_EQ(arr_small.getLength(), 1);
EXPECT_EQ(arr_small.at(0), 100);
}
TEST(SortedArrayTest, AppendOutOfOrder) {
sorted_array arr;
for(size_t i=5; i<=10; i++) {
size_t appended_index = arr.append(i);
ASSERT_EQ(i-5, appended_index);
}
EXPECT_EQ(6, arr.getLength());
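    // values appended below the current maximum are placed at their sorted position;
    // append() returns the index at which the value was inserted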
int appended_index = -1;
appended_index = arr.append(1);
ASSERT_EQ(0, appended_index);
appended_index = arr.append(3);
ASSERT_EQ(1, appended_index);
appended_index = arr.append(2);
ASSERT_EQ(1, appended_index);
appended_index = arr.append(4);
ASSERT_EQ(3, appended_index);
appended_index = arr.append(11);
ASSERT_EQ(10, appended_index);
appended_index = arr.append(14);
ASSERT_EQ(11, appended_index);
appended_index = arr.append(12);
ASSERT_EQ(11, appended_index);
EXPECT_EQ(13, arr.getLength());
}
TEST(SortedArrayTest, InsertAtIndex) {
std::vector<uint32_t> eles;
sorted_array arr;
for(size_t i=5; i<=9; i++) {
arr.append(i);
}
arr.append(11);
eles = {5, 6, 7, 8, 9, 11};
for(size_t i=0; i < eles.size(); i++) {
ASSERT_EQ(eles[i], arr.at(i));
}
arr.insert(0, 1);
eles = { 1, 5, 6, 7, 8, 9, 11 };
for(size_t i=0; i < eles.size(); i++) {
ASSERT_EQ(eles[i], arr.at(i));
}
ASSERT_EQ(1, arr.at(0));
ASSERT_EQ(5, arr.at(1));
arr.insert(1, 2);
eles = {1, 2, 5, 6, 7, 8, 9, 11};
ASSERT_EQ(1, arr.at(0));
ASSERT_EQ(2, arr.at(1));
ASSERT_EQ(8, arr.getLength());
for(size_t i=0; i < eles.size(); i++) {
ASSERT_EQ(eles[i], arr.at(i));
}
arr.insert(7, 10);
eles = { 1, 2, 5, 6, 7, 8, 9, 10, 11};
ASSERT_EQ(10, arr.at(7));
ASSERT_EQ(11, arr.at(8));
ASSERT_EQ(9, arr.getLength());
for(size_t i=0; i < eles.size(); i++) {
ASSERT_EQ(eles[i], arr.at(i));
}
ASSERT_FALSE(arr.insert(9, 12)); // index out of range
}
TEST(SortedArrayTest, Load) {
sorted_array arr;
// To ensure that previous contents are erased
arr.append(100);
arr.append(200);
const size_t SIZE = 10*1000;
uint32_t *array = new uint32_t[SIZE];
for(size_t i=0; i<SIZE; i++) {
array[i] = (uint32_t) i;
}
arr.load(array, SIZE);
for(size_t i=0; i<SIZE; i++) {
ASSERT_EQ(array[i], arr.at(i));
}
delete [] array;
}
TEST(SortedArrayTest, Uncompress) {
sorted_array sorted_arr;
const size_t SIZE = 10*1000;
for(size_t i=0; i<SIZE; i++) {
sorted_arr.append(i);
}
uint32_t *raw_sorted_arr = sorted_arr.uncompress();
for(size_t i=0; i<sorted_arr.getLength(); i++) {
ASSERT_EQ(raw_sorted_arr[i], sorted_arr.at(i));
}
delete[] raw_sorted_arr;
}
TEST(SortedArrayTest, RemoveValue) {
sorted_array arr;
    // removing the only value, then removing from the now-empty array, should be safe
arr.append(100);
arr.remove_value(100);
arr.remove_value(110);
const size_t SIZE = 10*1000;
for(size_t i=0; i<SIZE; i++) {
arr.append(i);
}
uint32_t values[5] = {0, 100, 1000, 2000, SIZE-1};
for(size_t i=0; i<5; i++) {
arr.remove_value(values[i]);
}
ASSERT_EQ(arr.getLength(), SIZE-5);
for(size_t i=0; i<SIZE-5; i++) {
uint32_t value = arr.at(i);
ASSERT_FALSE(value == 0);
ASSERT_FALSE(value == 100);
ASSERT_FALSE(value == 1000);
ASSERT_FALSE(value == 2000);
ASSERT_FALSE(value == SIZE-1);
}
}
TEST(SortedArrayTest, RemoveValues) {
sorted_array arr;
const size_t SIZE = 10*1000;
for(size_t i=0; i<SIZE; i++) {
arr.append(i);
}
uint32_t values[5] = {0, 100, 1000, 2000, SIZE-1};
arr.remove_values(values, 5);
ASSERT_EQ(arr.getLength(), SIZE-5);
for(size_t i=0; i<SIZE-5; i++) {
uint32_t value = arr.at(i);
ASSERT_FALSE(value == 0);
ASSERT_FALSE(value == 100);
ASSERT_FALSE(value == 1000);
ASSERT_FALSE(value == 2000);
ASSERT_FALSE(value == SIZE-1);
}
}
TEST(SortedArrayTest, BulkIndexOf) {
std::ifstream infile(std::string(ROOT_DIR)+"test/ids.txt");
sorted_array ids;
std::string line;
while (std::getline(infile, line)) {
ids.append(std::stoi(line));
}
infile.close();
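    // bulk indexOf must agree with element-wise indexOf for every search id,
    // including ids that are not present in the array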
std::vector<uint32_t> search_ids = { 17879, 37230, 412020, 445251, 447484, 501102, 640551, 656498, 656531,
770014, 877700, 1034172, 1115941, 1129099, 1159053, 1221486, 1295375 };
uint32_t *results = new uint32_t[search_ids.size()];
ids.indexOf(&search_ids[0], search_ids.size(), results);
for(size_t i = 0; i < search_ids.size(); i++) {
auto search_id = search_ids.at(i);
ASSERT_EQ(ids.indexOf(search_id), results[i]);
}
    // when some of the IDs are not present in the array
search_ids.clear();
search_ids = { 7879, 37230, 422020, 445251, 457484, 501102, 630551};
delete [] results;
results = new uint32_t[search_ids.size()];
ids.indexOf(&search_ids[0], search_ids.size(), results);
for(size_t i = 0; i < search_ids.size(); i++) {
auto search_id = search_ids.at(i);
ASSERT_EQ(ids.indexOf(search_id), results[i]);
}
// search with IDs that don't exist
search_ids = {100};
delete [] results;
results = new uint32_t[search_ids.size()];
ids.indexOf(&search_ids[0], search_ids.size(), results);
ASSERT_EQ(562, results[0]);
search_ids = {100, 105};
delete [] results;
results = new uint32_t[search_ids.size()];
ids.indexOf(&search_ids[0], search_ids.size(), results);
ASSERT_EQ(562, results[0]);
ASSERT_EQ(562, results[1]);
delete [] results;
}
TEST(SortedArrayTest, BulkIndexOf2) {
std::vector<uint32_t> ids = {3, 44, 51, 54, 57, 60, 121, 136, 232, 238, 278, 447, 452, 454, 455, 456, 457, 459, 463,
465, 471, 472, 473, 474, 475, 478, 479, 480, 486, 490, 492, 496, 503, 508, 510, 512,
515, 526, 527, 533, 534, 537, 544, 547, 551, 553, 565, 573, 574, 577, 579, 617, 621,
626, 628, 635, 653, 667, 672, 675, 689, 696, 705, 711, 714, 716, 725, 731, 735, 738,
739, 747, 751, 758, 762, 773, 778, 786, 787, 801, 810, 817, 821, 826, 829, 835, 836,
844, 852, 853, 854, 856, 860, 861, 895, 906, 952, 953, 955, 961, 966, 968, 985, 987,
988, 994, 996, 999, 1005, 1007, 1027, 1030, 1034, 1037, 1040, 1041, 1043, 1057, 1060,
1062, 1065, 1073, 1095, 1119, 1127, 1136, 1137, 1144, 1148, 1150, 1158, 1161, 1167,
1168, 1170, 1182, 1191, 1223, 1229, 1241, 1247, 1279, 1282, 1287, 1290, 1293, 1302,
1308, 1319, 1323, 1328, 1329, 1344, 1345, 1349, 1351, 1353, 1357, 1364, 1368, 1374,
1386, 1389, 1405, 1411, 1421, 1423, 1424, 1439, 1442, 1449, 1452, 1453, 1455, 1458,
1496, 1500, 1501, 1508, 1512, 1526, 1533, 1541, 1546, 1551, 1568, 1579, 1582, 1588,
1589, 1604, 1656, 1658, 1662, 1667, 1682, 1699, 1704, 1714, 1719, 1723, 1728, 1736,
1737, 1744, 1749, 1764, 1768, 1772, 1778, 1820, 1841, 1860, 1880, 1882, 1896, 1907,
1921, 1949, 1959, 1988, 1990, 1995, 2006, 2027, 2090, 2097, 2110, 2130, 2135, 2138,
2144, 2154, 2159, 2165, 2177, 2186, 2204, 2229, 2234, 2255, 2272, 2301, 2319, 2360,
2372, 2383, 2400, 2404, 2441, 2444, 2447, 2460, 2464, 2477, 2480, 2490, 2497, 2513,
2519, 2539, 2547, 2553, 2562, 2570, 2585, 2587, 2590, 2607, 2625, 2633, 2641, 2649,
2650, 2679, 2680, 2698, 2699, 2752, 2782, 2788, 2818, 2829, 2834, 2885, 2892, 2926,
2948, 2954, 2958, 3071, 3088, 3094, 3099, 3124, 3148, 3149, 3151, 3152, 3197, 3212,
3250, 3256, 3269};
std::vector<uint32_t> filter_ids = {9, 19, 21, 22, 23, 25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 38, 39, 41, 42,
46, 47, 48, 49, 52, 57, 58, 60, 61, 63, 67, 68, 69, 71, 72, 74, 75, 76, 77, 78,
79, 80, 85, 86, 87, 89, 91, 93, 94, 96, 100, 102, 103, 104, 106, 109, 111, 112,
113, 114, 115, 117, 118, 119, 123, 124, 127, 128, 129, 132, 133, 134, 135, 139,
141, 142, 143, 144, 146, 147, 148, 149, 151, 152, 154, 155, 157, 158, 159, 161,
162, 163, 164, 169, 170, 172, 174, 178, 179, 180, 181, 182, 183, 184, 185, 186,
187, 188, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 206,
207, 211, 212, 213, 215, 217, 219, 222, 223, 225, 226, 229, 230, 232, 233, 234,
237, 239, 240, 241, 243, 244, 245, 246, 247, 248, 256, 257, 261, 262, 263, 264,
265, 266, 267, 269, 270, 271, 272, 273, 274, 275, 279, 280, 281, 282, 284, 286,
288, 289, 291, 292, 296, 297, 298, 299, 303, 304, 305, 307, 308, 309, 310, 311,
312, 313, 314, 317, 318, 320, 321, 325, 326, 333, 337, 340, 341, 344, 345, 346,
347, 350, 352, 354, 357, 359, 360, 361, 362, 363, 368, 375, 381, 383, 384, 385,
386, 390, 391, 394, 395, 398, 399, 401, 404, 405, 407, 408, 409, 410, 411, 413,
414, 417, 418, 419, 421, 424, 425, 427, 433, 434, 435, 437, 441, 445, 446, 1140,
1495, 1590, 1646, 1707, 1872, 2201, 2844, 2866};
sorted_array arr;
for(auto id: ids) {
arr.append(id);
}
uint32_t* indices = new uint32_t[filter_ids.size()];
arr.indexOf(&filter_ids[0], filter_ids.size(), indices);
ASSERT_EQ(57, filter_ids[25]);
ASSERT_EQ(4, indices[25]);
ASSERT_EQ(60, filter_ids[27]);
ASSERT_EQ(5, indices[27]);
ASSERT_EQ(232, filter_ids[135]);
ASSERT_EQ(8, indices[135]);
delete [] indices;
indices = nullptr;
ids = {4,5,6,7,8};
filter_ids = {1,2,3,4,6,7,8,9,10};
sorted_array arr2;
for(auto id: ids) {
arr2.append(id);
}
indices = new uint32_t[filter_ids.size()];
arr2.indexOf(&filter_ids[0], filter_ids.size(), indices);
ASSERT_EQ(4, filter_ids[3]);
ASSERT_EQ(0, indices[3]);
ASSERT_EQ(6, filter_ids[4]);
ASSERT_EQ(2, indices[4]);
ASSERT_EQ(7, filter_ids[5]);
ASSERT_EQ(3, indices[5]);
ASSERT_EQ(8, filter_ids[6]);
ASSERT_EQ(4, indices[6]);
delete [] indices;
}
TEST(SortedArrayTest, NumFoundOfSortedArrayGreaterThanValues) {
std::vector<uint32_t> ids = {3, 44, 51, 54, 57, 60, 121, 136, 232, 238, 278, 447, 452, 454, 455, 456, 457, 459, 463,
465, 471, 472, 473, 474, 475, 478, 479, 480, 486, 490, 492, 496, 503, 508, 510, 512,
515, 526, 527, 533, 534, 537, 544, 547, 551, 553, 565, 573, 574, 577, 579, 617, 621,
626, 628, 635, 653, 667, 672, 675, 689, 696, 705, 711, 714, 716, 725, 731, 735, 738,
739, 747, 751, 758, 762, 773, 778, 786, 787, 801, 810, 817, 821, 826, 829, 835, 836,
844, 852, 853, 854, 856, 860, 861, 895, 906, 952, 953, 955, 961, 966, 968, 985, 987,
988, 994, 996, 999, 1005, 1007, 1027, 1030, 1034, 1037, 1040, 1041, 1043, 1057, 1060,
1062, 1065, 1073, 1095, 1119, 1127, 1136, 1137, 1144, 1148, 1150, 1158, 1161, 1167,
1168, 1170, 1182, 1191, 1223, 1229, 1241, 1247, 1279, 1282, 1287, 1290, 1293, 1302,
1308, 1319, 1323, 1328, 1329, 1344, 1345, 1349, 1351, 1353, 1357, 1364, 1368, 1374,
1386, 1389, 1405, 1411, 1421, 1423, 1424, 1439, 1442, 1449, 1452, 1453, 1455, 1458,
1496, 1500, 1501, 1508, 1512, 1526, 1533, 1541, 1546, 1551, 1568, 1579, 1582, 1588,
1589, 1604, 1656, 1658, 1662, 1667, 1682, 1699, 1704, 1714, 1719, 1723, 1728, 1736,
1737, 1744, 1749, 1764, 1768, 1772, 1778, 1820, 1841, 1860, 1880, 1882, 1896, 1907,
1921, 1949, 1959, 1988, 1990, 1995, 2006, 2027, 2090, 2097, 2110, 2130, 2135, 2138,
2144, 2154, 2159, 2165, 2177, 2186, 2204, 2229, 2234, 2255, 2272, 2301, 2319, 2360,
2372, 2383, 2400, 2404, 2441, 2444, 2447, 2460, 2464, 2477, 2480, 2490, 2497, 2513,
2519, 2539, 2547, 2553, 2562, 2570, 2585, 2587, 2590, 2607, 2625, 2633, 2641, 2649,
2650, 2679, 2680, 2698, 2699, 2752, 2782, 2788, 2818, 2829, 2834, 2885, 2892, 2926,
2948, 2954, 2958, 3071, 3088, 3094, 3099, 3124, 3148, 3149, 3151, 3152, 3197, 3212,
3250, 3256, 3269};
std::vector<uint32_t> filter_ids = {9, 19, 21, 22, 23, 25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 38, 39, 41, 42,
46, 47, 48, 49, 52, 57, 58, 60, 61, 63, 67, 68, 69, 71, 72, 74, 75, 76, 77, 78,
79, 80, 85, 86, 87, 89, 91, 93, 94, 96, 100, 102, 103, 104, 106, 109, 111, 112,
113, 114, 115, 117, 118, 119, 123, 124, 127, 128, 129, 132, 133, 134, 135, 139,
141, 142, 143, 144, 146, 147, 148, 149, 151, 152, 154, 155, 157, 158, 159, 161,
162, 163, 164, 169, 170, 172, 174, 178, 179, 180, 181, 182, 183, 184, 185, 186,
187, 188, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 206,
207, 211, 212, 213, 215, 217, 219, 222, 223, 225, 226, 229, 230, 232, 233, 234,
237, 239, 240, 241, 243, 244, 245, 246, 247, 248, 256, 257, 261, 262, 263, 264,
265, 266, 267, 269, 270, 271, 272, 273, 274, 275, 279, 280, 281, 282, 284, 286,
288, 289, 291, 292, 296, 297, 298, 299, 303, 304, 305, 307, 308, 309, 310, 311,
312, 313, 314, 317, 318, 320, 321, 325, 326, 333, 337, 340, 341, 344, 345, 346,
347, 350, 352, 354, 357, 359, 360, 361, 362, 363, 368, 375, 381, 383, 384, 385,
386, 390, 391, 394, 395, 398, 399, 401, 404, 405, 407, 408, 409, 410, 411, 413,
414, 417, 418, 419, 421, 424, 425, 427, 433, 434, 435, 437, 441, 445, 446, 1140,
1495, 1590, 1646, 1707, 1872, 2201, 2844, 2866};
sorted_array arr;
for(auto id: ids) {
arr.append(id);
}
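    // only 57, 60 and 232 occur in both lists, so numFoundOf should report 3 matches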
auto num_found = arr.numFoundOf(&filter_ids[0], filter_ids.size());
ASSERT_EQ(3, num_found);
filter_ids = {4,5,6,7,8};
ids = {1,2,3,4,6,7,8,9,10};
sorted_array arr2;
for(auto id: ids) {
arr2.append(id);
}
num_found = arr2.numFoundOf(&filter_ids[0], filter_ids.size());
ASSERT_EQ(4, num_found);
}
TEST(SortedArrayTest, NumFoundOfSortedArrayLessThanValues) {
std::vector<uint32_t> filter_ids = {3, 44, 51, 54, 57, 60, 121, 136, 232, 238, 278, 447, 452, 454, 455, 456, 457, 459, 463,
465, 471, 472, 473, 474, 475, 478, 479, 480, 486, 490, 492, 496, 503, 508, 510, 512,
515, 526, 527, 533, 534, 537, 544, 547, 551, 553, 565, 573, 574, 577, 579, 617, 621,
626, 628, 635, 653, 667, 672, 675, 689, 696, 705, 711, 714, 716, 725, 731, 735, 738,
739, 747, 751, 758, 762, 773, 778, 786, 787, 801, 810, 817, 821, 826, 829, 835, 836,
844, 852, 853, 854, 856, 860, 861, 895, 906, 952, 953, 955, 961, 966, 968, 985, 987,
988, 994, 996, 999, 1005, 1007, 1027, 1030, 1034, 1037, 1040, 1041, 1043, 1057, 1060,
1062, 1065, 1073, 1095, 1119, 1127, 1136, 1137, 1144, 1148, 1150, 1158, 1161, 1167,
1168, 1170, 1182, 1191, 1223, 1229, 1241, 1247, 1279, 1282, 1287, 1290, 1293, 1302,
1308, 1319, 1323, 1328, 1329, 1344, 1345, 1349, 1351, 1353, 1357, 1364, 1368, 1374,
1386, 1389, 1405, 1411, 1421, 1423, 1424, 1439, 1442, 1449, 1452, 1453, 1455, 1458,
1496, 1500, 1501, 1508, 1512, 1526, 1533, 1541, 1546, 1551, 1568, 1579, 1582, 1588,
1589, 1604, 1656, 1658, 1662, 1667, 1682, 1699, 1704, 1714, 1719, 1723, 1728, 1736,
1737, 1744, 1749, 1764, 1768, 1772, 1778, 1820, 1841, 1860, 1880, 1882, 1896, 1907,
1921, 1949, 1959, 1988, 1990, 1995, 2006, 2027, 2090, 2097, 2110, 2130, 2135, 2138,
2144, 2154, 2159, 2165, 2177, 2186, 2204, 2229, 2234, 2255, 2272, 2301, 2319, 2360,
2372, 2383, 2400, 2404, 2441, 2444, 2447, 2460, 2464, 2477, 2480, 2490, 2497, 2513,
2519, 2539, 2547, 2553, 2562, 2570, 2585, 2587, 2590, 2607, 2625, 2633, 2641, 2649,
2650, 2679, 2680, 2698, 2699, 2752, 2782, 2788, 2818, 2829, 2834, 2885, 2892, 2926,
2948, 2954, 2958, 3071, 3088, 3094, 3099, 3124, 3148, 3149, 3151, 3152, 3197, 3212,
3250, 3256, 3269};
std::vector<uint32_t> ids = {9, 19, 21, 22, 23, 25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 38, 39, 41, 42,
46, 47, 48, 49, 52, 57, 58, 60, 61, 63, 67, 68, 69, 71, 72, 74, 75, 76, 77, 78,
79, 80, 85, 86, 87, 89, 91, 93, 94, 96, 100, 102, 103, 104, 106, 109, 111, 112,
113, 114, 115, 117, 118, 119, 123, 124, 127, 128, 129, 132, 133, 134, 135, 139,
141, 142, 143, 144, 146, 147, 148, 149, 151, 152, 154, 155, 157, 158, 159, 161,
162, 163, 164, 169, 170, 172, 174, 178, 179, 180, 181, 182, 183, 184, 185, 186,
187, 188, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 206,
207, 211, 212, 213, 215, 217, 219, 222, 223, 225, 226, 229, 230, 232, 233, 234,
237, 239, 240, 241, 243, 244, 245, 246, 247, 248, 256, 257, 261, 262, 263, 264,
265, 266, 267, 269, 270, 271, 272, 273, 274, 275, 279, 280, 281, 282, 284, 286,
288, 289, 291, 292, 296, 297, 298, 299, 303, 304, 305, 307, 308, 309, 310, 311,
312, 313, 314, 317, 318, 320, 321, 325, 326, 333, 337, 340, 341, 344, 345, 346,
347, 350, 352, 354, 357, 359, 360, 361, 362, 363, 368, 375, 381, 383, 384, 385,
386, 390, 391, 394, 395, 398, 399, 401, 404, 405, 407, 408, 409, 410, 411, 413,
414, 417, 418, 419, 421, 424, 425, 427, 433, 434, 435, 437, 441, 445, 446, 1140,
1495, 1590, 1646, 1707, 1872, 2201, 2844, 2866};
sorted_array arr;
for(auto id: ids) {
arr.append(id);
}
auto num_found = arr.numFoundOf(&filter_ids[0], filter_ids.size());
//ASSERT_EQ(3, num_found);
ids = {4,5,6,7,8};
filter_ids = {1,2,3,4,6,7,8,9,10};
sorted_array arr2;
for(auto id: ids) {
arr2.append(id);
}
num_found = arr2.numFoundOf(&filter_ids[0], filter_ids.size());
ASSERT_EQ(4, num_found);
}
| 21,306 | C++ | .cpp | 374 | 42.772727 | 127 | 0.504224 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

3,730 | collection_operations_test.cpp | typesense_typesense/test/collection_operations_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionOperationsTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_operations";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionOperationsTest, IncrementInt32Value) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"}
]
})"_json;
Collection *coll = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Sherlock Holmes";
doc["points"] = 100;
ASSERT_TRUE(coll->add(doc.dump()).ok());
// increment by 1
doc.erase("points");
doc["id"] = "0";
doc["$operations"] = R"({"increment": {"points": 1}})"_json;
ASSERT_TRUE(coll->add(doc.dump(), UPDATE).ok());
auto res = coll->search("*", {"title"}, "points:101", {}, {}, {0}, 3, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(3, res["hits"][0]["document"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("Sherlock Holmes", res["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(101, res["hits"][0]["document"]["points"].get<size_t>());
// increment by 10
doc["id"] = "0";
doc["$operations"] = R"({"increment": {"points": 10}})"_json;
ASSERT_TRUE(coll->add(doc.dump(), UPDATE).ok());
res = coll->search("*", {"title"}, "points:111", {}, {}, {0}, 3, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(3, res["hits"][0]["document"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("Sherlock Holmes", res["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(111, res["hits"][0]["document"]["points"].get<size_t>());
// decrement by 10 using negative number
doc["id"] = "0";
doc["$operations"] = R"({"increment": {"points": -10}})"_json;
ASSERT_TRUE(coll->add(doc.dump(), UPDATE).ok());
res = coll->search("*", {"title"}, "points:101", {}, {}, {0}, 3, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(3, res["hits"][0]["document"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("Sherlock Holmes", res["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(101, res["hits"][0]["document"]["points"].get<size_t>());
// bad field - should not increment but title field should be updated
doc["id"] = "0";
doc["title"] = "The Sherlock Holmes";
doc["$operations"] = R"({"increment": {"pointsx": -10}})"_json;
ASSERT_TRUE(coll->add(doc.dump(), UPDATE).ok());
res = coll->search("*", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(3, res["hits"][0]["document"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("The Sherlock Holmes", res["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(101, res["hits"][0]["document"]["points"].get<size_t>());
}
TEST_F(CollectionOperationsTest, IncrementInt32ValueCreationViaOptionalField) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32", "optional": true}
]
})"_json;
Collection *coll = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Sherlock Holmes";
doc["$operations"] = R"({"increment": {"points": 1}})"_json;
ASSERT_TRUE(coll->add(doc.dump(), EMPLACE).ok());
auto res = coll->search("*", {"title"}, "points:1", {}, {}, {0}, 3, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(3, res["hits"][0]["document"].size());
ASSERT_EQ("0", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("Sherlock Holmes", res["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(1, res["hits"][0]["document"]["points"].get<size_t>());
// try same with CREATE action
doc.clear();
doc["id"] = "1";
doc["title"] = "Harry Potter";
doc["$operations"] = R"({"increment": {"points": 10}})"_json;
ASSERT_TRUE(coll->add(doc.dump(), CREATE).ok());
res = coll->search("*", {"title"}, "points:10", {}, {}, {0}, 3, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_EQ(3, res["hits"][0]["document"].size());
ASSERT_EQ("1", res["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("Harry Potter", res["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(10, res["hits"][0]["document"]["points"].get<size_t>());
}
| 5,659 | C++ | .cpp | 120 | 41.666667 | 103 | 0.577592 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

3,731 | art_test.cpp | typesense_typesense/test/art_test.cpp |
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <cmath>
#include <gtest/gtest.h>
#include <art.h>
#include <chrono>
#include <posting.h>
#define words_file_path (std::string(ROOT_DIR) + std::string("external/libart/tests/words.txt")).c_str()
#define uuid_file_path (std::string(ROOT_DIR) + std::string("external/libart/tests/uuid.txt")).c_str()
#define skus_file_path (std::string(ROOT_DIR) + std::string("test/skus.txt")).c_str()
#define ill_file_path (std::string(ROOT_DIR) + std::string("test/ill.txt")).c_str()
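// helper that builds an art_document for a given id; the same value is used for both the
// id and score arguments, with a single zero offset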
art_document get_document(uint32_t id) {
art_document document(id, id, {0});
return document;
}
std::set<std::string> exclude_leaves;
TEST(ArtTest, test_art_init_and_destroy) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
ASSERT_TRUE(art_size(&t) == 0);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_insert) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
size_t len;
char buf[512];
FILE *f;
f = fopen(words_file_path, "r");
uintptr_t line = 1;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len-1] = '\0';
art_document document = get_document(line);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)buf, len, &document));
ASSERT_TRUE(art_size(&t) == line);
line++;
}
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_insert_verylong) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
unsigned char key1[300] = {16,0,0,0,7,10,0,0,0,2,17,10,0,0,0,120,10,0,0,0,120,10,0,
0,0,216,10,0,0,0,202,10,0,0,0,194,10,0,0,0,224,10,0,0,0,
230,10,0,0,0,210,10,0,0,0,206,10,0,0,0,208,10,0,0,0,232,
10,0,0,0,124,10,0,0,0,124,2,16,0,0,0,2,12,185,89,44,213,
251,173,202,211,95,185,89,110,118,251,173,202,199,101,0,
8,18,182,92,236,147,171,101,150,195,112,185,218,108,246,
139,164,234,195,58,177,0,8,16,0,0,0,2,12,185,89,44,213,
251,173,202,211,95,185,89,110,118,251,173,202,199,101,0,
8,18,180,93,46,151,9,212,190,95,102,178,217,44,178,235,
29,190,218,8,16,0,0,0,2,12,185,89,44,213,251,173,202,
211,95,185,89,110,118,251,173,202,199,101,0,8,18,180,93,
46,151,9,212,190,95,102,183,219,229,214,59,125,182,71,
108,180,220,238,150,91,117,150,201,84,183,128,8,16,0,0,
0,2,12,185,89,44,213,251,173,202,211,95,185,89,110,118,
251,173,202,199,101,0,8,18,180,93,46,151,9,212,190,95,
108,176,217,47,50,219,61,134,207,97,151,88,237,246,208,
8,18,255,255,255,219,191,198,134,5,223,212,72,44,208,
250,180,14,1,0,0,8, '\0'};
unsigned char key2[303] = {16,0,0,0,7,10,0,0,0,2,17,10,0,0,0,120,10,0,0,0,120,10,0,
0,0,216,10,0,0,0,202,10,0,0,0,194,10,0,0,0,224,10,0,0,0,
230,10,0,0,0,210,10,0,0,0,206,10,0,0,0,208,10,0,0,0,232,
10,0,0,0,124,10,0,0,0,124,2,16,0,0,0,2,12,185,89,44,213,
251,173,202,211,95,185,89,110,118,251,173,202,199,101,0,
8,18,182,92,236,147,171,101,150,195,112,185,218,108,246,
139,164,234,195,58,177,0,8,16,0,0,0,2,12,185,89,44,213,
251,173,202,211,95,185,89,110,118,251,173,202,199,101,0,
8,18,180,93,46,151,9,212,190,95,102,178,217,44,178,235,
29,190,218,8,16,0,0,0,2,12,185,89,44,213,251,173,202,
211,95,185,89,110,118,251,173,202,199,101,0,8,18,180,93,
46,151,9,212,190,95,102,183,219,229,214,59,125,182,71,
108,180,220,238,150,91,117,150,201,84,183,128,8,16,0,0,
0,3,12,185,89,44,213,251,133,178,195,105,183,87,237,150,
155,165,150,229,97,182,0,8,18,161,91,239,50,10,61,150,
223,114,179,217,64,8,12,186,219,172,150,91,53,166,221,
101,178,0,8,18,255,255,255,219,191,198,134,5,208,212,72,
44,208,250,180,14,1,0,0,8, '\0'};
art_document doc1 = get_document(1);
art_document doc2 = get_document(2);
ASSERT_TRUE(NULL == art_insert(&t, key1, 299, &doc1));
ASSERT_TRUE(NULL == art_insert(&t, key2, 302, &doc2));
art_insert(&t, key2, 302, &doc2);
EXPECT_EQ(art_size(&t), 2);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_insert_search) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
int len;
char buf[512];
FILE *f = fopen(words_file_path, "r");
uintptr_t line = 1;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len-1] = '\0';
art_document doc = get_document((uint32_t) line);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)buf, len, &doc));
line++;
}
// Seek back to the start
fseek(f, 0, SEEK_SET);
// Search for each line
line = 1;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len-1] = '\0';
art_leaf* l = (art_leaf *) art_search(&t, (unsigned char*)buf, len);
EXPECT_EQ(line, posting_t::first_id(l->values));
line++;
}
// Check the minimum
art_leaf *l = art_minimum(&t);
ASSERT_TRUE(l && strcmp((char*)l->key, "A") == 0);
// Check the maximum
l = art_maximum(&t);
ASSERT_TRUE(l && strcmp((char*)l->key, "zythum") == 0);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_insert_delete) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
int len;
char buf[512];
FILE *f = fopen(words_file_path, "r");
uintptr_t line = 1, nlines;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len-1] = '\0';
art_document doc = get_document((uint32_t) line);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)buf, len, &doc));
line++;
}
nlines = line - 1;
// Seek back to the start
fseek(f, 0, SEEK_SET);
// Search for each line
line = 1;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len-1] = '\0';
// Search first, ensure all entries still
// visible
art_leaf* l = (art_leaf *) art_search(&t, (unsigned char*)buf, len);
EXPECT_EQ(line, posting_t::first_id(l->values));
// Delete, should get lineno back
void* values = art_delete(&t, (unsigned char*)buf, len);
EXPECT_EQ(line, posting_t::first_id(values));
posting_t::destroy_list(values);
// Check the size
ASSERT_TRUE(art_size(&t) == nlines - line);
line++;
}
// Check the minimum and maximum
ASSERT_TRUE(!art_minimum(&t));
ASSERT_TRUE(!art_maximum(&t));
ASSERT_TRUE(art_size(&t) == 0);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
int iter_cb(void *data, const unsigned char* key, uint32_t key_len, void *val) {
uint64_t *out = (uint64_t*)data;
uintptr_t line = posting_t::first_id(val);
uint64_t mask = (line * (key[0] + key_len));
out[0]++;
out[1] ^= mask;
return 0;
}
TEST(ArtTest, test_art_insert_iter) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
int len;
char buf[512];
FILE *f = fopen(words_file_path, "r");
uint64_t xor_mask = 0;
uintptr_t line = 1, nlines;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len-1] = '\0';
art_document doc = get_document((uint32_t) line);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)buf, len, &doc));
xor_mask ^= (line * (buf[0] + len));
line++;
}
nlines = line - 1;
uint64_t out[] = {0, 0};
ASSERT_TRUE(art_iter(&t, iter_cb, &out) == 0);
ASSERT_TRUE(out[0] == nlines);
ASSERT_TRUE(out[1] == xor_mask);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
typedef struct {
int count;
int max_count;
const char **expected;
} prefix_data;
static int test_prefix_cb(void *data, const unsigned char *k, uint32_t k_len, void *val) {
prefix_data *p = (prefix_data*)data;
assert(p->count < p->max_count);
assert(memcmp(k, p->expected[p->count], k_len) == 0);
p->count++;
return 0;
}
TEST(ArtTest, test_art_iter_prefix) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char *s = "api.foo.bar";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)s, strlen(s)+1, &doc));
s = "api.foo.baz";
auto doc2 = get_document((uint32_t) 2);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)s, strlen(s)+1, &doc2));
s = "api.foe.fum";
auto doc3 = get_document((uint32_t) 3);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)s, strlen(s)+1, &doc3));
s = "abc.123.456";
auto doc4 = get_document((uint32_t) 4);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)s, strlen(s)+1, &doc4));
s = "api.foo";
auto doc5 = get_document((uint32_t) 5);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)s, strlen(s)+1, &doc5));
s = "api";
auto doc6 = get_document((uint32_t) 6);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)s, strlen(s)+1, &doc6));
// Iterate over api
const char *expected[] = {"api", "api.foe.fum", "api.foo", "api.foo.bar", "api.foo.baz"};
prefix_data p = { 0, 5, expected };
ASSERT_TRUE(!art_iter_prefix(&t, (unsigned char*)"api", 3, test_prefix_cb, &p));
ASSERT_TRUE(p.count == p.max_count);
// Iterate over 'a'
const char *expected2[] = {"abc.123.456", "api", "api.foe.fum", "api.foo", "api.foo.bar", "api.foo.baz"};
prefix_data p2 = { 0, 6, expected2 };
ASSERT_TRUE(!art_iter_prefix(&t, (unsigned char*)"a", 1, test_prefix_cb, &p2));
ASSERT_TRUE(p2.count == p2.max_count);
// Check a failed iteration
prefix_data p3 = { 0, 0, NULL };
ASSERT_TRUE(!art_iter_prefix(&t, (unsigned char*)"b", 1, test_prefix_cb, &p3));
ASSERT_TRUE(p3.count == 0);
// Iterate over api.
const char *expected4[] = {"api.foe.fum", "api.foo", "api.foo.bar", "api.foo.baz"};
prefix_data p4 = { 0, 4, expected4 };
ASSERT_TRUE(!art_iter_prefix(&t, (unsigned char*)"api.", 4, test_prefix_cb, &p4));
ASSERT_TRUE(p4.count == p4.max_count);
// Iterate over api.foo.bar
const char *expected5[] = {"api.foo.bar"};
prefix_data p5 = { 0, 1, expected5 };
ASSERT_TRUE(!art_iter_prefix(&t, (unsigned char*)"api.foo.bar", 11, test_prefix_cb, &p5));
ASSERT_TRUE(p5.count == p5.max_count);
// Check a failed iteration on api.end
prefix_data p6 = { 0, 0, NULL };
ASSERT_TRUE(!art_iter_prefix(&t, (unsigned char*)"api.end", 7, test_prefix_cb, &p6));
ASSERT_TRUE(p6.count == 0);
// Iterate over empty prefix
prefix_data p7 = { 0, 6, expected2 };
ASSERT_TRUE(!art_iter_prefix(&t, (unsigned char*)"", 0, test_prefix_cb, &p7));
ASSERT_TRUE(p7.count == p7.max_count);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_long_prefix) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
uintptr_t id;
const char *s;
s = "this:key:has:a:long:prefix:3";
id = 3;
art_document doc = get_document((uint32_t) id);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)s, strlen(s)+1, &doc));
s = "this:key:has:a:long:common:prefix:2";
id = 2;
auto doc2 = get_document((uint32_t) id);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)s, strlen(s)+1, &doc2));
s = "this:key:has:a:long:common:prefix:1";
id = 1;
auto doc3 = get_document((uint32_t) id);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)s, strlen(s)+1, &doc3));
// Search for the keys
s = "this:key:has:a:long:common:prefix:1";
EXPECT_EQ(1, posting_t::first_id(((art_leaf *)art_search(&t, (unsigned char*)s, strlen(s)+1))->values));
s = "this:key:has:a:long:common:prefix:2";
EXPECT_EQ(2, posting_t::first_id(((art_leaf *)art_search(&t, (unsigned char*)s, strlen(s)+1))->values));
s = "this:key:has:a:long:prefix:3";
EXPECT_EQ(3, posting_t::first_id(((art_leaf *)art_search(&t, (unsigned char*)s, strlen(s)+1))->values));
const char *expected[] = {
"this:key:has:a:long:common:prefix:1",
"this:key:has:a:long:common:prefix:2",
"this:key:has:a:long:prefix:3",
};
prefix_data p = { 0, 3, expected };
ASSERT_TRUE(!art_iter_prefix(&t, (unsigned char*)"this:key:has", 12, test_prefix_cb, &p));
ASSERT_TRUE(p.count == p.max_count);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
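// The three keys above share the long common prefix "this:key:has:a:long:", so this test
// presumably exercises the path-compression case: the compressed prefix has to be split when
// a key that diverges midway ("...long:prefix:3" vs "...long:common:prefix:N") is inserted.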
TEST(ArtTest, test_art_insert_search_uuid) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
int len;
char buf[512];
FILE *f = fopen(uuid_file_path, "r");
uintptr_t line = 1;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len-1] = '\0';
art_document doc = get_document((uint32_t) line);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)buf, len, &doc));
line++;
}
// Seek back to the start
fseek(f, 0, SEEK_SET);
// Search for each line
line = 1;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len-1] = '\0';
uintptr_t id = posting_t::first_id(((art_leaf*)art_search(&t, (unsigned char*)buf, len))->values);
ASSERT_TRUE(line == id);
line++;
}
// Check the minimum
art_leaf *l = art_minimum(&t);
ASSERT_TRUE(l && strcmp((char*)l->key, "00026bda-e0ea-4cda-8245-522764e9f325") == 0);
// Check the maximum
l = art_maximum(&t);
ASSERT_TRUE(l && strcmp((char*)l->key, "ffffcb46-a92e-4822-82af-a7190f9c1ec5") == 0);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_max_prefix_len_scan_prefix) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char* key1 = "foobarbaz1-test1-foo";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key1, strlen(key1)+1, &doc));
const char *key2 = "foobarbaz1-test1-bar";
auto doc2 = get_document((uint32_t) 2);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key2, strlen(key2)+1, &doc2));
const char *key3 = "foobarbaz1-test2-foo";
auto doc3 = get_document((uint32_t) 3);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key3, strlen(key3)+1, &doc3));
ASSERT_TRUE(art_size(&t) == 3);
    // Iterate over the prefix "foobarbaz1-test1"
const char *expected[] = {key2, key1};
prefix_data p = { 0, 2, expected };
const char *prefix = "foobarbaz1-test1";
ASSERT_TRUE(!art_iter_prefix(&t, (unsigned char*)prefix, strlen(prefix), test_prefix_cb, &p));
ASSERT_TRUE(p.count == p.max_count);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
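// The next three regression tests share the same setup: keys with a long common prefix
// ("foobarbaz1-long-...") queried with terms that are either a strict prefix of the stored
// keys or diverge early ("f2oobar"). Both cases can make traversal descend further than the
// query length, which is presumably what used to trip `assert(depth < key_len)`.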
TEST(ArtTest, test_art_prefix_iter_out_of_bounds) {
    // Regression: ensures that `assert(depth < key_len);` does not fire
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char* key1 = "foobarbaz1-long-test1-foo";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key1, strlen(key1)+1, &doc));
const char *key2 = "foobarbaz1-long-test1-bar";
auto doc2 = get_document((uint32_t) 2);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key2, strlen(key2)+1, &doc2));
const char *key3 = "foobarbaz1-long-test2-foo";
auto doc3 = get_document((uint32_t) 3);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key3, strlen(key3)+1, &doc3));
ASSERT_TRUE(art_size(&t) == 3);
    // Iterate over a prefix that matches nothing ("f2oobar")
const char *expected[] = {key2, key1};
prefix_data p = { 0, 0, expected };
const char *prefix = "f2oobar";
ASSERT_TRUE(!art_iter_prefix(&t, (unsigned char*)prefix, strlen(prefix), test_prefix_cb, &p));
ASSERT_TRUE(p.count == p.max_count);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_search_out_of_bounds) {
    // Regression: ensures that `assert(depth < key_len);` does not fire
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char* key1 = "foobarbaz1-long-test1-foo";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key1, strlen(key1)+1, &doc));
const char *key2 = "foobarbaz1-long-test1-bar";
auto doc2 = get_document((uint32_t) 2);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key2, strlen(key2)+1, &doc2));
const char *key3 = "foobarbaz1-long-test2-foo";
auto doc3 = get_document((uint32_t) 3);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key3, strlen(key3)+1, &doc3));
ASSERT_TRUE(art_size(&t) == 3);
// Search for a non-existing key
const char *prefix = "foobarbaz1-long-";
art_leaf* l = (art_leaf *) art_search(&t, (const unsigned char *) prefix, strlen(prefix));
ASSERT_EQ(NULL, l);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_delete_out_of_bounds) {
    // Regression: ensures that `assert(depth < key_len);` does not fire
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char* key1 = "foobarbaz1-long-test1-foo";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key1, strlen(key1)+1, &doc));
const char *key2 = "foobarbaz1-long-test1-bar";
art_document doc2 = get_document((uint32_t) 2);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key2, strlen(key2)+1, &doc2));
const char *key3 = "foobarbaz1-long-test2-foo";
art_document doc3 = get_document((uint32_t) 3);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key3, strlen(key3)+1, &doc3));
ASSERT_TRUE(art_size(&t) == 3);
// Try to delete a non-existing key
const char *prefix = "foobarbaz1-long-";
void* values = art_delete(&t, (const unsigned char *) prefix, strlen(prefix));
ASSERT_EQ(nullptr, values);
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_insert_multiple_ids_for_same_token) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char* key1 = "implement";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key1, strlen(key1)+1, &doc));
art_document doc2 = get_document((uint32_t) 2);
void* value = art_insert(&t, (unsigned char*)key1, strlen(key1) + 1, &doc2);
ASSERT_TRUE(value != NULL);
ASSERT_EQ(posting_t::num_ids(value), 2);
ASSERT_EQ(posting_t::first_id(value), 1);
ASSERT_TRUE(posting_t::contains(value, 2));
art_document doc3 = get_document((uint32_t) 3);
void* reinsert_value = art_insert(&t, (unsigned char*) key1, strlen(key1) + 1, &doc3);
ASSERT_TRUE(art_size(&t) == 1);
ASSERT_EQ(posting_t::num_ids(reinsert_value), 3);
ASSERT_EQ(posting_t::first_id(reinsert_value), 1);
ASSERT_TRUE(posting_t::contains(reinsert_value, 2));
ASSERT_TRUE(posting_t::contains(reinsert_value, 3));
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_fuzzy_search_single_leaf) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char* implement_key = "implement";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)implement_key, strlen(implement_key)+1, &doc));
art_leaf* l = (art_leaf *) art_search(&t, (const unsigned char *)implement_key, strlen(implement_key)+1);
EXPECT_EQ(1, posting_t::first_id(l->values));
std::vector<art_leaf*> leaves;
art_fuzzy_search(&t, (const unsigned char *) implement_key, strlen(implement_key) + 1, 0, 0, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
const char* implement_key_typo1 = "implment";
const char* implement_key_typo2 = "implwnent";
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) implement_key_typo1, strlen(implement_key_typo1) + 1, 0, 0, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(0, leaves.size());
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) implement_key_typo1, strlen(implement_key_typo1) + 1, 0, 1, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) implement_key_typo2, strlen(implement_key_typo2) + 1, 0, 2, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
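// Note on the art_fuzzy_search arguments as these tests use them (inferred from usage in this
// file, not from the header): after the term and its length come the minimum and maximum edit
// distance, the maximum number of results, the ranking mode (FREQUENCY / MAX_SCORE), and a bool
// that turns the term into a prefix query. Non-prefix calls pass strlen(term) + 1, apparently so
// that the terminating NUL participates in the match, while prefix calls pass just strlen(term).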
TEST(ArtTest, test_art_fuzzy_search_single_leaf_prefix) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char* key = "application";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key, strlen(key)+1, &doc));
art_leaf* l = (art_leaf *) art_search(&t, (const unsigned char *)key, strlen(key)+1);
EXPECT_EQ(1, posting_t::first_id(l->values));
std::vector<art_leaf*> leaves;
std::string term = "aplication";
art_fuzzy_search(&t, (const unsigned char *)(term.c_str()), term.size(), 0, 1, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *)(term.c_str()), term.size(), 0, 2, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_fuzzy_search_single_leaf_qlen_greater_than_key) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char* key = "storka";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key, strlen(key)+1, &doc));
std::string term = "starkbin";
std::vector<art_leaf*> leaves;
art_fuzzy_search(&t, (const unsigned char *)(term.c_str()), term.size(), 0, 2, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(0, leaves.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_fuzzy_search_single_leaf_non_prefix) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char* key = "spz005";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key, strlen(key)+1, &doc));
std::string term = "spz";
std::vector<art_leaf*> leaves;
art_fuzzy_search(&t, (const unsigned char *)(term.c_str()), term.size()+1, 0, 1, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(0, leaves.size());
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *)(term.c_str()), term.size(), 0, 1, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_prefix_larger_than_key) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
const char* key = "arvin";
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key, strlen(key)+1, &doc));
std::string term = "earrings";
std::vector<art_leaf*> leaves;
art_fuzzy_search(&t, (const unsigned char *)(term.c_str()), term.size()+1, 0, 2, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(0, leaves.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_fuzzy_search_prefix_token_ordering) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
// the last "e" should be returned first because of exact match
std::vector<const char*> keys = {
"enter", "elephant", "enamel", "ercot", "enyzme", "energy",
"epoch", "epyc", "express", "everest", "end", "e"
};
for(size_t i = 0; i < keys.size(); i++) {
art_document doc(i, keys.size() - i, {0});
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)keys[i], strlen(keys[i])+1, &doc));
}
std::vector<art_leaf*> leaves;
art_fuzzy_search(&t, (const unsigned char *) "e", 1, 0, 0, 3, MAX_SCORE, true, false, "", nullptr, 0, leaves, exclude_leaves);
std::string first_key(reinterpret_cast<char*>(leaves[0]->key), leaves[0]->key_len - 1);
ASSERT_EQ("e", first_key);
std::string second_key(reinterpret_cast<char*>(leaves[1]->key), leaves[1]->key_len - 1);
ASSERT_EQ("enter", second_key);
std::string third_key(reinterpret_cast<char*>(leaves[2]->key), leaves[2]->key_len - 1);
ASSERT_EQ("elephant", third_key);
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "enter", 5, 1, 1, 3, MAX_SCORE, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_TRUE(leaves.empty());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_fuzzy_search) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
int len;
char buf[512];
FILE *f = fopen(words_file_path, "r");
uintptr_t line = 1;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len-1] = '\0';
art_document doc = get_document((uint32_t) line);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)buf, len, &doc));
line++;
}
std::vector<art_leaf*> leaves;
auto begin = std::chrono::high_resolution_clock::now();
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "pltinum", strlen("pltinum"), 0, 1, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(2, leaves.size());
ASSERT_STREQ("platinumsmith", (const char *)leaves.at(0)->key);
ASSERT_STREQ("platinum", (const char *)leaves.at(1)->key);
leaves.clear();
exclude_leaves.clear();
// extra char
art_fuzzy_search(&t, (const unsigned char *) "higghliving", strlen("higghliving") + 1, 0, 1, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ("highliving", (const char *)leaves.at(0)->key);
// transpose
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "zymosthneic", strlen("zymosthneic") + 1, 0, 1, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ("zymosthenic", (const char *)leaves.at(0)->key);
// transpose + missing
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "dacrcyystlgia", strlen("dacrcyystlgia") + 1, 0, 2, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ("dacrycystalgia", (const char *)leaves.at(0)->key);
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "dacrcyystlgia", strlen("dacrcyystlgia") + 1, 1, 2, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ("dacrycystalgia", (const char *)leaves.at(0)->key);
// missing char
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "gaberlunze", strlen("gaberlunze") + 1, 0, 1, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ("gaberlunzie", (const char *)leaves.at(0)->key);
// substituted char
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "eacemiferous", strlen("eacemiferous") + 1, 0, 1, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ("racemiferous", (const char *)leaves.at(0)->key);
// missing char + extra char
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "Sarbruckken", strlen("Sarbruckken") + 1, 0, 2, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ("Saarbrucken", (const char *)leaves.at(0)->key);
// multiple matching results
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "hown", strlen("hown") + 1, 0, 1, 10, FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(10, leaves.size());
std::set<std::string> expected_words = {"town", "sown", "shown", "own", "mown", "lown", "howl", "howk", "howe", "how"};
for(size_t leaf_index = 0; leaf_index < leaves.size(); leaf_index++) {
art_leaf*& leaf = leaves.at(leaf_index);
std::string tok(reinterpret_cast<char*>(leaf->key), leaf->key_len - 1);
ASSERT_NE(expected_words.count(tok), 0);
}
// fuzzy prefix search
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "lionhear", strlen("lionhear"), 0, 0, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(3, leaves.size());
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "lineage", strlen("lineage"), 0, 0, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(2, leaves.size());
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "liq", strlen("liq"), 0, 0, 50, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(39, leaves.size());
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "antitraditiana", strlen("antitraditiana"), 0, 1, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char *) "antisocao", strlen("antisocao"), 0, 2, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(6, leaves.size());
long long int timeMillis = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - begin).count();
LOG(INFO) << "Time taken for: " << timeMillis << "ms";
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_fuzzy_search_unicode_chars) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
std::vector<const char*> keys = {
"роман", "обладать", "роисхождения", "без", "பஞ்சமம்", "சுதந்திரமாகவே", "அல்லது", "அடிப்படையில்"
};
for(const char* key: keys) {
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key, strlen(key)+1, &doc));
}
for(const char* key: keys) {
art_leaf* l = (art_leaf *) art_search(&t, (const unsigned char *)key, strlen(key)+1);
EXPECT_EQ(1, posting_t::first_id(l->values));
std::vector<art_leaf*> leaves;
art_fuzzy_search(&t, (unsigned char *)key, strlen(key), 0, 0, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
}
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_fuzzy_search_extra_chars) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
std::vector<const char*> keys = {
"abbviation"
};
for(const char* key: keys) {
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)key, strlen(key)+1, &doc));
}
const char* query = "abbreviation";
std::vector<art_leaf*> leaves;
art_fuzzy_search(&t, (unsigned char *)query, strlen(query), 0, 2, 10, FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_search_sku_like_tokens) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
std::vector<std::string> keys;
int len;
char buf[512];
FILE *f = fopen(skus_file_path, "r");
uintptr_t line = 1;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len - 1] = '\0';
art_document doc = get_document((uint32_t) line);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char *) buf, len, &doc));
keys.push_back(std::string(buf, len-1));
line++;
}
const char* key1 = "abc12345678217521";
// exact search
art_leaf* l = (art_leaf *) art_search(&t, (const unsigned char *)key1, strlen(key1)+1);
EXPECT_EQ(1, posting_t::num_ids(l->values));
    // exact search for all tokens via the fuzzy API
for (const auto &key : keys) {
std::vector<art_leaf *> leaves;
art_fuzzy_search(&t, (const unsigned char*)key.c_str(), key.size(), 0, 0, 10,
FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ(key.c_str(), (const char *) leaves.at(0)->key);
leaves.clear();
exclude_leaves.clear();
// non prefix
art_fuzzy_search(&t, (const unsigned char*)key.c_str(), key.size()+1, 0, 0, 10,
FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ(key.c_str(), (const char *) leaves.at(0)->key);
}
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_search_ill_like_tokens) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
std::vector<std::string> keys;
int len;
char buf[512];
FILE *f = fopen(ill_file_path, "r");
uintptr_t line = 1;
while (fgets(buf, sizeof buf, f)) {
len = strlen(buf);
buf[len - 1] = '\0';
art_document doc = get_document((uint32_t) line);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char *) buf, len, &doc));
keys.push_back(std::string(buf, len-1));
line++;
}
std::map<std::string, size_t> key_to_count {
std::make_pair("input", 2),
std::make_pair("illustration", 2),
std::make_pair("image", 7),
std::make_pair("instrument", 2),
std::make_pair("in", 10),
std::make_pair("info", 2),
std::make_pair("inventor", 2),
std::make_pair("imageresize", 2),
std::make_pair("id", 5),
std::make_pair("insect", 2),
std::make_pair("ice", 2),
};
std::string key = "input";
for (const auto &key : keys) {
art_leaf* l = (art_leaf *) art_search(&t, (const unsigned char *)key.c_str(), key.size()+1);
ASSERT_FALSE(l == nullptr);
EXPECT_EQ(1, posting_t::num_ids(l->values));
std::vector<art_leaf *> leaves;
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char*)key.c_str(), key.size(), 0, 0, 10,
FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
if(key_to_count.count(key) != 0) {
ASSERT_EQ(key_to_count[key], leaves.size());
} else {
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ(key.c_str(), (const char *) leaves.at(0)->key);
}
leaves.clear();
exclude_leaves.clear();
// non prefix
art_fuzzy_search(&t, (const unsigned char*)key.c_str(), key.size()+1, 0, 0, 10,
FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
if(leaves.size() != 1) {
LOG(INFO) << key;
}
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ(key.c_str(), (const char *) leaves.at(0)->key);
}
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_search_ill_like_tokens2) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
std::vector<std::string> keys;
keys = {"input", "illustrations", "illustration"};
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char *) keys[0].c_str(), keys[0].size()+1, &doc));
art_document doc2 = get_document((uint32_t) 2);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char *) keys[1].c_str(), keys[1].size()+1, &doc2));
art_document doc3 = get_document((uint32_t) 3);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char *) keys[2].c_str(), keys[2].size()+1, &doc3));
for (const auto &key : keys) {
art_leaf* l = (art_leaf *) art_search(&t, (const unsigned char *)key.c_str(), key.size()+1);
ASSERT_FALSE(l == nullptr);
EXPECT_EQ(1, posting_t::num_ids(l->values));
std::vector<art_leaf *> leaves;
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char*)key.c_str(), key.size(), 0, 0, 10,
FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
if(key == "illustration") {
ASSERT_EQ(2, leaves.size());
} else {
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ(key.c_str(), (const char *) leaves.at(0)->key);
}
leaves.clear();
exclude_leaves.clear();
// non prefix
art_fuzzy_search(&t, (const unsigned char*)key.c_str(), key.size() + 1, 0, 0, 10,
FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
ASSERT_STREQ(key.c_str(), (const char *) leaves.at(0)->key);
}
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_search_roche_chews) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
std::vector<std::string> keys;
keys = {"roche"};
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char *) keys[0].c_str(), keys[0].size()+1, &doc));
std::string term = "chews";
std::vector<art_leaf *> leaves;
art_fuzzy_search(&t, (const unsigned char*)term.c_str(), term.size(), 0, 2, 10,
FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(0, leaves.size());
art_fuzzy_search(&t, (const unsigned char*)keys[0].c_str(), keys[0].size() + 1, 0, 0, 10,
FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
term = "xxroche";
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char*)term.c_str(), term.size()+1, 0, 2, 10,
FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_search_raspberry) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
std::vector<std::string> keys;
keys = {"raspberry", "raspberries"};
for (const auto &key : keys) {
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char *) key.c_str(), key.size()+1, &doc));
}
// prefix search
std::vector<art_leaf *> leaves;
std::string q_raspberries = "raspberries";
art_fuzzy_search(&t, (const unsigned char*)q_raspberries.c_str(), q_raspberries.size(), 0, 2, 10,
FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(2, leaves.size());
leaves.clear();
exclude_leaves.clear();
std::string q_raspberry = "raspberry";
art_fuzzy_search(&t, (const unsigned char*)q_raspberry.c_str(), q_raspberry.size(), 0, 2, 10,
FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(2, leaves.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_art_search_highliving) {
art_tree t;
int res = art_tree_init(&t);
ASSERT_TRUE(res == 0);
std::vector<std::string> keys;
keys = {"highliving"};
for (const auto &key : keys) {
art_document doc = get_document((uint32_t) 1);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char *) key.c_str(), key.size()+1, &doc));
}
// prefix search
std::vector<art_leaf *> leaves;
std::string query = "higghliving";
art_fuzzy_search(&t, (const unsigned char*)query.c_str(), query.size() + 1, 0, 1, 10,
FREQUENCY, false, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
leaves.clear();
exclude_leaves.clear();
art_fuzzy_search(&t, (const unsigned char*)query.c_str(), query.size(), 0, 2, 10,
FREQUENCY, true, false, "", nullptr, 0, leaves, exclude_leaves);
ASSERT_EQ(1, leaves.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_encode_int32) {
unsigned char chars[8];
// 175 => 0000,0000,0000,0000,0000,0000,1010,1111
unsigned char chars_175[8] = {0, 0, 0, 0, 0, 0, 10, 15};
encode_int32(175, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_175[i], chars[i]);
}
// 0 => 0000,0000,0000,0000,0000,0000,0000,0000
unsigned char chars_0[8] = {0, 0, 0, 0, 0, 0, 0, 0};
encode_int32(0, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_0[i], chars[i]);
}
// 255 => 0000,0000,0000,0000,0000,0000,1111,1111
unsigned char chars_255[8] = {0, 0, 0, 0, 0, 0, 15, 15};
encode_int32(255, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_255[i], chars[i]);
}
// 4531 => 0000,0000,0000,0000,0001,0001,1011,0011
unsigned char chars_4531[8] = {0, 0, 0, 0, 1, 1, 11, 3};
encode_int32(4531, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_4531[i], chars[i]);
}
// 1200000 => 0000,0000,0001,0010,0100,1111,1000,0000
unsigned char chars_1M[8] = {0, 0, 1, 2, 4, 15, 8, 0};
encode_int32(1200000, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_1M[i], chars[i]);
}
unsigned char chars_neg_4531[8] = {15, 15, 15, 15, 14, 14, 4, 13};
encode_int32(-4531, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_neg_4531[i], chars[i]);
}
}
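// Editor's sketch (assumption, not the library implementation): the expected byte patterns
// above are consistent with encode_int32 emitting the value's two's complement bits as eight
// 4-bit nibbles, most significant nibble first. The standalone helper below reproduces the
// same vectors without calling the real encoder.
static void encode_int32_nibble_sketch(int32_t n, unsigned char out[8]) {
    uint32_t u = (uint32_t) n;                  // reinterpret the two's complement bits
    for (int i = 7; i >= 0; i--) {
        out[i] = (unsigned char) (u & 0xF);     // lowest remaining nibble goes into the last slot
        u >>= 4;
    }
}
TEST(ArtTest, test_encode_int32_nibble_sketch) {
    unsigned char chars[8];
    unsigned char chars_4531[8] = {0, 0, 0, 0, 1, 1, 11, 3};
    encode_int32_nibble_sketch(4531, chars);
    for(uint32_t i = 0; i < 8; i++) {
        ASSERT_EQ(chars_4531[i], chars[i]);
    }
    unsigned char chars_neg_4531[8] = {15, 15, 15, 15, 14, 14, 4, 13};
    encode_int32_nibble_sketch(-4531, chars);
    for(uint32_t i = 0; i < 8; i++) {
        ASSERT_EQ(chars_neg_4531[i], chars[i]);
    }
}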
TEST(ArtTest, test_int32_overlap) {
art_tree t;
art_tree_init(&t);
const int CHAR_LEN = 8;
unsigned char chars[CHAR_LEN];
std::vector<const art_leaf *> results;
std::vector<std::vector<uint32_t>> values = {{2014, 2015, 2016}, {2015, 2016}, {2016},
{1981, 1985}, {1999, 2000, 2001, 2002}};
for(uint32_t i = 0; i < values.size(); i++) {
for(size_t j = 0; j < values[i].size(); j++) {
encode_int32(values[i][j], chars);
art_document doc = get_document(i);
art_insert(&t, (unsigned char*)chars, CHAR_LEN, &doc);
}
}
int res = art_int32_search(&t, 2002, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(3, results.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_int32_range_hundreds) {
art_tree t;
art_tree_init(&t);
art_document doc = get_document(1);
const int CHAR_LEN = 8;
unsigned char chars[CHAR_LEN];
std::vector<const art_leaf*> results;
for(uint32_t i = 100; i < 110; i++) {
encode_int32(i, chars);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)chars, CHAR_LEN, &doc));
}
encode_int32(106, chars);
int res = art_int32_search(&t, 106, EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(1, results.size());
results.clear();
res = art_int32_search(&t, 106, GREATER_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(4, results.size());
results.clear();
res = art_int32_search(&t, 106, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(3, results.size());
results.clear();
res = art_int32_search(&t, 106, LESS_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(7, results.size());
results.clear();
res = art_int32_search(&t, 106, LESS_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(6, results.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_int32_duplicates) {
art_tree t;
art_tree_init(&t);
const int CHAR_LEN = 8;
unsigned char chars[CHAR_LEN];
for(size_t i = 0; i < 10000; i++) {
art_document doc = get_document(i);
int value = 1900 + (rand() % static_cast<int>(2018 - 1900 + 1));
encode_int32(value, chars);
art_insert(&t, (unsigned char*)chars, CHAR_LEN, &doc);
}
std::vector<const art_leaf*> results;
int res = art_int32_search(&t, 0, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
size_t counter = 0;
for(auto res: results) {
counter += posting_t::num_ids(res->values);
}
ASSERT_EQ(10000, counter);
results.clear();
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_int32_negative) {
art_tree t;
art_tree_init(&t);
art_document doc = get_document(1);
const int CHAR_LEN = 8;
unsigned char chars[CHAR_LEN];
for(int32_t i = -100; i < 0; i++) {
encode_int32(i, chars);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)chars, CHAR_LEN, &doc));
}
encode_int32(-99, chars);
std::vector<const art_leaf*> results;
int res = art_int32_search(&t, -99, EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(1, results.size());
results.clear();
res = art_int32_search(&t, -90, GREATER_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(90, results.size());
results.clear();
res = art_int32_search(&t, -90, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(89, results.size());
results.clear();
res = art_int32_search(&t, -99, LESS_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(2, results.size());
results.clear();
res = art_int32_search(&t, -99, LESS_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(1, results.size());
results.clear();
res = art_int32_search(&t, -100, LESS_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(1, results.size());
results.clear();
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_int32_million) {
art_tree t;
art_tree_init(&t);
art_document doc = get_document(1);
const int CHAR_LEN = 8;
unsigned char chars[CHAR_LEN];
for(uint32_t i = 0; i < 1000000; i++) {
encode_int32(i, chars);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)chars, CHAR_LEN, &doc));
}
encode_int32(5, chars);
std::vector<const art_leaf*> results;
// ==
for(uint32_t i = 0; i < 6; i++) {
results.clear();
art_int32_search(&t, (uint32_t) pow(10, i), EQUALS, results);
ASSERT_EQ(1, results.size());
results.clear();
art_int32_search(&t, (uint32_t) (pow(10, i) + 7), EQUALS, results);
ASSERT_EQ(1, results.size());
}
results.clear();
art_int32_search(&t, 1000000 - 1, EQUALS, results);
ASSERT_EQ(1, results.size());
// >=
results.clear();
art_int32_search(&t, 1000000 - 5, GREATER_THAN_EQUALS, results);
ASSERT_EQ(5, results.size());
results.clear();
art_int32_search(&t, 1000000 - 5, GREATER_THAN, results);
ASSERT_EQ(4, results.size());
results.clear();
art_int32_search(&t, 1000000 - 1, GREATER_THAN_EQUALS, results);
ASSERT_EQ(1, results.size());
results.clear();
art_int32_search(&t, 1000000, GREATER_THAN_EQUALS, results);
ASSERT_EQ(0, results.size());
results.clear();
art_int32_search(&t, 5, GREATER_THAN_EQUALS, results);
ASSERT_EQ(1000000-5, results.size());
// <=
results.clear();
art_int32_search(&t, 1000000 - 5, LESS_THAN_EQUALS, results);
ASSERT_EQ(1000000-5+1, results.size());
results.clear();
art_int32_search(&t, 1000000 - 1, LESS_THAN_EQUALS, results);
ASSERT_EQ(1000000, results.size());
results.clear();
art_int32_search(&t, 1000000 - 1, LESS_THAN, results);
ASSERT_EQ(1000000-1, results.size());
results.clear();
art_int32_search(&t, 1000000, LESS_THAN_EQUALS, results);
ASSERT_EQ(1000000, results.size());
results.clear();
art_int32_search(&t, 5, LESS_THAN_EQUALS, results);
ASSERT_EQ(5+1, results.size());
results.clear();
art_int32_search(&t, 5, LESS_THAN, results);
ASSERT_EQ(5, results.size());
auto res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_int_range_byte_boundary) {
art_tree t;
art_tree_init(&t);
art_document doc = get_document(1);
const int CHAR_LEN = 8;
unsigned char chars[CHAR_LEN];
for(uint32_t i = 200; i < 300; i++) {
encode_int32(i, chars);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)chars, CHAR_LEN, &doc));
}
encode_int32(255, chars);
std::vector<const art_leaf*> results;
results.clear();
art_int32_search(&t, 255, GREATER_THAN_EQUALS, results);
ASSERT_EQ(45, results.size());
results.clear();
art_int32_search(&t, 255, GREATER_THAN, results);
ASSERT_EQ(44, results.size());
auto res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_encode_int64) {
unsigned char chars[8];
unsigned char chars_175[8] = {0, 0, 0, 0, 0, 0, 0, 175};
encode_int64(175, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_175[i], chars[i]);
}
unsigned char chars_neg_175[8] = {255, 255, 255, 255, 255, 255, 255, 81};
encode_int64(-175, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_neg_175[i], chars[i]);
}
unsigned char chars_100K[8] = {0, 0, 0, 0, 0, 1, 134, 160};
encode_int64(100*1000, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_100K[i], chars[i]);
}
unsigned char chars_large_num[8] = {0, 0, 0, 0, 128, 0, 0, 199};
int64_t large_num = (int64_t)(std::numeric_limits<std::int32_t>::max()) + 200;
encode_int64(large_num, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_large_num[i], chars[i]);
}
unsigned char chars_large_neg_num[8] = {255, 255, 255, 255, 127, 255, 255, 57};
encode_int64(-1 * large_num, chars);
for(uint32_t i = 0; i < 8; i++) {
ASSERT_EQ(chars_large_neg_num[i], chars[i]);
}
}
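// Editor's sketch (assumption): unlike the nibble layout checked for encode_int32 above, these
// vectors use whole bytes, i.e. encode_int64 appears to emit the value's two's complement
// representation as eight big-endian bytes. The helper below reproduces the same expectations
// independently of the real encoder.
static void encode_int64_byte_sketch(int64_t n, unsigned char out[8]) {
    uint64_t u = (uint64_t) n;                  // reinterpret the two's complement bits
    for (int i = 7; i >= 0; i--) {
        out[i] = (unsigned char) (u & 0xFF);    // lowest remaining byte goes into the last slot
        u >>= 8;
    }
}
TEST(ArtTest, test_encode_int64_byte_sketch) {
    unsigned char chars[8];
    unsigned char chars_175[8] = {0, 0, 0, 0, 0, 0, 0, 175};
    encode_int64_byte_sketch(175, chars);
    for(uint32_t i = 0; i < 8; i++) {
        ASSERT_EQ(chars_175[i], chars[i]);
    }
    unsigned char chars_neg_175[8] = {255, 255, 255, 255, 255, 255, 255, 81};
    encode_int64_byte_sketch(-175, chars);
    for(uint32_t i = 0; i < 8; i++) {
        ASSERT_EQ(chars_neg_175[i], chars[i]);
    }
}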
TEST(ArtTest, test_search_int64) {
art_tree t;
art_tree_init(&t);
art_document doc = get_document(1);
const int CHAR_LEN = 8;
unsigned char chars[CHAR_LEN];
const uint64_t lmax = std::numeric_limits<std::int32_t>::max();
for(uint64_t i = lmax; i < lmax+100; i++) {
encode_int64(i, chars);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)chars, CHAR_LEN, &doc));
}
std::vector<const art_leaf*> results;
int res = art_int64_search(&t, lmax, EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(1, results.size());
results.clear();
res = art_int64_search(&t, lmax, GREATER_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(100, results.size());
results.clear();
res = art_int64_search(&t, lmax, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(99, results.size());
results.clear();
res = art_int64_search(&t, lmax+50, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(49, results.size());
results.clear();
res = art_int64_search(&t, lmax+50, LESS_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(50, results.size());
results.clear();
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_search_negative_int64) {
art_tree t;
art_tree_init(&t);
art_document doc = get_document(1);
const int CHAR_LEN = 8;
unsigned char chars[CHAR_LEN];
const int64_t lmax = -1 * std::numeric_limits<std::int32_t>::max();
for(int64_t i = lmax-100; i < lmax; i++) {
encode_int64(i, chars);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)chars, CHAR_LEN, &doc));
}
std::vector<const art_leaf*> results;
int res = art_int64_search(&t, lmax-1, EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(1, results.size());
results.clear();
res = art_int64_search(&t, lmax-1, LESS_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(100, results.size());
results.clear();
res = art_int64_search(&t, lmax-50, LESS_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(50, results.size());
results.clear();
res = art_int64_search(&t, lmax-50, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(49, results.size());
results.clear();
res = art_int64_search(&t, lmax-50, GREATER_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(50, results.size());
results.clear();
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_search_negative_int64_large) {
art_tree t;
art_tree_init(&t);
art_document doc = get_document(1);
const int CHAR_LEN = 8;
unsigned char chars[CHAR_LEN];
encode_int64(-2, chars);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char *) chars, CHAR_LEN, &doc));
std::vector<const art_leaf *> results;
int res = art_int64_search(&t, 1577836800, GREATER_THAN, results);
//ASSERT_TRUE(res == 0);
//ASSERT_EQ(0, results.size());
//results.clear();
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_int32_array) {
art_tree t;
art_tree_init(&t);
const int CHAR_LEN = 8;
unsigned char chars[CHAR_LEN];
std::vector<const art_leaf *> results;
std::vector<std::vector<uint32_t>> values = {{2014, 2015, 2016},
{2015, 2016},
{2016},
{1981, 1985},
{1999, 2000, 2001, 2002}};
for (uint32_t i = 0; i < values.size(); i++) {
for (size_t j = 0; j < values[i].size(); j++) {
encode_int32(values[i][j], chars);
art_document doc = get_document(i);
art_insert(&t, (unsigned char *) chars, CHAR_LEN, &doc);
}
}
int res = art_int32_search(&t, 2002, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(3, results.size());
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_encode_float_positive) {
art_tree t;
art_tree_init(&t);
float floats[6] = {0.0, 0.1044, 1.004, 1.99, 10.5678, 100.33};
const int CHAR_LEN = 8;
for(size_t i = 0; i < 6; i++) {
unsigned char chars0[CHAR_LEN];
encode_float(floats[i], chars0);
art_document doc = get_document(i);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)chars0, CHAR_LEN, &doc));
}
std::vector<const art_leaf*> results;
int res = art_float_search(&t, 0.0, EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(1, results.size());
results.clear();
res = art_float_search(&t, 0.0, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(5, results.size());
results.clear();
res = art_float_search(&t, 10.5678, LESS_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(4, results.size());
results.clear();
res = art_float_search(&t, 10.5678, LESS_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(5, results.size());
results.clear();
res = art_float_search(&t, 10.5678, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(1, results.size());
results.clear();
res = art_float_search(&t, 10.4, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(2, results.size());
results.clear();
res = art_float_search(&t, 10.5678, GREATER_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(2, results.size());
results.clear();
res = art_float_search(&t, 10, GREATER_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(2, results.size());
results.clear();
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
TEST(ArtTest, test_encode_float_positive_negative) {
art_tree t;
art_tree_init(&t);
float floats[6] = {-24.1033, -2.561, 0.0, 1.99, 10.5678, 100.33};
const int CHAR_LEN = 8;
for(size_t i = 0; i < 6; i++) {
unsigned char chars0[CHAR_LEN];
encode_float(floats[i], chars0);
art_document doc = get_document(i);
ASSERT_TRUE(NULL == art_insert(&t, (unsigned char*)chars0, CHAR_LEN, &doc));
}
std::vector<const art_leaf*> results;
int res = art_float_search(&t, -24.1033, EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(1, results.size());
results.clear();
res = art_float_search(&t, 0.0, LESS_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(2, results.size());
results.clear();
res = art_float_search(&t, 0.0, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(3, results.size());
results.clear();
res = art_float_search(&t, -2.561, LESS_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(2, results.size());
results.clear();
res = art_float_search(&t, -2.561, GREATER_THAN, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(4, results.size());
results.clear();
res = art_float_search(&t, -24.1033, GREATER_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(6, results.size());
results.clear();
res = art_float_search(&t, -24, GREATER_THAN_EQUALS, results);
ASSERT_TRUE(res == 0);
ASSERT_EQ(5, results.size());
results.clear();
res = art_tree_destroy(&t);
ASSERT_TRUE(res == 0);
}
| 59,669 | C++ | .cpp | 1,401 | 35.764454 | 178 | 0.591155 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,732 | collection_filtering_test.cpp | typesense_typesense/test/collection_filtering_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class CollectionFilteringTest : public ::testing::Test {
protected:
std::string state_dir_path = "/tmp/typesense_test/collection_filtering";
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionFilteringTest, FilterOnTextFields) {
Collection *coll_array_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::vector<field> fields = {
field("name", field_types::STRING, false),
field("age", field_types::INT32, false),
field("years", field_types::INT32_ARRAY, false),
field("tags", field_types::STRING_ARRAY, true)
};
std::vector<sort_by> sort_fields = { sort_by("age", "DESC") };
coll_array_fields = collectionManager.get_collection("coll_array_fields").get();
if(coll_array_fields == nullptr) {
coll_array_fields = collectionManager.create_collection("coll_array_fields", 4, fields, "age").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_array_fields->add(json_line);
}
infile.close();
query_fields = {"name"};
std::vector<std::string> facets;
nlohmann::json results = coll_array_fields->search("Jeremy", query_fields, "tags: gold", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
std::vector<std::string> ids = {"4", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_array_fields->search("Jeremy", query_fields, "tags : fine PLATINUM", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
results = coll_array_fields->search("Jeremy", query_fields, "tags : foobarbaz", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
// using just ":", filtering should return documents that contain ALL tokens in the filter expression
results = coll_array_fields->search("Jeremy", query_fields, "tags : PLATINUM", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
// no documents contain "white"
results = coll_array_fields->search("Jeremy", query_fields, "tags : WHITE", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
    // no documents contain both "white" and "platinum", so this should return nothing
results = coll_array_fields->search("Jeremy", query_fields, "tags : WHITE PLATINUM", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
// with exact match operator (:=) partial matches are not allowed
results = coll_array_fields->search("Jeremy", query_fields, "tags:= PLATINUM", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll_array_fields->search("Jeremy", query_fields, "tags : bronze", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"4", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
    // search with a list of tags, also testing extra space padding
results = coll_array_fields->search("Jeremy", query_fields, "tags: [bronze, silver]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"3", "4", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// need to be exact matches
results = coll_array_fields->search("Jeremy", query_fields, "tags: bronze", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
// when comparators are used, they should be ignored
results = coll_array_fields->search("Jeremy", query_fields, "tags:<bronze", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
results = coll_array_fields->search("Jeremy", query_fields, "tags:<=BRONZE", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
results = coll_array_fields->search("Jeremy", query_fields, "tags:>BRONZE", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
// bad filter value (empty)
auto res_op = coll_array_fields->search("Jeremy", query_fields, "tags:=", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `tags`: Filter value cannot be empty.", res_op.error());
collectionManager.drop_collection("coll_array_fields");
auto schema_json =
R"({
"name": "title",
"fields": [
{"name": "title", "type": "string"},
{"name": "titles", "type": "string[]"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"title": "foo bar baz",
"titles": []
})"_json,
R"({
"title": "foo bar baz",
"titles": ["foo bar baz"]
})"_json,
R"({
"title": "foo bar baz",
"titles": ["bar foo baz", "foo bar baz"]
})"_json,
R"({
"title": "bar foo baz",
"titles": ["bar foo baz"]
})"_json,
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
std::map<std::string, std::string> req_params = {
{"collection", "title"},
{"q", "foo"},
{"query_by", "title"},
{"filter_by", "title:= foo bar baz"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("2", res_obj["hits"][0]["document"].at("id"));
ASSERT_EQ("1", res_obj["hits"][1]["document"].at("id"));
ASSERT_EQ("0", res_obj["hits"][2]["document"].at("id"));
req_params = {
{"collection", "title"},
{"q", "foo"},
{"query_by", "titles"},
{"filter_by", "titles:= foo bar baz"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("2", res_obj["hits"][0]["document"].at("id"));
ASSERT_EQ("1", res_obj["hits"][1]["document"].at("id"));
}
TEST_F(CollectionFilteringTest, FacetFieldStringFiltering) {
Collection *coll_str;
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("starring", field_types::STRING, true),
field("cast", field_types::STRING_ARRAY, false),
field("points", field_types::INT32, false)
};
std::vector<sort_by> sort_fields = { sort_by("points", "DESC") };
coll_str = collectionManager.get_collection("coll_str").get();
if(coll_str == nullptr) {
coll_str = collectionManager.create_collection("coll_str", 1, fields, "points").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
nlohmann::json document = nlohmann::json::parse(json_line);
coll_str->add(document.dump());
}
infile.close();
query_fields = {"title"};
std::vector<std::string> facets;
    // exact filter on a string field must fail when only a single token of the value is given
facets.clear();
facets.emplace_back("starring");
auto results = coll_str->search("*", query_fields, "starring:= samuel", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(0, results["found"].get<size_t>());
// multiple tokens but with a typo on one of them
results = coll_str->search("*", query_fields, "starring:= ssamuel l. Jackson", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(0, results["found"].get<size_t>());
// same should succeed when verbatim filter is made
results = coll_str->search("*", query_fields, "starring:= samuel l. Jackson", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<size_t>());
// with backticks
results = coll_str->search("*", query_fields, "starring:= `samuel l. Jackson`", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<size_t>());
// contains filter with a single token should work as well
results = coll_str->search("*", query_fields, "starring: jackson", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<size_t>());
results = coll_str->search("*", query_fields, "starring: samuel", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ(2, results["found"].get<size_t>());
    // a "contains" filter must match all tokens; only "samuel" matches here, so no hits
results = coll_str->search("*", query_fields, "starring: samuel johnson", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll_str");
}
TEST_F(CollectionFilteringTest, FacetFieldStringArrayFiltering) {
Collection *coll_array_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::vector<field> fields = {field("name", field_types::STRING, false),
field("name_facet", field_types::STRING, true),
field("age", field_types::INT32, true),
field("years", field_types::INT32_ARRAY, true),
field("rating", field_types::FLOAT, true),
field("timestamps", field_types::INT64_ARRAY, true),
field("tags", field_types::STRING_ARRAY, true)};
std::vector<sort_by> sort_fields = { sort_by("age", "DESC") };
coll_array_fields = collectionManager.get_collection("coll_array_fields").get();
if(coll_array_fields == nullptr) {
coll_array_fields = collectionManager.create_collection("coll_array_fields", 1, fields, "age").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
nlohmann::json document = nlohmann::json::parse(json_line);
document["name_facet"] = document["name"];
const std::string & patched_json_line = document.dump();
coll_array_fields->add(patched_json_line);
}
infile.close();
query_fields = {"name"};
std::vector<std::string> facets = {"tags"};
    // exact (:=) filter on a string array field must fail when only part of the value is given
facets.clear();
facets.push_back("tags");
auto results = coll_array_fields->search("Jeremy", query_fields, "tags:= PLATINUM", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
ASSERT_EQ(0, results["found"].get<size_t>());
results = coll_array_fields->search("Jeremy", query_fields, "tags:= FINE", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll_array_fields->search("Jeremy", query_fields, "tags:= FFINE PLATINUM", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
    // partial (single-token) filters should use ":" without the "=" operator
results = coll_array_fields->search("Jeremy", query_fields, "tags: PLATINUM", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["found"].get<size_t>());
results = coll_array_fields->search("Jeremy", query_fields, "tags: FINE", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["found"].get<size_t>());
// to make tokens match facet value exactly, use "=" operator
results = coll_array_fields->search("Jeremy", query_fields, "tags:= FINE PLATINUM", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["found"].get<size_t>());
// allow exact filter on non-faceted field
results = coll_array_fields->search("Jeremy", query_fields, "name:= Jeremy Howard", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ASSERT_EQ(5, results["found"].get<size_t>());
// multi match exact query (OR condition)
results = coll_array_fields->search("Jeremy", query_fields, "tags:= [Gold, bronze]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ASSERT_EQ(3, results["found"].get<size_t>());
results = coll_array_fields->search("Jeremy", query_fields, "tags:= [Gold, bronze, fine PLATINUM]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_EQ(4, results["found"].get<size_t>());
// single array multi match
results = coll_array_fields->search("Jeremy", query_fields, "tags:= [fine PLATINUM]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["found"].get<size_t>());
collectionManager.drop_collection("coll_array_fields");
}
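// Summary of the filter operators exercised by the string-filtering tests above, as the tests
// themselves document them: ":" matches documents containing ALL tokens of the filter value,
// ":=" requires the full value to match exactly (backticks may wrap the value verbatim),
// "[a, b]" is an OR list of values, and numeric comparators (<, <=, >) on string fields are ignored.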
TEST_F(CollectionFilteringTest, FilterOnTextFieldWithColon) {
Collection *coll1;
std::vector<field> fields = {field("url", field_types::STRING, true),
field("points", field_types::INT32, false)};
std::vector<sort_by> sort_fields = { sort_by("points", "DESC") };
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 4, fields, "points").get();
}
nlohmann::json doc1;
doc1["id"] = "1";
doc1["url"] = "https://example.com/1";
doc1["points"] = 1;
coll1->add(doc1.dump());
query_fields = {"url"};
std::vector<std::string> facets;
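// both the exact (:=) and partial (:) filters should match the URL value that contains ':' characters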
auto res = coll1->search("*", query_fields, "url:= https://example.com/1", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_STREQ("1", res["hits"][0]["document"]["id"].get<std::string>().c_str());
res = coll1->search("*", query_fields, "url: https://example.com/1", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, res["hits"].size());
ASSERT_STREQ("1", res["hits"][0]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, HandleBadlyFormedFilterQuery) {
// should not crash when filter query is malformed!
Collection *coll_array_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::vector<field> fields = {field("name", field_types::STRING, false), field("age", field_types::INT32, false),
field("years", field_types::INT32_ARRAY, false),
field("timestamps", field_types::INT64_ARRAY, false),
field("tags", field_types::STRING_ARRAY, true)};
std::vector<sort_by> sort_fields = { sort_by("age", "DESC") };
coll_array_fields = collectionManager.get_collection("coll_array_fields").get();
if(coll_array_fields == nullptr) {
coll_array_fields = collectionManager.create_collection("coll_array_fields", 4, fields, "age").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_array_fields->add(json_line);
}
infile.close();
query_fields = {"name"};
std::vector<std::string> facets;
// when filter field does not exist in the schema
nlohmann::json results = coll_array_fields->search("Jeremy", query_fields, "tagzz: gold", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
// compound filter expression containing an unknown field
results = coll_array_fields->search("Jeremy", query_fields,
"(age:>0 || timestamps:> 0) || tagzz: gold", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
// unbalanced parentheses
results = coll_array_fields->search("Jeremy", query_fields,
"(age:>0 || timestamps:> 0) || ", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
// searching using a string for a numeric field
results = coll_array_fields->search("Jeremy", query_fields, "age: abcdef", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
// searching using a string for a numeric array field
results = coll_array_fields->search("Jeremy", query_fields, "timestamps: abcdef", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
// malformed k:v syntax
results = coll_array_fields->search("Jeremy", query_fields, "timestamps abcdef", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
// just spaces - must be treated as empty filter
results = coll_array_fields->search("Jeremy", query_fields, " ", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
// wrapping number with quotes
results = coll_array_fields->search("Jeremy", query_fields, "age: '21'", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
// empty value for a numerical filter field
auto res_op = coll_array_fields->search("Jeremy", query_fields, "age:", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `age`: Numerical field has an invalid comparator.", res_op.error());
// empty value for string filter field
res_op = coll_array_fields->search("Jeremy", query_fields, "tags:", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `tags`: Filter value cannot be empty.", res_op.error());
res_op = coll_array_fields->search("Jeremy", query_fields, "tags:= ", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `tags`: Filter value cannot be empty.", res_op.error());
collectionManager.drop_collection("coll_array_fields");
}
TEST_F(CollectionFilteringTest, FilterAndQueryFieldRestrictions) {
Collection *coll_mul_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("starring", field_types::STRING, false),
field("cast", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false)
};
coll_mul_fields = collectionManager.get_collection("coll_mul_fields").get();
if(coll_mul_fields == nullptr) {
coll_mul_fields = collectionManager.create_collection("coll_mul_fields", 4, fields, "points").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_mul_fields->add(json_line);
}
infile.close();
std::vector<std::string> facets;
// query shall be allowed on faceted text fields as well
query_fields = {"cast"};
Option<nlohmann::json> result_op =
coll_mul_fields->search("anton", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_TRUE(result_op.ok());
nlohmann::json results = result_op.get();
ASSERT_EQ(1, results["hits"].size());
std::string solo_id = results["hits"].at(0)["document"]["id"];
ASSERT_STREQ("14", solo_id.c_str());
// filtering on string field should be possible
query_fields = {"title"};
result_op = coll_mul_fields->search("captain", query_fields, "starring: Samuel L. Jackson", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(true, result_op.ok());
results = result_op.get();
ASSERT_EQ(1, results["hits"].size());
solo_id = results["hits"].at(0)["document"]["id"];
ASSERT_STREQ("6", solo_id.c_str());
// filtering on facet field should be possible (supports partial word search but without typo tolerance)
query_fields = {"title"};
result_op = coll_mul_fields->search("*", query_fields, "cast: chris", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(true, result_op.ok());
results = result_op.get();
ASSERT_EQ(3, results["hits"].size());
// bad query string
result_op = coll_mul_fields->search("captain", query_fields, "BLAH", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(false, result_op.ok());
ASSERT_STREQ("Could not parse the filter query.", result_op.error().c_str());
// missing field
result_op = coll_mul_fields->search("captain", query_fields, "age: 100", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(false, result_op.ok());
ASSERT_STREQ("Could not find a filter field named `age` in the schema.", result_op.error().c_str());
// bad filter value type
result_op = coll_mul_fields->search("captain", query_fields, "points: \"100\"", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(false, result_op.ok());
ASSERT_STREQ("Error with filter field `points`: Numerical field has an invalid comparator.", result_op.error().c_str());
result_op = coll_mul_fields->search("captain", query_fields, "points:<= foo", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_FALSE(result_op.ok());
ASSERT_EQ("Error with filter field `points`: Not an int32.", result_op.error());
// bad filter value type - equality with a float value on an integer field
result_op = coll_mul_fields->search("captain", query_fields, "points: 100.34", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(false, result_op.ok());
ASSERT_STREQ("Error with filter field `points`: Not an int32.", result_op.error().c_str());
// bad filter value type - less than float on an integer field
result_op = coll_mul_fields->search("captain", query_fields, "points: <100.0", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(false, result_op.ok());
ASSERT_STREQ("Error with filter field `points`: Not an int32.", result_op.error().c_str());
// when an int32 field is queried with a 64-bit number
result_op = coll_mul_fields->search("captain", query_fields, "points: <2230070399", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(false, result_op.ok());
ASSERT_EQ("Error with filter field `points`: `2230070399` exceeds the range of an int32.", result_op.error());
result_op = coll_mul_fields->search("captain", query_fields, "points:<= 9223372036854775808", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_FALSE(result_op.ok());
ASSERT_EQ("Error with filter field `points`: `9223372036854775808` exceeds the range of an int32.", result_op.error());
// using a string filter value against an integer field
result_op = coll_mul_fields->search("captain", query_fields, "points: <sdsdfsdf", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(false, result_op.ok());
// a large negative number that exceeds the int32 range
result_op = coll_mul_fields->search("captain", query_fields, "points: >-3230070399", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(false, result_op.ok());
// but a small negative number should be allowed
result_op = coll_mul_fields->search("captain", query_fields, "points: >-3230", facets, sort_fields, {0}, 10, 1,
FREQUENCY, {false});
ASSERT_EQ(true, result_op.ok());
collectionManager.drop_collection("coll_mul_fields");
}
TEST_F(CollectionFilteringTest, FilterOnNumericFields) {
Collection *coll_array_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::vector<field> fields = {
field("name", field_types::STRING, false),
field("rating", field_types::FLOAT, false),
field("age", field_types::INT32, false),
field("years", field_types::INT32_ARRAY, false),
field("timestamps", field_types::INT64_ARRAY, false),
field("tags", field_types::STRING_ARRAY, true)
};
std::vector<sort_by> sort_fields = { sort_by("age", "DESC") };
coll_array_fields = collectionManager.get_collection("coll_array_fields").get();
if(coll_array_fields == nullptr) {
// ensure that default_sorting_field is a non-array numerical field
auto coll_op = collectionManager.create_collection("coll_array_fields", 4, fields, "years");
ASSERT_EQ(false, coll_op.ok());
ASSERT_STREQ("Default sorting field `years` is not a sortable type.", coll_op.error().c_str());
// let's try again properly
coll_op = collectionManager.create_collection("coll_array_fields", 4, fields, "age");
coll_array_fields = coll_op.get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_array_fields->add(json_line);
}
infile.close();
// Plain search with no filters - results should be sorted by rank fields
query_fields = {"name"};
std::vector<std::string> facets;
nlohmann::json results = coll_array_fields->search("Jeremy", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
std::vector<std::string> ids = {"3", "1", "4", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// Searching on an int32 field
results = coll_array_fields->search("Jeremy", query_fields, "age:>24", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"3", "1", "4"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_array_fields->search("Jeremy", query_fields, "age:>=24", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
results = coll_array_fields->search("Jeremy", query_fields, "age:24", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
// alternative `:=` syntax
results = coll_array_fields->search("Jeremy", query_fields, "age:=24", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
results = coll_array_fields->search("Jeremy", query_fields, "age:= 24", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
// Searching a number against an int32 array field
results = coll_array_fields->search("Jeremy", query_fields, "years:>2002", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"1", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_array_fields->search("Jeremy", query_fields, "years:<1989", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ids = {"3"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// not equals
results = coll_array_fields->search("Jeremy", query_fields, "age:!= 24", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"3", "1", "4", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_array_fields->search("Jeremy", query_fields, "age:!= 0", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ids = {"3", "1", "4", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
// multiple filters
results = coll_array_fields->search("Jeremy", query_fields, "years:<2005 && years:>1987", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ids = {"4"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// multiple search values (works like SQL's IN operator) against a single int field
results = coll_array_fields->search("Jeremy", query_fields, "age:[21, 24, 63]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"3", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// alternative `:=` syntax
results = coll_array_fields->search("Jeremy", query_fields, "age:= [21, 24, 63]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
// individual comparators can still be applied.
results = coll_array_fields->search("Jeremy", query_fields, "age: [!=21, >30]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"3", "1", "4", "0"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
// negate multiple search values (works like SQL's NOT IN) against a single int field
results = coll_array_fields->search("Jeremy", query_fields, "age:!= [21, 24, 63]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"1", "4"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
// individual comparators can still be applied.
results = coll_array_fields->search("Jeremy", query_fields, "age: != [<30, >60]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"1", "4"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
// multiple search values against an int32 array field - also use extra padding between symbols
results = coll_array_fields->search("Jeremy", query_fields, "years : [ 2015, 1985 , 1999]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"3", "1", "4", "0"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// searching on an int64 array field - also ensure that padding spaces cause no issues
results = coll_array_fields->search("Jeremy", query_fields, "timestamps : > 475205222", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"1", "4", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// range based filter
results = coll_array_fields->search("Jeremy", query_fields, "age: 21..32", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"4", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_array_fields->search("Jeremy", query_fields, "age: 0 .. 100", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
results = coll_array_fields->search("Jeremy", query_fields, "age: [21..24, 40..65]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"3", "1", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_array_fields->search("Jeremy", query_fields, "rating: 7.812 .. 9.999", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"1", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_array_fields->search("Jeremy", query_fields, "rating: [7.812 .. 9.999, 1.05 .. 1.09]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
// when filters don't match any record, no results should be returned
results = coll_array_fields->search("Jeremy", query_fields, "timestamps:>1591091288061", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll_array_fields");
}
TEST_F(CollectionFilteringTest, FilterOnFloatFields) {
Collection *coll_array_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::vector<field> fields = {
field("name", field_types::STRING, false),
field("age", field_types::INT32, false),
field("top_3", field_types::FLOAT_ARRAY, false),
field("rating", field_types::FLOAT, false)
};
std::vector<sort_by> sort_fields_desc = { sort_by("rating", "DESC") };
std::vector<sort_by> sort_fields_asc = { sort_by("rating", "ASC") };
coll_array_fields = collectionManager.get_collection("coll_array_fields").get();
if(coll_array_fields == nullptr) {
coll_array_fields = collectionManager.create_collection("coll_array_fields", 4, fields, "age").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
auto add_op = coll_array_fields->add(json_line);
ASSERT_TRUE(add_op.ok());
}
infile.close();
// Plain search with no filters - results should be sorted by rating field DESC
query_fields = {"name"};
std::vector<std::string> facets;
nlohmann::json results = coll_array_fields->search("Jeremy", query_fields, "", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
std::vector<std::string> ids = {"1", "2", "4", "0", "3"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// Plain search with no filters - results should be sorted by rating field ASC
results = coll_array_fields->search("Jeremy", query_fields, "", facets, sort_fields_asc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ids = {"3", "0", "4", "2", "1"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_array_fields->search("Jeremy", query_fields, "rating:!=0", facets, sort_fields_asc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"0", "4", "2", "1"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// Searching on a float field, sorted desc by rating
results = coll_array_fields->search("Jeremy", query_fields, "rating:>0.0", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"1", "2", "4", "0"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// Searching a float against a float array field
results = coll_array_fields->search("Jeremy", query_fields, "top_3:>7.8", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"1", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// multiple filters
results = coll_array_fields->search("Jeremy", query_fields, "top_3:>7.8 && rating:>7.9", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ids = {"1"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// multiple search values (works like SQL's IN operator) against a single float field
results = coll_array_fields->search("Jeremy", query_fields, "rating:[1.09, 7.812]", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"2", "0"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// negate multiple search values (works like SQL's NOT IN operator) against a single float field
results = coll_array_fields->search("Jeremy", query_fields, "rating:!= [1.09, 7.812]", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"1", "4", "3"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
// individual comparators can still be applied.
results = coll_array_fields->search("Jeremy", query_fields, "rating: != [<5.4, >9]", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"2", "4"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
results = coll_array_fields->search("Jeremy", query_fields, "rating: [!= 1]", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ids = {"1", "2", "4", "0", "3"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
// multiple search values against a float array field - also use extra padding between symbols
results = coll_array_fields->search("Jeremy", query_fields, "top_3 : [ 5.431, 0.001 , 7.812, 11.992]", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"2", "4", "0"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// when filters don't match any record, no results should be returned
auto results_op = coll_array_fields->search("Jeremy", query_fields, "rating:<-2.78", facets, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false});
ASSERT_TRUE(results_op.ok());
results = results_op.get();
ASSERT_EQ(0, results["hits"].size());
// rank tokens by default sorting field
results_op = coll_array_fields->search("j", query_fields, "", facets, sort_fields_desc, {0}, 10, 1, MAX_SCORE, {true});
ASSERT_TRUE(results_op.ok());
results = results_op.get();
ASSERT_EQ(5, results["hits"].size());
ids = {"1", "2", "4", "0", "3"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
collectionManager.drop_collection("coll_array_fields");
}
TEST_F(CollectionFilteringTest, FilterOnNegativeNumericalFields) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("int32_field", field_types::INT32, false),
field("int64_field", field_types::INT64, false),
field("float_field", field_types::FLOAT, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "int32_field").get();
}
std::vector<std::vector<std::string>> records = {
{"Title 1", "-100", "5000000", "-10.45124"},
{"Title 2", "100", "-1000000", "0.45124"},
{"Title 3", "-200", "3000000", "-0.45124"},
{"Title 4", "150", "10000000", "1.45124"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["int32_field"] = std::stoi(records[i][1]);
doc["int64_field"] = std::stoll(records[i][2]);
doc["float_field"] = std::stof(records[i][3]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
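// negative values must be matched correctly by < comparisons on int32, int64 and float fields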
auto results = coll1->search("*", {}, "int32_field:<0", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
results = coll1->search("*", {}, "int64_field:<0", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("*", {}, "float_field:<0", {}, {sort_by("float_field", "desc")}, {0}, 10, 1, FREQUENCY,
{true}, 10).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("0", results["hits"][1]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, ComparatorsOnMultiValuedNumericalField) {
Collection *coll_array_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::vector<field> fields = {
field("name", field_types::STRING, false),
field("age", field_types::INT32, false),
field("top_3", field_types::FLOAT_ARRAY, false),
field("rating", field_types::FLOAT, false)
};
std::vector<sort_by> sort_fields_desc = {sort_by("rating", "DESC")};
coll_array_fields = collectionManager.get_collection("coll_array_fields").get();
if (coll_array_fields == nullptr) {
coll_array_fields = collectionManager.create_collection("coll_array_fields", 4, fields, "age").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
auto add_op = coll_array_fields->add(json_line);
ASSERT_TRUE(add_op.ok());
}
infile.close();
query_fields = {"name"};
std::vector<std::string> facets;
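// multiple values against a single int field, mixing an exact value with a > comparator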
nlohmann::json results = coll_array_fields->search("Jeremy", query_fields, "age: [24, >32]",
facets, sort_fields_desc, {0}, 10, 1,FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
std::vector<std::string> ids = {"1", "0", "3"};
for (size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// with <= and >=
results = coll_array_fields->search("Jeremy", query_fields, "age: [<=24, >=44]",
facets, sort_fields_desc, {0}, 10, 1,FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"1", "2", "0", "3"};
for (size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
collectionManager.drop_collection("coll_array_fields");
}
TEST_F(CollectionFilteringTest, FilteringWithPrefixSearch) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"elephant"}, {"emerald"}, {"effective"}, {"esther"}, {"eagle"},
{"empty"}, {"elite"}, {"example"}, {"elated"}, {"end"},
{"ear"}, {"eager"}, {"earmark"}, {"envelop"}, {"excess"},
{"ember"}, {"earth"}, {"envoy"}, {"emerge"}, {"emigrant"},
{"envision"}, {"envy"}, {"envisage"}, {"executive"}, {"end"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// prefix search combined with a numeric filter should return only the document that matches the filter
auto res_op = coll1->search("e",
{"title"}, "points: 23",
{}, {}, {0}, 10, 1, FREQUENCY, {true});
auto results = res_op.get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("23", results["hits"][0]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, NumericalFilteringWithAnd) {
Collection *coll1;
std::vector<field> fields = {field("company_name", field_types::STRING, false),
field("num_employees", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "num_employees").get();
}
std::vector<std::vector<std::string>> records = {
{"123", "Company 1", "50"},
{"125", "Company 2", "150"},
{"127", "Company 3", "250"},
{"129", "Stark Industries 4", "500"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = records[i][0];
doc["company_name"] = records[i][1];
doc["num_employees"] = std::stoi(records[i][2]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = { sort_by("num_employees", "ASC") };
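// a numeric range expressed as two conditions combined with &&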
auto results = coll1->search("*",
{}, "num_employees:>=100 && num_employees:<=300",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("125", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("127", results["hits"][1]["document"]["id"].get<std::string>().c_str());
// impossible range (lower bound greater than upper bound) must match nothing
results = coll1->search("*",
{}, "num_employees:>=100 && num_employees:<=10",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// check boundaries
results = coll1->search("*",
{}, "num_employees:>=150 && num_employees:<=250",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_STREQ("125", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("127", results["hits"][1]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*",
{}, "num_employees:>150 && num_employees:<250",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
results = coll1->search("*",
{}, "num_employees:>50 && num_employees:<250",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("125", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// extreme boundaries
results = coll1->search("*",
{}, "num_employees:>50 && num_employees:<=500",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_STREQ("125", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("127", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("129", results["hits"][2]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*",
{}, "num_employees:>=50 && num_employees:<500",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_STREQ("123", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("125", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("127", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// no match
results = coll1->search("*",
{}, "num_employees:>3000 && num_employees:<10",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, FilteringViaDocumentIds) {
Collection *coll1;
std::vector<field> fields = {field("company_name", field_types::STRING, false),
field("num_employees", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "num_employees").get();
}
std::vector<std::vector<std::string>> records = {
{"123", "Company 1", "50"},
{"125", "Company 2", "150"},
{"127", "Company 3", "250"},
{"129", "Stark Industries 4", "500"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = records[i][0];
doc["company_name"] = records[i][1];
doc["num_employees"] = std::stoi(records[i][2]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = { sort_by("num_employees", "ASC") };
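// filter on the `id` field with a single value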
auto results = coll1->search("*",
{}, "id: 123",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("123", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*",
{}, "id: != 123",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("125", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("127", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("129", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// single ID with backtick
results = coll1->search("*",
{}, "id: `123`",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("123", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// single ID with condition
results = coll1->search("*",
{}, "id: 125 && num_employees: 150",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("125", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// multiple IDs
results = coll1->search("*",
{}, "id: [123, 125, 127, 129] && num_employees: <300",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("123", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("125", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("127", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// multiple IDs with exact equals operator with IDs not being ordered
results = coll1->search("*",
{}, "id:= [129, 123, 127, 125] && num_employees: <300",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("123", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("125", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("127", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// multiple IDs with exact equals operator and backticks
results = coll1->search("*",
{}, "id:= [`123`, `125`, `127`, `129`] && num_employees: <300",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("123", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("125", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("127", results["hits"][2]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*",
{}, "id:!= [123,125] && num_employees: <300",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("127", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// empty id list not allowed
auto res_op = coll1->search("*", {}, "id:=", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `id`: Filter value cannot be empty.", res_op.error());
res_op = coll1->search("*", {}, "id:= ", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `id`: Filter value cannot be empty.", res_op.error());
res_op = coll1->search("*", {}, "id: ", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `id`: Filter value cannot be empty.", res_op.error());
res_op = coll1->search("*", {}, "id: ``", {}, sort_fields, {0}, 10, 1, FREQUENCY, {true});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `id`: Filter value cannot be empty.", res_op.error());
// when no IDs exist
results = coll1->search("*",
{}, "id: [1000] && num_employees: <300",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
results = coll1->search("*",
{}, "id: 1000",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// match all IDs
results = coll1->search("*",
{}, "id: *",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(4, results["found"].get<size_t>());
results = coll1->search("*",
{}, "id:= [*]",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(4, results["found"].get<size_t>());
// match no IDs
results = coll1->search("*",
{}, "id: != *",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
results = coll1->search("*",
{}, "id: != [*]",
{}, sort_fields, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(0, results["found"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, NumericalFilteringWithArray) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("prices", field_types::INT32_ARRAY, false),};
coll1 = collectionManager.get_collection("coll1").get();
if (coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "").get();
}
std::vector<std::vector<std::string>> records = {
{"1", "T Shirt 1", "1", "2", "3"},
{"2", "T Shirt 2", "1", "2", "3"},
{"3", "T Shirt 3", "1", "2", "3"},
{"4", "T Shirt 4", "1", "1", "1"},
};
for (size_t i = 0; i < records.size(); i++) {
nlohmann::json doc;
doc["id"] = records[i][0];
doc["title"] = records[i][1];
std::vector<int32_t> prices;
for(size_t j = 2; j <= 4; j++) {
prices.push_back(std::stoi(records[i][j]));
}
doc["prices"] = prices;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// check equals on a repeating price
auto results = coll1->search("*",
{}, "prices:1",
{}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_EQ(4, results["hits"].size());
// check ranges
results = coll1->search("*",
{}, "prices:>=1",
{}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_EQ(4, results["hits"].size());
results = coll1->search("*",
{}, "prices:>=2",
{}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
results = coll1->search("*",
{}, "prices:<4",
{}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_EQ(4, results["hits"].size());
results = coll1->search("*",
{}, "prices:<=2",
{}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(4, results["found"].get<size_t>());
ASSERT_EQ(4, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, NegationOperatorBasics) {
Collection *coll1;
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("artist", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 2, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Taylor Swift Karaoke: reputation", "Taylor Swift"},
{"Beat it", "Michael Jackson"},
{"Style", "Taylor Swift"},
{"Thriller", "Michael Joseph Jackson"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["artist"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
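// != on a string field should exclude only documents whose value matches exactly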
auto results = coll1->search("*", {"artist"}, "artist:!=Michael Jackson", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_STREQ("3", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][2]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*", {"artist"}, "artist:!= Michael Jackson && points: >0", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_STREQ("3", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][1]["document"]["id"].get<std::string>().c_str());
// negation operation on multiple values
results = coll1->search("*", {"artist"}, "artist:!= [Michael Jackson, Taylor Swift]", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("3", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// when no such value exists: should return all results
results = coll1->search("*", {"artist"}, "artist:!=Foobar", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(4, results["found"].get<size_t>());
results = coll1->search("*", {"artist"}, "artist:! Jackson", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["found"]);
ASSERT_EQ("2", results["hits"][0]["document"]["id"]);
ASSERT_EQ("0", results["hits"][1]["document"]["id"]);
results = coll1->search("*", {"artist"}, "artist:![Swift, Jack]", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["found"]);
ASSERT_EQ("3", results["hits"][0]["document"]["id"]);
ASSERT_EQ("1", results["hits"][1]["document"]["id"]);
results = coll1->search("*", {"artist"}, "artist:![Swift, Jackson]", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(0, results["found"]);
results = coll1->search("*", {"artist"}, "artist:!=[]", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10).get();
ASSERT_EQ(4, results["found"]);
// empty value (bad filtering)
auto res_op = coll1->search("*", {"artist"}, "artist:!=", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `artist`: Filter value cannot be empty.", res_op.error());
res_op = coll1->search("*", {"artist"}, "artist:!= ", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `artist`: Filter value cannot be empty.", res_op.error());
res_op = coll1->search("*", {"artist"}, "artist:!=``", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `artist`: Filter value cannot be empty.", res_op.error());
res_op = coll1->search("*", {"artist"}, "artist:!=[`foo`, ``]", {}, {}, {0}, 10, 1, FREQUENCY, {true}, 10);
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `artist`: Filter value cannot be empty.", res_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, FilterStringsWithComma) {
Collection *coll1;
std::vector<field> fields = {field("place", field_types::STRING, true),
field("state", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"St. John's Cathedral, Denver, Colorado", "Colorado"},
{"Crater Lake National Park, Oregon", "Oregon"},
{"St. Patrick's Cathedral, Manhattan", "New York"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["place"] = records[i][0];
doc["state"] = records[i][1];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
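// exact filter values containing commas should match both bare and wrapped in backticks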
auto results = coll1->search("*", {"place"}, "place:= St. John's Cathedral, Denver, Colorado", {}, {}, {0}, 10, 1,
FREQUENCY, {true}, 10).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*", {"place"}, "place:= `St. John's Cathedral, Denver, Colorado`", {}, {}, {0}, 10, 1,
FREQUENCY, {true}, 10).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*", {"place"}, "place:= [`St. John's Cathedral, Denver, Colorado`]", {}, {}, {0}, 10, 1,
FREQUENCY, {true}, 10).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*", {"place"}, "place:= [`St. John's Cathedral, Denver, Colorado`, `St. Patrick's Cathedral, Manhattan`]", {}, {}, {0}, 10, 1,
FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_STREQ("2", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*", {"place"}, "place: [`Cathedral, Denver, Colorado`]", {}, {}, {0}, 10, 1,
FREQUENCY, {true}, 10).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*", {"place"}, "place: []", {}, {}, {0}, 10, 1,
FREQUENCY, {true}, 10).get();
ASSERT_EQ(0, results["found"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, NumericalRangeFilter) {
std::vector<field> fields = {field("company", field_types::STRING, true),
field("num_employees", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "num_employees").get();
std::vector<std::vector<std::string>> records = {
{"123", "Company 1", "50"},
{"125", "Company 2", "150"},
{"127", "Company 3", "250"},
{"129", "Stark Industries 4", "500"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = records[i][0];
doc["company"] = records[i][1];
doc["num_employees"] = std::stoi(records[i][2]);
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
std::vector<sort_by> sort_fields = { sort_by("num_employees", "ASC") };
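// inclusive range expressed with >= and <= combined via &&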
auto results = coll1->search("*", {}, "num_employees:>=100 && num_employees:<=300", {}, sort_fields, {0}, 10, 1,
FREQUENCY, {true}, 10).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_STREQ("125", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("127", results["hits"][1]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, RangeFilterOnTimestamp) {
std::vector<field> fields = {field("ts", field_types::INT64, false)};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {"."}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["ts"] = 1646092800000;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["ts"] = 1648771199000;
nlohmann::json doc3;
doc3["id"] = "2";
doc3["ts"] = 1647111199000;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
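// the `..` range syntax is inclusive of both bounds, so all three timestamps should match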
auto results = coll1->search("*", {},"ts:[1646092800000..1648771199000]", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, QueryBoolFields) {
Collection *coll_bool;
std::ifstream infile(std::string(ROOT_DIR)+"test/bool_documents.jsonl");
std::vector<field> fields = {
field("popular", field_types::BOOL, false),
field("title", field_types::STRING, false),
field("rating", field_types::FLOAT, false),
field("bool_array", field_types::BOOL_ARRAY, false),
};
std::vector<sort_by> sort_fields = { sort_by("popular", "DESC"), sort_by("rating", "DESC") };
coll_bool = collectionManager.get_collection("coll_bool").get();
if(coll_bool == nullptr) {
coll_bool = collectionManager.create_collection("coll_bool", 1, fields, "rating").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_bool->add(json_line);
}
infile.close();
// Plain search with no filters - results should be sorted correctly
query_fields = {"title"};
std::vector<std::string> facets;
nlohmann::json results = coll_bool->search("the", query_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
std::vector<std::string> ids = {"1", "3", "4", "9", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// Searching on a bool field
results = coll_bool->search("the", query_fields, "popular:true", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"1", "3", "4"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_bool->search("*", query_fields, "popular:true", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(7, results["hits"].size());
ids = {"1", "0", "3", "5", "6", "7", "4"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
// alternative `:=` syntax
results = coll_bool->search("the", query_fields, "popular:=true", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
results = coll_bool->search("the", query_fields, "popular:false", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
results = coll_bool->search("the", query_fields, "popular:= false", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"9", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// searching against a bool array field
// should be able to filter with an array of boolean values
Option<nlohmann::json> res_op = coll_bool->search("the", query_fields, "bool_array:[true, false]", facets,
sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_TRUE(res_op.ok());
results = res_op.get();
ASSERT_EQ(5, results["hits"].size());
results = coll_bool->search("the", query_fields, "bool_array: true", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"1", "4", "9", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// should be able to filter using an array with a single boolean value
results = coll_bool->search("the", query_fields, "bool_array:[true]", facets,
sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// not equals on bool field
results = coll_bool->search("the", query_fields, "popular:!= true", facets,
sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ASSERT_EQ("9", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results["hits"][1]["document"]["id"].get<std::string>());
// not equals on bool array field
results = coll_bool->search("the", query_fields, "bool_array:!= [true]", facets,
sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("3", results["hits"][0]["document"]["id"].get<std::string>());
// empty filter value
res_op = coll_bool->search("the", query_fields, "bool_array:=", facets,
sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Error with filter field `bool_array`: Filter value cannot be empty.", res_op.error());
collectionManager.drop_collection("coll_bool");
}
TEST_F(CollectionFilteringTest, FilteringWithTokenSeparators) {
std::vector<field> fields = {field("code", field_types::STRING, true)};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {"."}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["code"] = "7318.15";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
auto results = coll1->search("*", {},"code:=7318.15", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("*", {},"code:=`7318.15`", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll1");
Collection* coll2 = collectionManager.create_collection(
"coll2", 1, fields, "", 0, "", {"."}, {}
).get();
doc1["id"] = "0";
doc1["code"] = "7318.15";
ASSERT_TRUE(coll2->add(doc1.dump()).ok());
results = coll2->search("*", {},"code:=7318.15", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
collectionManager.drop_collection("coll2");
}
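// An exact (`:=`) string filter should match only documents whose field value (or array
// element) equals the filter value exactly, not documents that merely contain it as a phrase.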
TEST_F(CollectionFilteringTest, ExactFilteringSingleQueryTerm) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false)};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {"."}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "AT&T GoPhone";
doc1["tags"] = {"AT&T GoPhone"};
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "AT&T";
doc2["tags"] = {"AT&T"};
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("*", {},"name:=AT&T", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("*", {},"tags:=AT&T", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Phone";
doc3["tags"] = {"Samsung Phone", "Phone"};
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
results = coll1->search("*", {},"tags:=Phone", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("2", results["hits"][0]["document"]["id"].get<std::string>());
collectionManager.drop_collection("coll1");
}
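// Exact filtering with repeated tokens: a value such as "Cardiology - Interventional Cardiology"
// should only be matched by the complete value, not by a partial token sequence like
// "Interventional Cardiology" or "Cardiology".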
TEST_F(CollectionFilteringTest, ExactFilteringRepeatingTokensSingularField) {
std::vector<field> fields = {field("name", field_types::STRING, false)};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {"."}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "Cardiology - Interventional Cardiology";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "Cardiology - Interventional";
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = "Cardiology - Interventional Cardiology Department";
nlohmann::json doc4;
doc4["id"] = "3";
doc4["name"] = "Interventional Cardiology - Interventional Cardiology";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
auto results = coll1->search("*", {},"name:=Cardiology - Interventional Cardiology", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("*", {},"name:=Cardiology - Interventional", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("*", {},"name:=Interventional Cardiology", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("*", {},"name:=Cardiology", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, ExactFilteringRepeatingTokensArrayField) {
std::vector<field> fields = {field("name", field_types::STRING_ARRAY, false)};
Collection* coll1 = collectionManager.create_collection(
"coll1", 1, fields, "", 0, "", {}, {"."}
).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = {"Cardiology - Interventional Cardiology"};
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = {"Cardiology - Interventional"};
nlohmann::json doc3;
doc3["id"] = "2";
doc3["name"] = {"Cardiology - Interventional Cardiology Department"};
nlohmann::json doc4;
doc4["id"] = "3";
doc4["name"] = {"Interventional Cardiology - Interventional Cardiology"};
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
ASSERT_TRUE(coll1->add(doc3.dump()).ok());
ASSERT_TRUE(coll1->add(doc4.dump()).ok());
auto results = coll1->search("*", {},"name:=Cardiology - Interventional Cardiology", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("*", {},"name:=Cardiology - Interventional", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
results = coll1->search("*", {},"name:=Interventional Cardiology", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("*", {},"name:=Cardiology", {}, {}, {0}, 10,
1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, ExcludeMultipleTokens) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"alpha"},
{"TXBT0eiYnFhkJHqz02Wv0PWN5hp1"},
{"3u7RtEn5S9fcnizoUojWUwW23Yf2"},
{"HpPALvzDDVc3zMmlAAUySwp8Ir33"},
{"9oF2qhYI8sdBa2xJSerfmntpvBr2"},
{"5fAnLlld5obG4vhhNIbIeoHe1uB2"},
{"4OlIYKbzwIUoAOYy6dfDzCREezg1"},
{"4JK1BvoqCuTeMwEZorlKj8hnSl02"},
{"3tQBmRH0AQPEWyoKcDNYJyIxQQe2"},
{"3Mvl5HZgNwQkHykAqL77oMfo8DW2"},
{"3Ipnw5JATpYFyCcdUKTBhCicjoH3"},
{"2rizUF2ntNSUVpaXwPdHmSBB6C63"},
{"2kMHFOUQhAQK9cQbFNoXGpcAFVD2"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search(
"-TXBT0eiYnFhkJHqz02Wv0PWN5hp1 -3u7RtEn5S9fcnizoUojWUwW23Yf2 -HpPALvzDDVc3zMmlAAUySwp8Ir33 "
"-9oF2qhYI8sdBa2xJSerfmntpvBr2 -5fAnLlld5obG4vhhNIbIeoHe1uB2 -4OlIYKbzwIUoAOYy6dfDzCREezg1 "
"-4JK1BvoqCuTeMwEZorlKj8hnSl02 -3tQBmRH0AQPEWyoKcDNYJyIxQQe2 -3Mvl5HZgNwQkHykAqL77oMfo8DW2 "
"-3Ipnw5JATpYFyCcdUKTBhCicjoH3 -2rizUF2ntNSUVpaXwPdHmSBB6C63 -2kMHFOUQhAQK9cQbFNoXGpcAFVD2",
{"title"}, "",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
collectionManager.drop_collection("coll1");
}
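// The two upsert tests below check that when a document is upserted without a previously
// indexed value, the stale value is removed from the filter index and no longer matches.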
TEST_F(CollectionFilteringTest, FilteringAfterUpsertOnArrayWithTokenSeparators) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false),
field("tag", field_types::STRING, false)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0, "", {}, {"-"}).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "david";
doc1["tags"] = {"alpha-beta-gamma", "foo-bar-baz"};
doc1["tag"] = "foo-bar-baz";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "david";
doc2["tags"] = {"alpha-gamma-beta", "bar-foo-baz"};
doc2["tag"] = "alpha-beta";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("david", {"name"},"tags:=[foo-bar-baz]", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// upsert with "foo-bar-baz" removed
doc1["tags"] = {"alpha-beta-gamma"};
coll1->add(doc1.dump(), UPSERT);
results = coll1->search("david", {"name"},"tags:=[foo-bar-baz]", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("david", {"name"},"tags:=[bar-foo-baz]", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
// repeat for singular string field: upsert with "foo-bar-baz" removed
doc1["tag"] = "alpha-beta-gamma";
coll1->add(doc1.dump(), UPSERT);
results = coll1->search("david", {"name"},"tag:=[foo-bar-baz]", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, FilteringAfterUpsertOnArrayWithSymbolsToIndex) {
std::vector<field> fields = {field("name", field_types::STRING, false),
field("tags", field_types::STRING_ARRAY, false),
field("tag", field_types::STRING, false)};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "", 0, "", {"-"}, {}).get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["name"] = "david";
doc1["tags"] = {"alpha-beta-gamma", "foo-bar-baz"};
doc1["tag"] = "foo-bar-baz";
nlohmann::json doc2;
doc2["id"] = "1";
doc2["name"] = "david";
doc2["tags"] = {"alpha-gamma-beta", "bar-foo-baz"};
doc2["tag"] = "alpha-beta";
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("david", {"name"},"tags:=[foo-bar-baz]", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("0", results["hits"][0]["document"]["id"].get<std::string>());
// upsert with "foo-bar-baz" removed
doc1["tags"] = {"alpha-beta-gamma"};
coll1->add(doc1.dump(), UPSERT);
results = coll1->search("david", {"name"},"tags:=[foo-bar-baz]", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll1->search("david", {"name"},"tags:=[bar-foo-baz]", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
// repeat for singular string field: upsert with "foo-bar-baz" removed
doc1["tag"] = "alpha-beta-gamma";
coll1->add(doc1.dump(), UPSERT);
results = coll1->search("david", {"name"},"tag:=[foo-bar-baz]", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionFilteringTest, ComplexFilterQuery) {
nlohmann::json schema_json =
R"({
"name": "ComplexFilterQueryCollection",
"fields": [
{"name": "name", "type": "string"},
{"name": "age", "type": "int32"},
{"name": "years", "type": "int32[]"},
{"name": "rating", "type": "float"},
{"name": "tags", "type": "string[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(op.ok());
auto coll = op.get();
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::string json_line;
while (std::getline(infile, json_line)) {
auto add_op = coll->add(json_line);
ASSERT_TRUE(add_op.ok());
}
infile.close();
std::vector<sort_by> sort_fields_desc = {sort_by("rating", "DESC")};
nlohmann::json results = coll->search("Jeremy", {"name"}, "(rating:>=0 && years:>2000) && age:>50",
{}, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
results = coll->search("*", {"name"}, "(age:>50 && rating:>5) || years:<2000",
{}, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
results = coll->search("Jeremy", {"name"}, "(age:>50 || rating:>5) && years:<2000",
{}, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
std::vector<std::string> ids = {"4", "3"};
for (size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll->search("Jeremy", {"name"}, "(age:<50 && rating:10) || (years:>2000 && rating:<5)",
{}, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ids = {"0"};
for (size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll->search("Jeremy", {"name"}, "years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5))",
{}, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ids = {"2"};
for (size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
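// Repeat the same sub-expression ten times: this stays within the default `filter_by` limit
// of 100 operations, while appending one more copy (below) pushes it over the limit.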
std::string extreme_filter = "(years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5))) ||"
"(years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5))) ||"
"(years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5))) ||"
"(years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5))) ||"
"(years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5))) ||"
"(years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5))) ||"
"(years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5))) ||"
"(years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5))) ||"
"(years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5))) ||"
"(years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5)))";
auto search_op = coll->search("Jeremy", {"name"}, extreme_filter,
{}, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false});
ASSERT_TRUE(search_op.ok());
ASSERT_EQ(1, search_op.get()["hits"].size());
extreme_filter += "|| (years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5)))";
search_op = coll->search("Jeremy", {"name"}, extreme_filter,
{}, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("`filter_by` has too many operations. Maximum allowed: 100. Use `--filter-by-max-ops` command line "
"argument to customize this value.", search_op.error());
collectionManager.dispose();
delete store;
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit, 109); // Re-initialize with 109 filter operations allowed.
auto load_op = collectionManager.load(8, 1000);
if(!load_op.ok()) {
LOG(ERROR) << load_op.error();
}
ASSERT_TRUE(load_op.ok());
coll = collectionManager.get_collection_unsafe("ComplexFilterQueryCollection");
search_op = coll->search("Jeremy", {"name"}, extreme_filter,
{}, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false});
ASSERT_TRUE(search_op.ok());
ASSERT_EQ(1, search_op.get()["hits"].size());
extreme_filter += "|| (years:>2000 && ((age:<30 && rating:>5) || (age:>50 && rating:<5)))";
search_op = coll->search("Jeremy", {"name"}, extreme_filter,
{}, sort_fields_desc, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("`filter_by` has too many operations. Maximum allowed: 109. Use `--filter-by-max-ops` command line "
"argument to customize this value.", search_op.error());
collectionManager.drop_collection("ComplexFilterQueryCollection");
}
TEST_F(CollectionFilteringTest, PrefixSearchWithFilter) {
std::ifstream infile(std::string(ROOT_DIR)+"test/documents.jsonl");
std::vector<field> search_fields = {
field("title", field_types::STRING, false),
field("points", field_types::INT32, false)
};
query_fields = {"title"};
sort_fields = { sort_by(sort_field_const::text_match, "DESC"), sort_by("points", "DESC") };
auto collection = collectionManager.create_collection("collection", 4, search_fields, "points").get();
std::string json_line;
// dummy record with id 0, so that the test record IDs match the line numbers in the file
json_line = "{\"points\":10,\"title\":\"z\"}";
collection->add(json_line);
while (std::getline(infile, json_line)) {
collection->add(json_line);
}
infile.close();
std::vector<std::string> facets;
auto results = collection->search("what ex", query_fields, "points: >10", facets, sort_fields, {0}, 10, 1, MAX_SCORE, {true}, 10,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10).get();
ASSERT_EQ(7, results["hits"].size());
std::vector<std::string> ids = {"6", "12", "19", "22", "13", "8", "15"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
collectionManager.drop_collection("collection");
}
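// Filter tokens longer than the maximum indexed token length (100 characters) should still
// match, presumably because both the indexed and the filter token share the same 100-char prefix.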
TEST_F(CollectionFilteringTest, LargeFilterToken) {
nlohmann::json json =
R"({
"name": "LargeFilterTokenCollection",
"fields": [
{"name": "uri", "type": "string"}
],
"symbols_to_index": [
"/",
"-"
]
})"_json;
auto op = collectionManager.create_collection(json);
ASSERT_TRUE(op.ok());
auto coll = op.get();
json.clear();
std::string token = "rade/aols/insolvenzrecht/persoenliche-risiken-fuer-organe-von-kapitalgesellschaften-gmbh-"
"geschaeftsfuehrer-ag-vorstand";
json["uri"] = token;
auto add_op = coll->add(json.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll->search("*", query_fields, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
results = coll->search("*", query_fields, "uri:" + token, {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
token.erase(100); // The maximum token length that's indexed is 100, so we'll still get a match.
results = coll->search("*", query_fields, "uri:" + token, {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
token.erase(99);
results = coll->search("*", query_fields, "uri:" + token, {}, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
}
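// Filtering on a field declared with "index": false should be rejected with a descriptive error.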
TEST_F(CollectionFilteringTest, NonIndexedFiltering) {
nlohmann::json json =
R"({
"name": "NonIndexedCollection",
"fields": [
{"name": "uri", "type": "string"},
{"name": "non_index", "type": "string", "index": false, "optional": true}
]
})"_json;
auto op = collectionManager.create_collection(json);
ASSERT_TRUE(op.ok());
auto coll = op.get();
json.clear();
json = R"({
"uri": "token",
"non_index": "foo"
})"_json;
auto add_op = coll->add(json.dump());
ASSERT_TRUE(add_op.ok());
auto search_op = coll->search("*", {}, "", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_EQ(1, search_op.get()["hits"].size());
search_op = coll->search("*", {}, "non_index:= bar", {}, sort_fields, {0}, 10, 1, FREQUENCY, {false});
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Cannot filter on non-indexed field `non_index`.", search_op.error());
}
TEST_F(CollectionFilteringTest, ComputeFilterResult) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
for(size_t i=0; i<50; i++) {
nlohmann::json doc;
doc["title"] = i < 10 ? "foo" : "bar";
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto res_op = coll1->search("*", {}, "title: foo",
{}, {}, {0}, 10, 1, FREQUENCY, {true});
auto results = res_op.get();
ASSERT_EQ(10, results["found"]);
res_op = coll1->search("*", {}, "title: bar && points:>=10",
{}, {}, {0}, 10, 1, FREQUENCY, {true});
results = res_op.get();
ASSERT_EQ(40, results["found"]);
collectionManager.drop_collection("coll1");
}
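// Prefix filters: a trailing `*` matches tokens by prefix. With `:` any token in the value may
// match (e.g. "Adam Stator" matches "S*"), while with `:=` the value (or array element) itself
// must begin with the prefix (only "Steve Jobs" matches "S*").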
TEST_F(CollectionFilteringTest, PrefixFilterOnTextFields) {
Collection *coll_mul_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("starring", field_types::STRING, false),
field("cast", field_types::STRING_ARRAY, true),
field("points", field_types::INT32, false)
};
coll_mul_fields = collectionManager.get_collection("coll_mul_fields").get();
if(coll_mul_fields == nullptr) {
coll_mul_fields = collectionManager.create_collection("coll_mul_fields", 4, fields, "points").get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
coll_mul_fields->add(json_line);
}
infile.close();
nlohmann::json results = coll_mul_fields->search("*", {}, "cast: Chris", {}, {}, {0},
10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
std::vector<std::string> ids = {"6", "1", "7"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
results = coll_mul_fields->search("*", {}, "cast: Ch*", {}, {}, {0},
10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"6", "1", "7"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
results = coll_mul_fields->search("*", {}, "cast: M*", {}, {}, {0},
10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"3", "2", "16"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
results = coll_mul_fields->search("*", {}, "cast: Chris P*", {}, {}, {0},
10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(2, results["hits"].size());
ids = {"1", "7"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
results = coll_mul_fields->search("*", {}, "cast: [Martin, Chris P*]", {}, {}, {0},
10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
ids = {"2", "1", "7"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
results = coll_mul_fields->search("*", {}, "cast: [M*, Chris P*]", {}, {}, {0},
10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(5, results["hits"].size());
ids = {"3", "2", "16", "1", "7"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_EQ(id, result_id);
}
auto schema_json =
R"({
"name": "Names",
"fields": [
{"name": "name", "type": "string", "optional": true},
{"name": "names", "type": "string[]", "optional": true}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"name": "Steve Jobs"
})"_json,
R"({
"name": "Adam Stator"
})"_json,
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
std::map<std::string, std::string> req_params = {
{"collection", "Names"},
{"q", "*"},
{"query_by", "name"},
{"filter_by", "name:= S*"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ("Steve Jobs", res_obj["hits"][0]["document"].at("name"));
req_params = {
{"collection", "Names"},
{"q", "*"},
{"query_by", "name"},
{"filter_by", "name: S*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("Adam Stator", res_obj["hits"][0]["document"].at("name"));
ASSERT_EQ("Steve Jobs", res_obj["hits"][1]["document"].at("name"));
documents = {
R"({
"name": "Steve Reiley"
})"_json,
R"({
"name": "Storm"
})"_json,
R"({
"name": "Steve Rogers"
})"_json,
};
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "name"},
{"filter_by", "name:= St*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ("Steve Rogers", res_obj["hits"][0]["document"].at("name"));
ASSERT_EQ("Storm", res_obj["hits"][1]["document"].at("name"));
ASSERT_EQ("Steve Reiley", res_obj["hits"][2]["document"].at("name"));
ASSERT_EQ("Steve Jobs", res_obj["hits"][3]["document"].at("name"));
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "name"},
{"filter_by", "name: St*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(5, res_obj["found"].get<size_t>());
ASSERT_EQ(5, res_obj["hits"].size());
ASSERT_EQ("Steve Rogers", res_obj["hits"][0]["document"].at("name"));
ASSERT_EQ("Storm", res_obj["hits"][1]["document"].at("name"));
ASSERT_EQ("Steve Reiley", res_obj["hits"][2]["document"].at("name"));
ASSERT_EQ("Adam Stator", res_obj["hits"][3]["document"].at("name"));
ASSERT_EQ("Steve Jobs", res_obj["hits"][4]["document"].at("name"));
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "name"},
{"filter_by", "name:= Steve R*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("Steve Rogers", res_obj["hits"][0]["document"].at("name"));
ASSERT_EQ("Steve Reiley", res_obj["hits"][1]["document"].at("name"));
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "name"},
{"filter_by", "name: Steve R*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("Steve Rogers", res_obj["hits"][0]["document"].at("name"));
ASSERT_EQ("Steve Reiley", res_obj["hits"][1]["document"].at("name"));
documents = {
R"({
"names": []
})"_json,
R"({
"names": ["Steve Jobs"]
})"_json,
R"({
"names": ["Adam Stator"]
})"_json
};
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "names"},
{"filter_by", "names:= St*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ("Steve Jobs", res_obj["hits"][0]["document"]["names"][0]);
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "names"},
{"filter_by", "names: St*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("Adam Stator", res_obj["hits"][0]["document"]["names"][0]);
ASSERT_EQ("Steve Jobs", res_obj["hits"][1]["document"]["names"][0]);
documents = {
R"({
"names": ["Steve Reiley"]
})"_json,
R"({
"names": ["Storm"]
})"_json,
R"({
"names": ["Adam", "Steve Rogers"]
})"_json,
};
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "names"},
{"filter_by", "names:= St*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ("Steve Rogers", res_obj["hits"][0]["document"]["names"][1]);
ASSERT_EQ("Storm", res_obj["hits"][1]["document"]["names"][0]);
ASSERT_EQ("Steve Reiley", res_obj["hits"][2]["document"]["names"][0]);
ASSERT_EQ("Steve Jobs", res_obj["hits"][3]["document"]["names"][0]);
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "names"},
{"filter_by", "names: St*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(5, res_obj["found"].get<size_t>());
ASSERT_EQ(5, res_obj["hits"].size());
ASSERT_EQ("Steve Rogers", res_obj["hits"][0]["document"]["names"][1]);
ASSERT_EQ("Storm", res_obj["hits"][1]["document"]["names"][0]);
ASSERT_EQ("Steve Reiley", res_obj["hits"][2]["document"]["names"][0]);
ASSERT_EQ("Adam Stator", res_obj["hits"][3]["document"]["names"][0]);
ASSERT_EQ("Steve Jobs", res_obj["hits"][4]["document"]["names"][0]);
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "names"},
{"filter_by", "names:= Steve*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("Steve Rogers", res_obj["hits"][0]["document"]["names"][1]);
ASSERT_EQ("Steve Reiley", res_obj["hits"][1]["document"]["names"][0]);
ASSERT_EQ("Steve Jobs", res_obj["hits"][2]["document"]["names"][0]);
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "names"},
{"filter_by", "names: Steve*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("Steve Rogers", res_obj["hits"][0]["document"]["names"][1]);
ASSERT_EQ("Steve Reiley", res_obj["hits"][1]["document"]["names"][0]);
ASSERT_EQ("Steve Jobs", res_obj["hits"][2]["document"]["names"][0]);
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "names"},
{"filter_by", "names:= Steve R*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("Steve Rogers", res_obj["hits"][0]["document"]["names"][1]);
ASSERT_EQ("Steve Reiley", res_obj["hits"][1]["document"]["names"][0]);
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "names"},
{"filter_by", "names: Steve R*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("Steve Rogers", res_obj["hits"][0]["document"]["names"][1]);
ASSERT_EQ("Steve Reiley", res_obj["hits"][1]["document"]["names"][0]);
documents = {
R"({
"names": ["Steve Runner foo"]
})"_json,
R"({
"names": ["foo Steve Runner"]
})"_json,
};
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "names"},
{"filter_by", "names:= Steve R*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("Steve Runner foo", res_obj["hits"][0]["document"]["names"][0]);
ASSERT_EQ("Steve Rogers", res_obj["hits"][1]["document"]["names"][1]);
ASSERT_EQ("Steve Reiley", res_obj["hits"][2]["document"]["names"][0]);
req_params = {
{"collection", "Names"},
{"q", "s"},
{"query_by", "names"},
{"filter_by", "names: Steve R*"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ("foo Steve Runner", res_obj["hits"][0]["document"]["names"][0]);
ASSERT_EQ("Steve Runner foo", res_obj["hits"][1]["document"]["names"][0]);
ASSERT_EQ("Steve Rogers", res_obj["hits"][2]["document"]["names"][1]);
ASSERT_EQ("Steve Reiley", res_obj["hits"][3]["document"]["names"][0]);
}
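// Exact filters on values with more tokens than the indexing limit should fail gracefully
// and simply return no hits.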
TEST_F(CollectionFilteringTest, ExactFilterOnLongField) {
nlohmann::json schema = R"({
"name": "companies",
"fields": [
{"name": "keywords", "type": "string[]"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
auto coll = op.get();
nlohmann::json doc1;
doc1["id"] = "0";
std::string arr_value;
// when value exceeds 128 tokens, we will fail gracefully
for(size_t i = 0; i < 130; i++) {
arr_value += "foo" + std::to_string(i) + " ";
}
doc1["keywords"] = {arr_value};
ASSERT_TRUE(coll->add(doc1.dump()).ok());
auto results = coll->search("*", {}, "keywords:=" + arr_value, {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(0, results["hits"].size());
}
TEST_F(CollectionFilteringTest, FilterOnStemmedField) {
nlohmann::json schema = R"({
"name": "companies",
"fields": [
{"name": "keywords", "type": "string[]", "facet": true, "stem": true }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
auto coll = op.get();
nlohmann::json doc1 = {
{"id", "124"},
{"keywords", {"Restaurant"}}
};
nlohmann::json doc2 = {
{"id", "125"},
{"keywords", {"Baking"}}
};
ASSERT_TRUE(coll->add(doc1.dump()).ok());
ASSERT_TRUE(coll->add(doc2.dump()).ok());
auto results = coll->search("*", {}, "keywords:=Baking", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ("125", results["hits"][0]["document"]["id"].get<std::string>());
}
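// `max_filter_by_candidates` caps how many candidate tokens a prefix filter expands to:
// the default yields 4 of the 20 "Independent*" titles, 0 yields nothing, and 1 yields only
// the top candidate.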
TEST_F(CollectionFilteringTest, MaxFilterByCandidates) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("points", field_types::INT32, false)};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
for(size_t i = 0; i < 20; i++) {
nlohmann::json doc;
doc["title"] = "Independent" + std::to_string(i);
doc["points"] = i;
coll1->add(doc.dump());
}
std::map<std::string, std::string> req_params = {
{"collection", "coll1"},
{"q", "*"},
{"filter_by", "title:independent*"},
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ("Independent19", res_obj["hits"][0]["document"]["title"]);
ASSERT_EQ("Independent18", res_obj["hits"][1]["document"]["title"]);
ASSERT_EQ("Independent17", res_obj["hits"][2]["document"]["title"]);
ASSERT_EQ("Independent16", res_obj["hits"][3]["document"]["title"]);
req_params = {
{"collection", "coll1"},
{"q", "*"},
{"filter_by", "title:independent*"},
{"max_filter_by_candidates", "0"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(0, res_obj["found"].get<size_t>());
ASSERT_EQ(0, res_obj["hits"].size());
req_params = {
{"collection", "coll1"},
{"q", "*"},
{"filter_by", "title:independent*"},
{"max_filter_by_candidates", "1"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ("Independent19", res_obj["hits"][0]["document"]["title"]);
}
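// With `enable_nested_fields`, nested object keys can be filtered via dotted paths such as
// `stocks.26.rec`.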
TEST_F(CollectionFilteringTest, FilterOnObjectFields) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string", "infix": true},
{"name": "product_description", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_description"], "model_config": {"model_name": "ts/e5-small"}}},
{"name": "rating", "type": "int32"},
{"name": "stocks", "type": "object"},
{"name": "stocks.*", "type": "auto", "optional": true}
],
"enable_nested_fields": true
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair.",
"rating": "2",
"stocks": {
"26": {
"rec": true
}
}
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients.",
"rating": "4",
"stocks": {
"26": {
"rec": false
}
}
})"_json,
R"({
"product_id": "product_c",
"product_name": "comb",
"product_description": "Experience the natural elegance and gentle care of our handcrafted wooden combs – because your hair deserves the best.",
"rating": "3",
"stocks": {}
})"_json
};
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
std::map<std::string, std::string> req_params = {
{"collection", "Products"},
{"q", "*"},
{"filter_by", "stocks.26.rec:true"},
{"include_fields", "product_id, product_name, stocks"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("stocks"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"].count("26"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"]["26"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"]["26"].count("rec"));
ASSERT_TRUE(res_obj["hits"][0]["document"]["stocks"]["26"]["rec"]);
req_params = {
{"collection", "Products"},
{"q", "*"},
{"filter_by", "stocks.26.rec:false"},
{"include_fields", "product_id, product_name, stocks"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("stocks"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"].count("26"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"]["26"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["stocks"]["26"].count("rec"));
ASSERT_FALSE(res_obj["hits"][0]["document"]["stocks"]["26"]["rec"]);
}
| 131,272 | C++ | .cpp | 2,508 | 43.645534 | 171 | 0.56261 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,733 | index_test.cpp | typesense_typesense/test/index_test.cpp |
#include <gtest/gtest.h>
#include "index.h"
#include <vector>
#include <s2/s2loop.h>
/*TEST(IndexTest, PointInPolygon180thMeridian) {
// somewhere in far eastern russia
GeoCoord verts[3] = {
{67.63378886620751, 179.87924212491276},
{67.6276069384328, -179.8364939577639},
{67.5749950728145, 179.94421673458666}
};
*//*std::vector<S2Point> vertices;
for(size_t point_index = 0; point_index < 4; point_index++) {
S2Point vertex = S2LatLng::FromDegrees(verts[point_index].lat, verts[point_index].lon).ToPoint();
vertices.emplace_back(vertex);
}
S2Loop region(vertices);*//*
Geofence poly1{3, verts};
double offset = Index::transform_for_180th_meridian(poly1);
GeoCoord point1 = {67.61896440098865, 179.9998420463554};
GeoCoord point2 = {67.6332378896519, 179.88828622883355};
GeoCoord point3 = {67.62717271243574, -179.85954137693625};
GeoCoord point4 = {67.65842784263879, -179.79268650445243};
GeoCoord point5 = {67.62016647245217, 179.83764198608083};
Index::transform_for_180th_meridian(point1, offset);
Index::transform_for_180th_meridian(point2, offset);
Index::transform_for_180th_meridian(point3, offset);
Index::transform_for_180th_meridian(point4, offset);
Index::transform_for_180th_meridian(point5, offset);
*//*ASSERT_TRUE(region.Contains(S2LatLng::FromDegrees(point1.lat, point1.lon).ToPoint()));
ASSERT_TRUE(region.Contains(S2LatLng::FromDegrees(point2.lat, point2.lon).ToPoint()));
ASSERT_TRUE(region.Contains(S2LatLng::FromDegrees(point3.lat, point3.lon).ToPoint()));
ASSERT_FALSE(region.Contains(S2LatLng::FromDegrees(point4.lat, point4.lon).ToPoint()));
ASSERT_FALSE(region.Contains(S2LatLng::FromDegrees(point5.lat, point5.lon).ToPoint()));
*//*
ASSERT_TRUE(Index::is_point_in_polygon(poly1, point1));
ASSERT_TRUE(Index::is_point_in_polygon(poly1, point2));
ASSERT_TRUE(Index::is_point_in_polygon(poly1, point3));
ASSERT_FALSE(Index::is_point_in_polygon(poly1, point4));
ASSERT_FALSE(Index::is_point_in_polygon(poly1, point5));
}*/
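// Packing a lat/lng pair into a single int64 and unpacking it again should preserve the
// coordinates to float precision across all hemispheres.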
TEST(IndexTest, GeoPointPackUnpack) {
std::vector<std::pair<double, double>> latlngs = {
{43.677223,-79.630556},
{-0.041935, 65.433296}, // Indian Ocean Equator
{-66.035056, 173.187202}, // New Zealand
{-65.015656, -158.336234}, // Southern Ocean
{84.552144, -159.742483}, // Arctic Ocean
{84.517046, 171.730040} // Siberian Sea
};
for(auto& latlng: latlngs) {
int64_t packed_latlng = GeoPoint::pack_lat_lng(latlng.first, latlng.second);
S2LatLng s2LatLng;
GeoPoint::unpack_lat_lng(packed_latlng, s2LatLng);
ASSERT_FLOAT_EQ(latlng.first, s2LatLng.lat().degrees());
ASSERT_FLOAT_EQ(latlng.second, s2LatLng.lng().degrees());
}
}
| 2,860 | C++ | .cpp | 58 | 43.62069 | 105 | 0.692473 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,734 | tokenizer_test.cpp | typesense_typesense/test/tokenizer_test.cpp |
#include <gtest/gtest.h>
#include "tokenizer.h"
#include "logger.h"
TEST(TokenizerTest, ShouldTokenizeNormalizeDifferentStrings) {
const std::string withaccent = "Mise T.J. à jour Timy depuis PC";
std::vector<std::string> tokens;
Tokenizer(withaccent, true, false).tokenize(tokens);
std::vector<std::string> withaccent_tokens = {"mise", "tj", "a", "jour", "timy", "depuis", "pc"};
ASSERT_EQ(withaccent_tokens.size(), tokens.size());
for(size_t i = 0; i < withaccent_tokens.size(); i++) {
ASSERT_EQ(withaccent_tokens[i], tokens[i]);
}
const std::string withnewline = "Michael Jordan:\nWelcome, everybody. Welcome! ";
tokens.clear();
Tokenizer(withnewline, true, false).tokenize(tokens);
ASSERT_EQ(5, tokens.size());
ASSERT_STREQ("michael", tokens[0].c_str());
ASSERT_STREQ("jordan", tokens[1].c_str());
ASSERT_STREQ("welcome", tokens[2].c_str());
ASSERT_STREQ("everybody", tokens[3].c_str());
ASSERT_STREQ("welcome", tokens[4].c_str());
const std::string withspaces = " Michael Jordan ";
tokens.clear();
Tokenizer(withspaces, true, false).tokenize(tokens);
ASSERT_EQ(2, tokens.size());
ASSERT_STREQ("michael", tokens[0].c_str());
ASSERT_STREQ("jordan", tokens[1].c_str());
// single token
const std::string single_token = "foobar";
tokens.clear();
Tokenizer(single_token, false, false).tokenize(tokens);
ASSERT_EQ(1, tokens.size());
ASSERT_STREQ("foobar", tokens[0].c_str());
// split tokens
const std::string split_tokens = "foo-bar-baz";
tokens.clear();
Tokenizer(split_tokens, false, false).tokenize(tokens);
ASSERT_EQ(1, tokens.size());
ASSERT_STREQ("foobarbaz", tokens[0].c_str());
tokens.clear();
Tokenizer(split_tokens, true, false).tokenize(tokens);
ASSERT_EQ(1, tokens.size());
ASSERT_STREQ("foobarbaz", tokens[0].c_str());
// multiple spaces
const std::string multispace_tokens = "foo bar";
tokens.clear();
Tokenizer(multispace_tokens, false, false).tokenize(tokens);
ASSERT_EQ(2, tokens.size());
ASSERT_STREQ("foo", tokens[0].c_str());
ASSERT_STREQ("bar", tokens[1].c_str());
// special chars
const std::string specialchar_tokens = "https://www.amazon.com/s?k=phone&ref=nb_sb_noss_2";
tokens.clear();
Tokenizer(specialchar_tokens, false, false).tokenize(tokens);
ASSERT_EQ(1, tokens.size());
ASSERT_STREQ("httpswwwamazoncomskphonerefnbsbnoss2", tokens[0].c_str());
// noop
tokens.clear();
const std::string withspecialchars = "Special ½¥ and தமிழ் 你好吗 abcÅà123ß12 here.";
Tokenizer(withspecialchars, true, true).tokenize(tokens);
ASSERT_EQ(1, tokens.size());
ASSERT_STREQ(withspecialchars.c_str(), tokens[0].c_str());
}
TEST(TokenizerTest, ShouldTokenizeNormalizeUnicodeStrings) {
std::vector<std::string> tokens;
const std::string withspecialchars = "Special ½¥ and -thenதமிழ், 你2好吗 abcÅà123ß12 verläßlich here.";
tokens.clear();
Tokenizer(withspecialchars, true, false).tokenize(tokens);
ASSERT_EQ(8, tokens.size());
ASSERT_STREQ("special", tokens[0].c_str());
ASSERT_STREQ("12yen", tokens[1].c_str());
ASSERT_STREQ("and", tokens[2].c_str());
ASSERT_STREQ("thenதமிழ்", tokens[3].c_str());
ASSERT_STREQ("你2好吗", tokens[4].c_str());
ASSERT_STREQ("abcaa123ss12", tokens[5].c_str());
ASSERT_STREQ("verlasslich", tokens[6].c_str());
ASSERT_STREQ("here", tokens[7].c_str());
// when normalization is disabled
const std::string withoutnormalize = "Mise à, jour.";
tokens.clear();
Tokenizer(withoutnormalize, false, false).tokenize(tokens);
ASSERT_EQ(3, tokens.size());
ASSERT_STREQ("Mise", tokens[0].c_str());
ASSERT_STREQ("à", tokens[1].c_str());
ASSERT_STREQ("jour", tokens[2].c_str());
// single accented word tokenization
std::string singleword = "à";
tokens.clear();
Tokenizer(singleword, true, false).tokenize(tokens);
ASSERT_EQ(1, tokens.size());
ASSERT_STREQ("a", tokens[0].c_str());
}
TEST(TokenizerTest, ShouldTokenizeIteratively) {
const std::string withnewline = "Michael Jordan:\n\nWelcome, everybody. Welcome!";
std::vector<std::string> tokens;
Tokenizer tokenizer1(withnewline, true, false);
std::string token;
size_t token_index;
while(tokenizer1.next(token, token_index)) {
tokens.push_back(token);
}
ASSERT_EQ(5, tokens.size());
ASSERT_STREQ("michael", tokens[0].c_str());
ASSERT_STREQ("jordan", tokens[1].c_str());
ASSERT_STREQ("welcome", tokens[2].c_str());
ASSERT_STREQ("everybody", tokens[3].c_str());
ASSERT_STREQ("welcome", tokens[4].c_str());
// check for index when token_separators are not kept
Tokenizer tokenizer2(withnewline, true, false);
size_t expected_token_index = 0;
std::vector<std::string> expected_tokens = {"michael", "jordan", "welcome", "everybody", "welcome"};
while(tokenizer2.next(token, token_index)) {
ASSERT_EQ(expected_token_index, token_index);
ASSERT_EQ(expected_tokens[expected_token_index], token);
expected_token_index++;
}
// verbatim (no_op=true)
tokens.clear();
Tokenizer tokenizer3(withnewline, false, true);
while(tokenizer3.next(token, token_index)) {
tokens.push_back(token);
}
ASSERT_EQ(1, tokens.size());
ASSERT_STREQ("Michael Jordan:\n\nWelcome, everybody. Welcome!", tokens[0].c_str());
}
TEST(TokenizerTest, ShouldTokenizeTextWithCustomSpecialChars) {
std::vector<std::string> tokens;
Tokenizer("and -some -more", true, false, "en", {'-'}).tokenize(tokens);
ASSERT_EQ(3, tokens.size());
ASSERT_EQ("and", tokens[0]);
ASSERT_EQ("-some", tokens[1]);
ASSERT_EQ("-more", tokens[2]);
}
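// Chinese ("zh") tokenization converts traditional characters to their simplified forms and
// segments the text into word-level tokens.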
TEST(TokenizerTest, ShouldTokenizeChineseText) {
std::vector<std::string> tokens;
// traditional -> simplified
Tokenizer("語", false, false, "zh").tokenize(tokens);
ASSERT_EQ(1, tokens.size());
ASSERT_EQ("语", tokens[0]);
tokens.clear();
Tokenizer("說", false, false, "zh").tokenize(tokens);
ASSERT_EQ(1, tokens.size());
ASSERT_EQ("说", tokens[0]);
// tokenize traditional
tokens.clear();
Tokenizer("愛並不會因時間而", false, false, "zh").tokenize(tokens);
ASSERT_EQ(6, tokens.size());
ASSERT_EQ("爱", tokens[0]);
ASSERT_EQ("并不", tokens[1]);
ASSERT_EQ("会", tokens[2]);
ASSERT_EQ("因", tokens[3]);
ASSERT_EQ("时间", tokens[4]);
ASSERT_EQ("而", tokens[5]);
// tokenize simplified
tokens.clear();
Tokenizer("爱并不会因时间而", false, false, "zh").tokenize(tokens);
ASSERT_EQ(6, tokens.size());
ASSERT_EQ("爱", tokens[0]);
ASSERT_EQ("并不", tokens[1]);
ASSERT_EQ("会", tokens[2]);
ASSERT_EQ("因", tokens[3]);
ASSERT_EQ("时间", tokens[4]);
ASSERT_EQ("而", tokens[5]);
// with token_separators
tokens.clear();
Tokenizer("很久以前,傳說在臺中北屯的一個地方", false, false, "zh").tokenize(tokens);
ASSERT_EQ(10, tokens.size());
ASSERT_EQ("很久", tokens[0]);
ASSERT_EQ("以前", tokens[1]);
ASSERT_EQ("传说", tokens[2]);
ASSERT_EQ("在", tokens[3]);
ASSERT_EQ("台中", tokens[4]);
ASSERT_EQ("北", tokens[5]);
ASSERT_EQ("屯", tokens[6]);
ASSERT_EQ("的", tokens[7]);
ASSERT_EQ("一个", tokens[8]);
ASSERT_EQ("地方", tokens[9]);
tokens.clear();
Tokenizer("朵雲──海", false, false, "zh").tokenize(tokens);
ASSERT_EQ(3, tokens.size());
ASSERT_EQ("朵", tokens[0]);
ASSERT_EQ("云", tokens[1]);
ASSERT_EQ("海", tokens[2]);
tokens.clear();
Tokenizer("山丘上。媽媽", false, false, "zh").tokenize(tokens);
ASSERT_EQ(3, tokens.size());
ASSERT_EQ("山丘", tokens[0]);
ASSERT_EQ("上", tokens[1]);
ASSERT_EQ("妈妈", tokens[2]);
}
TEST(TokenizerTest, ShouldTokenizeLocaleText) {
std::vector<std::string> tokens;
tokens.clear();
std::string str = "ความเหลื่อมล้ำ";
Tokenizer(str, true, false, "th").tokenize(tokens);
//ASSERT_EQ(2, tokens.size());
tokens.clear();
str = "เหลื่";
Tokenizer(str, false, false, "th").tokenize(tokens);
//ASSERT_EQ(1, tokens.size());
tokens.clear();
str = "จิ้งจอกสีน้ำตาลด่วน";
Tokenizer(str, true, false, "th").tokenize(tokens);
ASSERT_EQ(4, tokens.size());
ASSERT_EQ("จิ้งจอก", tokens[0]);
ASSERT_EQ("สี", tokens[1]);
ASSERT_EQ("น้ําตาล", tokens[2]);
ASSERT_EQ("ด่วน", tokens[3]);
tokens.clear();
str = "น. วันที่ 31 มี.ค.";
Tokenizer(str, false, false, "th").tokenize(tokens);
ASSERT_EQ(5, tokens.size());
ASSERT_EQ("น", tokens[0]);
ASSERT_EQ("วัน", tokens[1]);
ASSERT_EQ("ที่", tokens[2]);
ASSERT_EQ("31", tokens[3]);
ASSERT_EQ("มีค", tokens[4]);
tokens.clear();
str = "12345_678";
Tokenizer(str, false, false, "th").tokenize(tokens);
ASSERT_EQ(1, tokens.size());
ASSERT_EQ("12345678", tokens[0]);
tokens.clear();
Tokenizer("Odd Thomas", false, false, "en").tokenize(tokens);
ASSERT_EQ(2, tokens.size());
ASSERT_EQ("Odd", tokens[0]);
ASSERT_EQ("Thomas", tokens[1]);
// korean
tokens.clear();
Tokenizer("경승지·산악·협곡", false, false, "ko").tokenize(tokens);
ASSERT_EQ(3, tokens.size());
ASSERT_EQ("경승지", tokens[0]);
ASSERT_EQ("산악", tokens[1]);
ASSERT_EQ("협곡", tokens[2]);
tokens.clear();
Tokenizer("안녕은하철도999극장판", false, false, "ko").tokenize(tokens);
ASSERT_EQ(3, tokens.size());
ASSERT_EQ("안녕은하철도", tokens[0]);
ASSERT_EQ("999", tokens[1]);
ASSERT_EQ("극장판", tokens[2]);
// japanese
tokens.clear();
Tokenizer("退屈", true, false, "ja").tokenize(tokens);
ASSERT_EQ(2, tokens.size());
ASSERT_EQ("た", tokens[0]);
ASSERT_EQ("いくつ", tokens[1]);
tokens.clear();
Tokenizer("魈", true, false, "ja").tokenize(tokens);
ASSERT_EQ(0, tokens.size());
tokens.clear();
Tokenizer("「業果材", true, false, "ja").tokenize(tokens);
ASSERT_EQ(6, tokens.size());
tokens.clear();
Tokenizer("ア退屈であ", true, false, "ja").tokenize(tokens);
ASSERT_EQ(5, tokens.size());
ASSERT_EQ("あ", tokens[0]);
ASSERT_EQ("た", tokens[1]);
ASSERT_EQ("いくつ", tokens[2]);
ASSERT_EQ("で", tokens[3]);
ASSERT_EQ("あ", tokens[4]);
tokens.clear();
Tokenizer("怠惰な犬", true, false, "ja").tokenize(tokens);
ASSERT_EQ(4, tokens.size());
ASSERT_EQ("たい", tokens[0]);
ASSERT_EQ("だ", tokens[1]);
ASSERT_EQ("な", tokens[2]);
ASSERT_EQ("いぬ", tokens[3]);
tokens.clear();
Tokenizer("今ぶり拍治ルツ", true, false, "ja").tokenize(tokens);
ASSERT_EQ(9, tokens.size());
ASSERT_EQ("いま", tokens[0]);
ASSERT_EQ("ぶり", tokens[1]);
ASSERT_EQ("は", tokens[2]);
ASSERT_EQ("く", tokens[3]);
ASSERT_EQ("お", tokens[4]);
ASSERT_EQ("さ", tokens[5]);
ASSERT_EQ("む", tokens[6]);
ASSERT_EQ("る", tokens[7]);
ASSERT_EQ("つ", tokens[8]);
tokens.clear(); // 配管
Tokenizer("配管", true, false, "ja").tokenize(tokens);
// persian containing zwnj
tokens.clear();
Tokenizer("روان\u200Cشناسی", false, false, "fa").tokenize(tokens);
ASSERT_EQ(2, tokens.size());
}
TEST(TokenizerTest, ShouldTokenizeLocaleTextWithEnglishText) {
std::string tstr = "ผู้เขียนมีความสนใจเกี่ยวกับ Discrete Math และการคำนวณโดยทั่วไป";
std::vector<std::string> ttokens;
Tokenizer(tstr, true, false, "th").tokenize(ttokens);
ASSERT_EQ(14, ttokens.size());
ASSERT_EQ("discrete", ttokens[7]);
ASSERT_EQ("math", ttokens[8]);
}
TEST(TokenizerTest, ShouldRemoveGenericPunctuationFromThaiText) {
std::string tstr = "f’’b";
std::vector<std::string> ttokens;
Tokenizer(tstr, true, false, "th").tokenize(ttokens);
ASSERT_EQ(2, ttokens.size());
ASSERT_EQ("f", ttokens[0]);
ASSERT_EQ("b", ttokens[1]);
ttokens.clear();
tstr = "Lay’s";
Tokenizer(tstr, true, false, "th").tokenize(ttokens);
ASSERT_EQ(1, ttokens.size());
ASSERT_EQ("lays", ttokens[0]);
}
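// With the "sv" locale, diacritics are preserved (only lowercased); with no locale the same
// input is ASCII-folded ("Ängelholm" -> "angelholm").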
TEST(TokenizerTest, ShouldTokenizeLocaleTextWithSwedishText) {
std::string tstr = "södra";
std::vector<std::string> ttokens;
Tokenizer(tstr, true, false, "sv").tokenize(ttokens);
ASSERT_EQ(1, ttokens.size());
ASSERT_EQ("södra", ttokens[0]);
tstr = "Ängelholm";
ttokens.clear();
Tokenizer(tstr, true, false, "sv").tokenize(ttokens);
ASSERT_EQ(1, ttokens.size());
ASSERT_EQ("ängelholm", ttokens[0]);
tstr = "Ängelholm";
ttokens.clear();
Tokenizer(tstr, true, false, "").tokenize(ttokens);
ASSERT_EQ(1, ttokens.size());
ASSERT_EQ("angelholm", ttokens[0]);
}
TEST(TokenizerTest, ShouldTokenizeWithDifferentSymbolConfigs) {
std::string str1 = "ความ-เหลื่อมล้ำ";
// '-' in symbols_to_index: "ความ", "-", "เหลื่อม", "ล้ำ"
// '-' in separators: "ความ", "เหลื่อม", "ล้ำ"
// none: "ความ", "เหลื่อม", "ล้ำ"
std::vector<std::string> tokens;
Tokenizer(str1, true, false, "th", {'-'}, {}).tokenize(tokens);
ASSERT_EQ(4, tokens.size());
ASSERT_EQ("ความ", tokens[0]);
ASSERT_EQ("-", tokens[1]);
ASSERT_EQ("เหลื่อม", tokens[2]);
ASSERT_EQ("ล้ํา", tokens[3]);
tokens.clear();
Tokenizer(str1, true, false, "th", {}, {'-'}).tokenize(tokens);
ASSERT_EQ(3, tokens.size());
ASSERT_EQ("ความ", tokens[0]);
ASSERT_EQ("เหลื่อม", tokens[1]);
ASSERT_EQ("ล้ํา", tokens[2]);
tokens.clear();
Tokenizer(str1, true, false, "th", {}, {}).tokenize(tokens);
ASSERT_EQ(3, tokens.size());
ASSERT_EQ("ความ", tokens[0]);
ASSERT_EQ("เหลื่อม", tokens[1]);
ASSERT_EQ("ล้ํา", tokens[2]);
tokens.clear();
Tokenizer("ความ_เห", true, false, "th", {}, {}).tokenize(tokens);
ASSERT_EQ(1, tokens.size());
ASSERT_EQ("ความเห", tokens[0]);
}
| 14,668 | C++ | .cpp | 353 | 33.818697 | 104 | 0.631757 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,735 | popular_queries_test.cpp | typesense_typesense/test/popular_queries_test.cpp |
#include <gtest/gtest.h>
#include "query_analytics.h"
#include "logger.h"
class PopularQueriesTest : public ::testing::Test {
protected:
virtual void SetUp() {
}
virtual void TearDown() {
}
};
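// As the user types a query letter by letter ("f", "fo", "foo"), only the final query of the
// typing session should be counted once the finalization interval has elapsed.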
TEST_F(PopularQueriesTest, PrefixQueryCompaction) {
QueryAnalytics pq(10);
auto now_ts_us = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
// compaction when no queries have been entered
pq.compact_user_queries(now_ts_us);
auto queries = pq.get_user_prefix_queries();
ASSERT_TRUE(queries.empty());
// compaction after user has typed first prefix but before compaction interval has happened
pq.add("f", "f", true, "0", now_ts_us+1);
pq.compact_user_queries(now_ts_us+2);
queries = pq.get_user_prefix_queries();
ASSERT_EQ(1, queries.size());
ASSERT_EQ(1, queries.count("0"));
ASSERT_EQ(1, queries["0"].size());
ASSERT_EQ("f", queries["0"][0].query);
ASSERT_EQ(now_ts_us+1, queries["0"][0].timestamp);
ASSERT_EQ(0, pq.get_local_counts().size());
// compaction interval has happened
pq.compact_user_queries(now_ts_us + QueryAnalytics::QUERY_FINALIZATION_INTERVAL_MICROS + 100);
queries = pq.get_user_prefix_queries();
ASSERT_EQ(0, queries.size());
auto local_counts = pq.get_local_counts();
ASSERT_EQ(1, local_counts.size());
ASSERT_EQ(1, local_counts.count("f"));
ASSERT_EQ(1, local_counts["f"]);
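// Observed pattern, stated as an inference about QueryAnalytics rather than its spec:
// a user's latest prefix query sits in the per-user buffer and is folded into
// local_counts only once QUERY_FINALIZATION_INTERVAL_MICROS has elapsed past its
// timestamp, so only the final, settled form of the query gets counted.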
// 3 letter search
pq.reset_local_counts();
pq.add("f", "f", true, "0", now_ts_us+1);
pq.add("fo", "fo", true, "0", now_ts_us+2);
pq.add("foo", "foo", true, "0", now_ts_us+3);
pq.compact_user_queries(now_ts_us + QueryAnalytics::QUERY_FINALIZATION_INTERVAL_MICROS + 100);
queries = pq.get_user_prefix_queries();
ASSERT_EQ(0, queries.size());
local_counts = pq.get_local_counts();
ASSERT_EQ(1, local_counts.size());
ASSERT_EQ(1, local_counts.count("foo"));
ASSERT_EQ(1, local_counts["foo"]);
// 3 letter search + start of next search
pq.reset_local_counts();
pq.add("f", "f", true, "0", now_ts_us+1);
pq.add("fo", "fo", true, "0", now_ts_us+2);
pq.add("foo", "foo", true, "0", now_ts_us+3);
pq.add("b", "b", true, "0", now_ts_us + 3 + QueryAnalytics::QUERY_FINALIZATION_INTERVAL_MICROS + 100);
pq.compact_user_queries(now_ts_us + 3 + QueryAnalytics::QUERY_FINALIZATION_INTERVAL_MICROS + 100 + 1);
queries = pq.get_user_prefix_queries();
ASSERT_EQ(1, queries.size());
ASSERT_EQ(1, queries["0"].size());
ASSERT_EQ("b", queries["0"][0].query);
local_counts = pq.get_local_counts();
ASSERT_EQ(1, local_counts.size());
ASSERT_EQ(1, local_counts.count("foo"));
ASSERT_EQ(1, local_counts["foo"]);
// continue with that query
auto prev_ts = now_ts_us + 3 + QueryAnalytics::QUERY_FINALIZATION_INTERVAL_MICROS + 100 + 1;
pq.add("ba", "ba", true, "0", prev_ts+1);
pq.add("bar", "bar", true, "0", prev_ts+2);
pq.compact_user_queries(prev_ts + 2 + QueryAnalytics::QUERY_FINALIZATION_INTERVAL_MICROS + 1);
queries = pq.get_user_prefix_queries();
ASSERT_EQ(0, queries.size());
local_counts = pq.get_local_counts();
ASSERT_EQ(2, local_counts.size());
ASSERT_EQ(1, local_counts.count("bar"));
ASSERT_EQ(1, local_counts["bar"]);
}
| 3,356 | C++ | .cpp | 75 | 39.906667 | 106 | 0.643928 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,736 | or_iterator_test.cpp | typesense_typesense/test/or_iterator_test.cpp |
#include <gtest/gtest.h>
#include <or_iterator.h>
#include <posting_list.h>
#include <posting.h>
#include <filter_result_iterator.h>
#include "logger.h"
TEST(OrIteratorTest, IntersectTwoListsWith3SubLists) {
std::vector<uint32_t> offsets = {0, 1, 3};
std::vector<std::vector<uint32_t>> plists = {
{0, 2, 3, 20},
{1, 3, 5, 10, 20},
{2, 3, 6, 7, 20}
};
std::vector<posting_list_t*> postings1(plists.size());
for(size_t i = 0; i < plists.size(); i++) {
postings1[i] = new posting_list_t(2);
for(auto n: plists[i]) {
postings1[i]->upsert(n, offsets);
}
}
std::vector<std::vector<uint32_t>> ulists = {
{0, 1, 5, 20},
{1, 2, 7, 11, 15},
{3, 5, 10, 11, 12}
};
std::vector<posting_list_t*> postings2(ulists.size());
for(size_t i = 0; i < ulists.size(); i++) {
postings2[i] = new posting_list_t(2);
for(auto n: ulists[i]) {
postings2[i]->upsert(n, offsets);
}
}
std::vector<posting_list_t::iterator_t> pits1;
for(auto& posting_list: postings1) {
pits1.push_back(posting_list->new_iterator());
}
std::vector<posting_list_t::iterator_t> pits2;
for(auto& posting_list: postings2) {
pits2.push_back(posting_list->new_iterator());
}
or_iterator_t it1(pits1);
or_iterator_t it2(pits2);
std::vector<or_iterator_t> or_its;
or_its.push_back(std::move(it1));
or_its.push_back(std::move(it2));
result_iter_state_t istate;
std::vector<uint32_t> results;
or_iterator_t::intersect(or_its, istate,
[&results](const single_filter_result_t& filter_result, std::vector<or_iterator_t>& its) {
results.push_back(filter_result.seq_id);
});
ASSERT_EQ(8, results.size());
std::vector<uint32_t> expected_results = {0, 1, 2, 3, 5, 7, 10, 20};
for(size_t i = 0; i < expected_results.size(); i++) {
ASSERT_EQ(expected_results[i], results[i]);
}
for(auto p: postings1) {
delete p;
}
for(auto p: postings2) {
delete p;
}
}
TEST(OrIteratorTest, IntersectTwoListsWith4SubLists) {
std::vector<uint32_t> offsets = {0, 1, 3};
std::vector<std::vector<uint32_t>> plists = {
{817, 2099, 2982, 3199, 5456, 6414, 8178, 8284, 8561, 10345, 13662, 14021, 15292},
{9514},
{5758, 13357}
};
std::vector<posting_list_t*> postings1(plists.size());
for(size_t i = 0; i < plists.size(); i++) {
postings1[i] = new posting_list_t(2);
for(auto n: plists[i]) {
postings1[i]->upsert(n, offsets);
}
}
std::vector<std::vector<uint32_t>> ulists = {
{15156},
{242, 403, 431, 449, 469, 470, 471, 474, 476, 522, 538, 616, 684, 690, 789, 797, 841, 961, 970, 981, 1012, 1016, 1073, 1106, 1115, 1153, 1256, 1282, 1291, 1306, 1313, 1317, 1454, 1530, 1555, 1558, 1583, 1594, 1596, 1650, 1652, 1669, 1686, 1718, 1805, 1809, 1811, 1816, 1840, 1854, 1879, 1887, 1939, 1983, 2041, 2049, 2091, 2138, 2152, 2220, 2350, 2409, 2459, 2491, 2507, 2545, 2687, 2740, 2754, 2789, 2804, 2907, 2933, 2935, 2964, 2970, 3024, 3084, 3126, 3149, 3177, 3199, 3227, 3271, 3300, 3314, 3403, 3547, 3575, 3587, 3601, 3692, 3804, 3833, 3834, 3928, 4019, 4022, 4045, 4086, 4135, 4145, 4232, 4444, 4451, 4460, 4467, 4578, 4588, 4632, 4709, 4721, 4757, 4777, 4833, 4880, 4927, 4996, 5117, 5133, 5156, 5158, 5288, 5311, 5361, 5558, 5649, 5654, 5658, 5666, 5794, 5818, 5829, 5852, 5857, 5859, 5893, 5909, 5959, 5970, 5983, 5986, 6009, 6016, 6020, 6189, 6192, 6202, 6308, 6326, 6365, 6402, 6414, 6416, 6433, 6448, 6454, 6460, 6583, 6589, 6702, 7006, 7010, 7273, 7335, 7340, 7677, 7678, 7722, 7775, 7807, 7861, 7903, 7950, 7975, 8123, 8201, 8288, 8359, 8373, 8392, 8497, 8502, 8566, 8613, 8635, 8720, 8827, 8847, 8873, 9079, 9374, 9394, 9404, 9486, 9587, 9796, 9859, 9958, 10054, 10101, 10105, 10120, 10135, 10180, 10234, 10246, 10299, 10400, 10777, 11213, 11361, 11776, 11888, 12054, 12133, 12506, 12957, 12959, 12985, 13046, 13054, 13189, 13299, 13316, 13324, 13377, 13657, 13734, 14563, 14651, 14666, 14681, 14688, 14700, 14729, 14849, 14983, 14985, 15003, 15046, 15049, 15052, 15056, 15077, 15156, 15249, 15558, 15583, 15725, 15761, 15770, 15810, 16278, 16588, 17123, 17223,},
{4, 235, 257, 261, 379, 394, 403, 449, 469, 621, 750, 758, 789, 790, 806, 820, 889, 910, 912, 921, 961, 992, 1000, 1005, 1012, 1036, 1153, 1155, 1176, 1394, 1407, 1412, 1450, 1454, 1475, 1486, 1594, 1633, 1650, 1654, 1669, 1675, 1686, 1766, 1871, 1879, 1939, 1983, 2023, 2056, 2197, 2226, 2255, 2332, 2459, 2491, 2507, 2513, 2526, 2538, 2545, 2546, 2567, 2591, 2592, 2749, 2825, 2834, 2843, 2849, 2920, 3013, 3024, 3061, 3062, 3183, 3219, 3319, 3503, 3657, 3667, 3692, 3728, 3740, 3751, 3804, 3807, 3860, 4022, 4112, 4120, 4123, 4135, 4262, 4343, 4375, 4388, 4444, 4467, 4588, 4762, 4829, 4927, 5107, 5109, 5117, 5241, 5288, 5411, 5558, 5654, 5675, 5710, 5744, 5760, 5778, 5781, 5823, 5893, 5974, 5986, 6000, 6009, 6012, 6016, 6067, 6114, 6192, 6222, 6253, 6259, 6287, 6308, 6337, 6338, 6349, 6384, 6387, 6416, 6433, 6442, 6454, 6476, 6576, 6589, 6619, 6719, 6727, 6875, 7084, 7221, 7335, 7340, 7355, 7619, 7670, 7775, 7781, 7861, 7961, 8000, 8017, 8191, 8268, 8363, 8412, 8484, 8737, 8833, 8872, 9121, 9125, 9311, 9322, 9359, 9413, 9491, 9532, 9694, 9735, 9895, 9911, 9958, 10105, 10120, 10180, 10299, 10302, 10318, 10327, 10372, 10375, 10378, 10391, 10394, 10400, 10458, 10487, 10497, 10556, 10564, 10569, 10631, 10657, 10662, 10777, 10781, 10827, 10872, 10873, 10923, 10961, 10975, 11043, 11224, 11702, 11776, 12025, 12149, 12318, 12414, 12565, 12734, 12854, 12945, 12971, 12977, 12997, 13008, 13032, 13054, 13064, 13103, 13143, 13170, 13205, 13209, 13220, 13224, 13255, 13299, 13348, 13357, 13377, 13381, 13385, 13516, 13537, 13588, 13626, 13631, 13643, 13669, 13700, 13752, 13788, 13813, 13817, 13914, 13935, 13974, 13999, 14111, 14236, 14544, 14549, 14627, 14688, 14712, 14985, 15012, 15137, 15148, 15155, 15297, 15302, 15386, 15388, 15416, 15418, 15576, 15583, 15584, 15608, 15636, 15679, 15685, 15686, 15690, 15693, 15742, 15753, 15756, 15762, 15783, 15805, 15810, 15819, 15906, 15910, 16093, 16232, 16278, 16479, 17027, 17123, 17223,}
};
// both lists contain common IDs: 3199, 6414, 13357
std::vector<posting_list_t*> postings2(ulists.size());
for(size_t i = 0; i < ulists.size(); i++) {
postings2[i] = new posting_list_t(2);
for(auto n: ulists[i]) {
postings2[i]->upsert(n, offsets);
}
}
std::vector<posting_list_t::iterator_t> pits1;
for(auto& posting_list: postings1) {
pits1.push_back(posting_list->new_iterator());
}
std::vector<posting_list_t::iterator_t> pits2;
for(auto& posting_list: postings2) {
pits2.push_back(posting_list->new_iterator());
}
or_iterator_t it1(pits1);
or_iterator_t it2(pits2);
std::vector<or_iterator_t> or_its;
or_its.push_back(std::move(it1));
or_its.push_back(std::move(it2));
result_iter_state_t istate;
std::vector<uint32_t> results;
or_iterator_t::intersect(or_its, istate,
[&results](const single_filter_result_t& filter_result, std::vector<or_iterator_t>& its) {
results.push_back(filter_result.seq_id);
});
std::vector<uint32_t> expected_results = {3199, 6414, 13357};
ASSERT_EQ(expected_results.size(), results.size());
for(size_t i = 0; i < expected_results.size(); i++) {
ASSERT_EQ(expected_results[i], results[i]);
}
for(auto p: postings1) {
delete p;
}
for(auto p: postings2) {
delete p;
}
}
TEST(OrIteratorTest, IntersectAndFilterThreeIts) {
std::vector<uint32_t> offsets = {0, 1, 3};
std::vector<std::vector<uint32_t>> id_list = {
{4207, 29159, 47182, 47250, 47337, 48518, 99820,},
{62, 330, 367, 4124, 4207, 4242, 4418, 28740, 29099, 29159, 29284, 40795, 43556, 46779, 47182, 47250, 47322, 48494, 48518, 48633, 98813, 98821, 99069, 99368, 99533, 99670, 99820, 99888, 99973,},
{723, 1504, 29038, 29164, 29390, 30890, 34743, 35067, 36466, 40268, 40965, 42161, 43425, 45188, 47326, 47443, 49319, 53043, 58436, 58774, 61123, 70973, 71393, 81575, 82323, 88301, 88502, 88594, 88690, 88951, 90662, 91016, 91915, 92069, 92844, 99820,}
};
posting_list_t* p1 = new posting_list_t(256);
posting_list_t* p2 = new posting_list_t(256);
posting_list_t* p3 = new posting_list_t(256);
for(auto id: id_list[0]) {
p1->upsert(id, offsets);
}
for(auto id: id_list[1]) {
p2->upsert(id, offsets);
}
for(auto id: id_list[2]) {
p3->upsert(id, offsets);
}
std::vector<posting_list_t::iterator_t> pits1;
std::vector<posting_list_t::iterator_t> pits2;
std::vector<posting_list_t::iterator_t> pits3;
pits1.push_back(p1->new_iterator());
pits2.push_back(p2->new_iterator());
pits3.push_back(p3->new_iterator());
or_iterator_t it1(pits1);
or_iterator_t it2(pits2);
or_iterator_t it3(pits3);
std::vector<or_iterator_t> or_its;
or_its.push_back(std::move(it1));
or_its.push_back(std::move(it2));
or_its.push_back(std::move(it3));
std::vector<uint32_t> filter_ids = {44424, 44425, 44447, 99820, 99834, 99854, 99859, 99963};
result_iter_state_t istate(nullptr, 0, &filter_ids[0], filter_ids.size());
std::vector<uint32_t> results;
or_iterator_t::intersect(or_its, istate,
[&results](const single_filter_result_t& filter_result, std::vector<or_iterator_t>& its) {
results.push_back(filter_result.seq_id);
});
ASSERT_EQ(1, results.size());
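// (The single hit is 99820, the only ID present in all three posting lists and in filter_ids.)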
delete p1;
delete p2;
delete p3;
}
TEST(OrIteratorTest, IntersectAndFilterTwoIts) {
std::vector<uint32_t> offsets = {0, 1, 3};
std::vector<std::vector<uint32_t>> id_list = {
{4207, 29159, 47182, 47250, 47337, 48518, 99820,},
{62, 330, 367, 4124, 4207, 4242, 4418, 28740, 29099, 29159, 29284, 40795, 43556, 46779, 47182, 47250, 47322, 48494, 48518, 48633, 98813, 98821, 99069, 99368, 99533, 99670, 99820, 99888, 99973,},
{723, 1504, 29038, 29164, 29390, 30890, 34743, 35067, 36466, 40268, 40965, 42161, 43425, 45188, 47326, 47443, 49319, 53043, 58436, 58774, 61123, 70973, 71393, 81575, 82323, 88301, 88502, 88594, 88690, 88951, 90662, 91016, 91915, 92069, 92844, 99820,}
};
posting_list_t* p1 = new posting_list_t(256);
posting_list_t* p2 = new posting_list_t(256);
for(auto id: id_list[0]) {
p1->upsert(id, offsets);
}
for(auto id: id_list[1]) {
p2->upsert(id, offsets);
}
std::vector<posting_list_t::iterator_t> pits1;
std::vector<posting_list_t::iterator_t> pits2;
pits1.push_back(p1->new_iterator());
pits2.push_back(p2->new_iterator());
or_iterator_t it1(pits1);
or_iterator_t it2(pits2);
std::vector<or_iterator_t> or_its;
or_its.push_back(std::move(it1));
or_its.push_back(std::move(it2));
std::vector<uint32_t> filter_ids = {44424, 44425, 44447, 99820, 99834, 99854, 99859, 99963};
result_iter_state_t istate(nullptr, 0, &filter_ids[0], filter_ids.size());
std::vector<uint32_t> results;
or_iterator_t::intersect(or_its, istate,
[&results](const single_filter_result_t& filter_result, std::vector<or_iterator_t>& its) {
results.push_back(filter_result.seq_id);
});
ASSERT_EQ(1, results.size());
delete p1;
delete p2;
}
TEST(OrIteratorTest, ContainsAtLeastOne) {
std::vector<uint32_t> ids = {1, 3, 5};
std::vector<or_iterator_t> or_iterators;
std::vector<posting_list_t*> expanded_plists;
posting_list_t p_list1(2);
for (const auto &id: ids) {
p_list1.upsert(id, {1, 2, 3});
}
void* raw_pointer = &p_list1;
posting_t::get_or_iterator(raw_pointer, or_iterators, expanded_plists);
ASSERT_EQ(1, or_iterators.size());
posting_list_t p_list2(2);
ids = {2, 4};
for (const auto &id: ids) {
p_list2.upsert(id, {1, 2, 3});
}
raw_pointer = &p_list2;
posting_t::get_or_iterator(raw_pointer, or_iterators, expanded_plists);
ASSERT_EQ(2, or_iterators.size());
auto found = or_iterator_t::contains_atleast_one(or_iterators,
result_iter_state_t(nullptr, 0, nullptr));
ASSERT_FALSE(found);
or_iterators.clear();
posting_list_t p_list3(2);
ids = {1, 2, 4, 5};
for (const auto &id: ids) {
p_list3.upsert(id, {1, 2, 3});
}
raw_pointer = &p_list1;
posting_t::get_or_iterator(raw_pointer, or_iterators, expanded_plists);
raw_pointer = &p_list3;
posting_t::get_or_iterator(raw_pointer, or_iterators, expanded_plists);
ASSERT_EQ(2, or_iterators.size());
found = or_iterator_t::contains_atleast_one(or_iterators,
result_iter_state_t(nullptr, 0, nullptr));
ASSERT_TRUE(found);
ASSERT_EQ(1, or_iterators.front().id()); // Match found on id 1
or_iterators.clear();
raw_pointer = &p_list1;
posting_t::get_or_iterator(raw_pointer, or_iterators, expanded_plists);
raw_pointer = &p_list3;
posting_t::get_or_iterator(raw_pointer, or_iterators, expanded_plists);
ASSERT_EQ(2, or_iterators.size());
auto filter_ids = new uint32_t[1]{5};
auto filter_iterator = new filter_result_iterator_t(filter_ids, 1);
found = or_iterator_t::contains_atleast_one(or_iterators,
result_iter_state_t(nullptr, 0, filter_iterator));
ASSERT_TRUE(found);
ASSERT_EQ(5, or_iterators.front().id()); // Match found on id 5
delete filter_iterator;
}
| 13,864 | C++ | .cpp | 251 | 48.131474 | 1,951 | 0.626496 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,737 | facet_index_test.cpp | typesense_typesense/test/facet_index_test.cpp |
#include <gtest/gtest.h>
#include "facet_index.h"
TEST(FacetIndexTest, FacetValueDeletionString) {
facet_index_t findex;
findex.initialize("brand");
std::unordered_map<facet_value_id_t, std::vector<uint32_t>, facet_value_id_t::Hash> fvalue_to_seq_ids;
std::unordered_map<uint32_t, std::vector<facet_value_id_t>> seq_id_to_fvalues;
facet_value_id_t nike("nike", 1);
fvalue_to_seq_ids[nike] = {0, 1, 2};
seq_id_to_fvalues[0] = {nike};
seq_id_to_fvalues[1] = {nike};
seq_id_to_fvalues[2] = {nike};
field brandf("brand", field_types::STRING, true);
nlohmann::json doc;
doc["brand"] = "nike";
findex.insert("brand", fvalue_to_seq_ids, seq_id_to_fvalues, true);
ASSERT_EQ(3, findex.facet_val_num_ids("brand", "nike"));
findex.remove(doc, brandf, 0);
findex.remove(doc, brandf, 1);
ASSERT_EQ(1, findex.facet_val_num_ids("brand", "nike"));
findex.remove(doc, brandf, 2);
ASSERT_FALSE(findex.facet_value_exists("brand", "nike"));
}
TEST(FacetIndexTest, HighCardinalityCheck) {
facet_index_t findex;
for(size_t i = 0; i < 101; i++) {
findex.initialize("field_" + std::to_string(i));
}
std::unordered_map<facet_value_id_t, std::vector<uint32_t>, facet_value_id_t::Hash> fvalue_to_seq_ids;
std::unordered_map<uint32_t, std::vector<facet_value_id_t>> seq_id_to_fvalues;
facet_value_id_t nike("nike", 1);
fvalue_to_seq_ids[nike] = {0, 1, 2};
seq_id_to_fvalues[0] = {nike};
seq_id_to_fvalues[1] = {nike};
seq_id_to_fvalues[2] = {nike};
findex.insert("field_1", fvalue_to_seq_ids, seq_id_to_fvalues, true);
ASSERT_EQ(3, findex.facet_val_num_ids("field_1", "nike"));
findex.check_for_high_cardinality("field_1", 20000);
ASSERT_TRUE(findex.facet_value_exists("field_1", "nike"));
ASSERT_EQ(0, findex.facet_val_num_ids("field_1", "nike"));
}
TEST(FacetIndexTest, FacetValueDeletionOfLongString) {
facet_index_t findex;
findex.initialize("brand");
std::unordered_map<facet_value_id_t, std::vector<uint32_t>, facet_value_id_t::Hash> fvalue_to_seq_ids;
std::unordered_map<uint32_t, std::vector<facet_value_id_t>> seq_id_to_fvalues;
std::string longval;
for(size_t i = 0; i < 200; i++) {
longval += "a";
}
facet_value_id_t longfval(longval.substr(0, 100), 1);
fvalue_to_seq_ids[longfval] = {0, 1, 2};
seq_id_to_fvalues[0] = {longfval};
seq_id_to_fvalues[1] = {longfval};
seq_id_to_fvalues[2] = {longfval};
field brandf("brand", field_types::STRING, true);
nlohmann::json doc;
doc["brand"] = longval;
findex.insert("brand", fvalue_to_seq_ids, seq_id_to_fvalues, true);
ASSERT_EQ(3, findex.facet_val_num_ids("brand", longval.substr(0, 100)));
findex.remove(doc, brandf, 0);
findex.remove(doc, brandf, 1);
ASSERT_EQ(1, findex.facet_val_num_ids("brand", longval.substr(0, 100)));
findex.remove(doc, brandf, 2);
ASSERT_FALSE(findex.facet_value_exists("brand", longval.substr(0, 100)));
}
TEST(FacetIndexTest, FacetValueDeletionFloat) {
facet_index_t findex;
findex.initialize("price");
std::unordered_map<facet_value_id_t, std::vector<uint32_t>, facet_value_id_t::Hash> fvalue_to_seq_ids;
std::unordered_map<uint32_t, std::vector<facet_value_id_t>> seq_id_to_fvalues;
facet_value_id_t price1("99.95", 1);
fvalue_to_seq_ids[price1] = {0, 1, 2};
seq_id_to_fvalues[0] = {price1};
seq_id_to_fvalues[1] = {price1};
seq_id_to_fvalues[2] = {price1};
field pricef("price", field_types::FLOAT, true);
nlohmann::json doc;
doc["price"] = 99.95;
findex.insert("price", fvalue_to_seq_ids, seq_id_to_fvalues, true);
ASSERT_EQ(3, findex.facet_val_num_ids("price", "99.95"));
findex.remove(doc, pricef, 0);
findex.remove(doc, pricef, 1);
ASSERT_EQ(1, findex.facet_val_num_ids("price", "99.95"));
findex.remove(doc, pricef, 2);
ASSERT_FALSE(findex.facet_value_exists("price", "99.95"));
}
| 3,993 | C++ | .cpp | 87 | 40.885057 | 106 | 0.655048 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,738 | posting_list_test.cpp | typesense_typesense/test/posting_list_test.cpp |
#include <gtest/gtest.h>
#include "posting.h"
#include "array_utils.h"
#include <chrono>
#include <vector>
class PostingListTest : public ::testing::Test {
protected:
ThreadPool* pool;
virtual void SetUp() {
pool = new ThreadPool(4);
}
virtual void TearDown() {
pool->shutdown();
delete pool;
}
};
TEST_F(PostingListTest, Insert) {
std::vector<uint32_t> offsets = {0, 1, 3};
posting_list_t pl(5);
// insert elements sequentially
for(size_t i = 0; i < 15; i++) {
pl.upsert(i, offsets);
}
posting_list_t::block_t* root = pl.get_root();
ASSERT_EQ(5, root->ids.getLength());
ASSERT_EQ(5, root->next->ids.getLength());
ASSERT_EQ(5, root->next->next->ids.getLength());
ASSERT_EQ(root->next->next->next, nullptr);
ASSERT_EQ(3, pl.num_blocks());
ASSERT_EQ(15, pl.num_ids());
ASSERT_EQ(root, pl.block_of(4));
ASSERT_EQ(root->next, pl.block_of(9));
ASSERT_EQ(root->next->next, pl.block_of(14));
// insert alternate values
posting_list_t pl2(5);
for(size_t i = 0; i < 15; i+=2) {
// [0, 2, 4, 6, 8], [10, 12, 14]
pl2.upsert(i, offsets);
}
root = pl2.get_root();
ASSERT_EQ(5, root->ids.getLength());
ASSERT_EQ(3, root->next->ids.getLength());
ASSERT_EQ(root->next->next, nullptr);
ASSERT_EQ(2, pl2.num_blocks());
ASSERT_EQ(8, pl2.num_ids());
ASSERT_EQ(root, pl2.block_of(8));
ASSERT_EQ(root->next, pl2.block_of(14));
// insert in the middle
// case 1
posting_list_t pl3(5);
for(size_t i = 0; i < 5; i++) {
pl3.upsert(i, offsets);
}
pl3.upsert(6, offsets);
pl3.upsert(8, offsets);
pl3.upsert(9, offsets);
pl3.upsert(10, offsets);
pl3.upsert(12, offsets);
ASSERT_EQ(10, pl3.num_ids());
// [0,1,2,3,4], [6,8,9,10,12]
pl3.upsert(5, offsets);
ASSERT_EQ(3, pl3.num_blocks());
ASSERT_EQ(11, pl3.num_ids());
ASSERT_EQ(5, pl3.get_root()->ids.getLength());
ASSERT_EQ(3, pl3.get_root()->next->ids.getLength());
ASSERT_EQ(8, pl3.get_root()->next->ids.last());
ASSERT_EQ(3, pl3.get_root()->next->next->ids.getLength());
ASSERT_EQ(12, pl3.get_root()->next->next->ids.last());
for(size_t i = 0; i < pl3.get_root()->next->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl3.get_root()->next->offset_index.at(i));
}
for(size_t i = 0; i < pl3.get_root()->next->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl3.get_root()->next->offsets.at(i));
}
// case 2
posting_list_t pl4(5);
for(size_t i = 0; i < 5; i++) {
pl4.upsert(i, offsets);
}
pl4.upsert(6, offsets);
pl4.upsert(8, offsets);
pl4.upsert(9, offsets);
pl4.upsert(10, offsets);
pl4.upsert(12, offsets);
// [0,1,2,3,4], [6,8,9,10,12]
pl4.upsert(11, offsets);
ASSERT_EQ(3, pl4.num_blocks());
ASSERT_EQ(11, pl4.num_ids());
ASSERT_EQ(5, pl4.get_root()->ids.getLength());
ASSERT_EQ(3, pl4.get_root()->next->ids.getLength());
ASSERT_EQ(9, pl4.get_root()->next->ids.last());
ASSERT_EQ(3, pl4.get_root()->next->next->ids.getLength());
ASSERT_EQ(12, pl4.get_root()->next->next->ids.last());
for(size_t i = 0; i < pl4.get_root()->next->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl4.get_root()->next->offset_index.at(i));
}
for(size_t i = 0; i < pl4.get_root()->next->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl4.get_root()->next->offsets.at(i));
}
}
TEST_F(PostingListTest, InsertInMiddle) {
posting_list_t pl(3);
pl.upsert(1, {1});
pl.upsert(3, {3});
pl.upsert(2, {2});
ASSERT_EQ(1, pl.get_root()->ids.at(0));
ASSERT_EQ(2, pl.get_root()->ids.at(1));
ASSERT_EQ(3, pl.get_root()->ids.at(2));
ASSERT_EQ(0, pl.get_root()->offset_index.at(0));
ASSERT_EQ(1, pl.get_root()->offset_index.at(1));
ASSERT_EQ(2, pl.get_root()->offset_index.at(2));
ASSERT_EQ(1, pl.get_root()->offsets.at(0));
ASSERT_EQ(2, pl.get_root()->offsets.at(1));
ASSERT_EQ(3, pl.get_root()->offsets.at(2));
}
TEST_F(PostingListTest, InplaceUpserts) {
std::vector<uint32_t> offsets = {1, 2, 3};
posting_list_t pl(5);
pl.upsert(2, offsets);
pl.upsert(5, offsets);
pl.upsert(7, offsets);
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(3, pl.num_ids());
ASSERT_EQ(3, pl.get_root()->ids.getLength());
ASSERT_EQ(9, pl.get_root()->offsets.getLength());
// update starting ID with same length of offsets
pl.upsert(2, {1, 2, 4});
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(3, pl.num_ids());
ASSERT_EQ(3, pl.get_root()->ids.getLength());
ASSERT_EQ(9, pl.get_root()->offsets.getLength());
ASSERT_EQ(1, pl.get_root()->offsets.at(0));
ASSERT_EQ(2, pl.get_root()->offsets.at(1));
ASSERT_EQ(4, pl.get_root()->offsets.at(2));
ASSERT_EQ(4, pl.get_root()->offsets.getMax());
ASSERT_EQ(1, pl.get_root()->offsets.getMin());
ASSERT_EQ(0, pl.get_root()->offset_index.at(0));
ASSERT_EQ(3, pl.get_root()->offset_index.at(1));
ASSERT_EQ(6, pl.get_root()->offset_index.at(2));
// update starting ID with smaller number of offsets
pl.upsert(2, {5, 7});
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(3, pl.num_ids());
ASSERT_EQ(3, pl.get_root()->ids.getLength());
ASSERT_EQ(8, pl.get_root()->offsets.getLength());
ASSERT_EQ(5, pl.get_root()->offsets.at(0));
ASSERT_EQ(7, pl.get_root()->offsets.at(1));
ASSERT_EQ(1, pl.get_root()->offsets.at(2));
ASSERT_EQ(7, pl.get_root()->offsets.getMax());
ASSERT_EQ(1, pl.get_root()->offsets.getMin());
ASSERT_EQ(0, pl.get_root()->offset_index.at(0));
ASSERT_EQ(2, pl.get_root()->offset_index.at(1));
ASSERT_EQ(5, pl.get_root()->offset_index.at(2));
// update starting ID with larger number of offsets
pl.upsert(2, {0, 2, 8});
ASSERT_EQ(3, pl.num_ids());
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(3, pl.get_root()->ids.getLength());
ASSERT_EQ(9, pl.get_root()->offsets.getLength());
ASSERT_EQ(0, pl.get_root()->offsets.at(0));
ASSERT_EQ(2, pl.get_root()->offsets.at(1));
ASSERT_EQ(8, pl.get_root()->offsets.at(2));
ASSERT_EQ(1, pl.get_root()->offsets.at(3));
ASSERT_EQ(8, pl.get_root()->offsets.getMax());
ASSERT_EQ(0, pl.get_root()->offsets.getMin());
ASSERT_EQ(0, pl.get_root()->offset_index.at(0));
ASSERT_EQ(3, pl.get_root()->offset_index.at(1));
ASSERT_EQ(6, pl.get_root()->offset_index.at(2));
// update middle ID with smaller number of offsets
pl.upsert(5, {1, 10});
ASSERT_EQ(3, pl.num_ids());
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(3, pl.get_root()->ids.getLength());
ASSERT_EQ(8, pl.get_root()->offsets.getLength());
ASSERT_EQ(0, pl.get_root()->offsets.at(0));
ASSERT_EQ(2, pl.get_root()->offsets.at(1));
ASSERT_EQ(8, pl.get_root()->offsets.at(2));
ASSERT_EQ(1, pl.get_root()->offsets.at(3));
ASSERT_EQ(10, pl.get_root()->offsets.at(4));
ASSERT_EQ(10, pl.get_root()->offsets.getMax());
ASSERT_EQ(0, pl.get_root()->offsets.getMin());
ASSERT_EQ(0, pl.get_root()->offset_index.at(0));
ASSERT_EQ(3, pl.get_root()->offset_index.at(1));
ASSERT_EQ(5, pl.get_root()->offset_index.at(2));
// update middle ID with larger number of offsets
pl.upsert(5, {2, 4, 12});
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(3, pl.num_ids());
ASSERT_EQ(3, pl.get_root()->ids.getLength());
ASSERT_EQ(9, pl.get_root()->offsets.getLength());
ASSERT_EQ(0, pl.get_root()->offsets.at(0));
ASSERT_EQ(2, pl.get_root()->offsets.at(1));
ASSERT_EQ(8, pl.get_root()->offsets.at(2));
ASSERT_EQ(2, pl.get_root()->offsets.at(3));
ASSERT_EQ(4, pl.get_root()->offsets.at(4));
ASSERT_EQ(12, pl.get_root()->offsets.at(5));
ASSERT_EQ(1, pl.get_root()->offsets.at(6));
ASSERT_EQ(2, pl.get_root()->offsets.at(7));
ASSERT_EQ(3, pl.get_root()->offsets.at(8));
ASSERT_EQ(12, pl.get_root()->offsets.getMax());
ASSERT_EQ(0, pl.get_root()->offsets.getMin());
ASSERT_EQ(0, pl.get_root()->offset_index.at(0));
ASSERT_EQ(3, pl.get_root()->offset_index.at(1));
ASSERT_EQ(6, pl.get_root()->offset_index.at(2));
// update last ID with smaller number of offsets
pl.upsert(7, {3});
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(3, pl.num_ids());
ASSERT_EQ(3, pl.get_root()->ids.getLength());
ASSERT_EQ(7, pl.get_root()->offsets.getLength());
ASSERT_EQ(0, pl.get_root()->offsets.at(0));
ASSERT_EQ(2, pl.get_root()->offsets.at(1));
ASSERT_EQ(8, pl.get_root()->offsets.at(2));
ASSERT_EQ(2, pl.get_root()->offsets.at(3));
ASSERT_EQ(4, pl.get_root()->offsets.at(4));
ASSERT_EQ(12, pl.get_root()->offsets.at(5));
ASSERT_EQ(3, pl.get_root()->offsets.at(6));
ASSERT_EQ(12, pl.get_root()->offsets.getMax());
ASSERT_EQ(0, pl.get_root()->offsets.getMin());
ASSERT_EQ(0, pl.get_root()->offset_index.at(0));
ASSERT_EQ(3, pl.get_root()->offset_index.at(1));
ASSERT_EQ(6, pl.get_root()->offset_index.at(2));
// update last ID with larger number of offsets
pl.upsert(7, {5, 20});
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(3, pl.num_ids());
ASSERT_EQ(3, pl.get_root()->ids.getLength());
ASSERT_EQ(8, pl.get_root()->offsets.getLength());
ASSERT_EQ(0, pl.get_root()->offsets.at(0));
ASSERT_EQ(2, pl.get_root()->offsets.at(1));
ASSERT_EQ(8, pl.get_root()->offsets.at(2));
ASSERT_EQ(2, pl.get_root()->offsets.at(3));
ASSERT_EQ(4, pl.get_root()->offsets.at(4));
ASSERT_EQ(12, pl.get_root()->offsets.at(5));
ASSERT_EQ(5, pl.get_root()->offsets.at(6));
ASSERT_EQ(20, pl.get_root()->offsets.at(7));
ASSERT_EQ(20, pl.get_root()->offsets.getMax());
ASSERT_EQ(0, pl.get_root()->offsets.getMin());
ASSERT_EQ(0, pl.get_root()->offset_index.at(0));
ASSERT_EQ(3, pl.get_root()->offset_index.at(1));
ASSERT_EQ(6, pl.get_root()->offset_index.at(2));
}
TEST_F(PostingListTest, RemovalsOnFirstBlock) {
std::vector<uint32_t> offsets = {0, 1, 3};
posting_list_t pl(5);
ASSERT_EQ(0, pl.num_blocks());
ASSERT_EQ(0, pl.num_ids());
// try to erase when posting list is empty
pl.erase(0);
ASSERT_FALSE(pl.contains(0));
ASSERT_EQ(0, pl.num_ids());
ASSERT_EQ(0, pl.num_blocks());
// insert a single element and erase it
pl.upsert(0, offsets);
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(1, pl.num_ids());
pl.erase(0);
ASSERT_EQ(0, pl.num_blocks());
ASSERT_EQ(0, pl.num_ids());
ASSERT_EQ(0, pl.get_root()->ids.getLength());
ASSERT_EQ(0, pl.get_root()->offset_index.getLength());
ASSERT_EQ(0, pl.get_root()->offsets.getLength());
// insert until one past max block size
for(size_t i = 0; i < 6; i++) {
pl.upsert(i, offsets);
}
ASSERT_EQ(2, pl.num_blocks());
ASSERT_EQ(6, pl.num_ids());
ASSERT_TRUE(pl.contains(2));
ASSERT_TRUE(pl.contains(5));
ASSERT_FALSE(pl.contains(6));
ASSERT_FALSE(pl.contains(1000));
// delete non-existing element
pl.erase(1000);
ASSERT_EQ(6, pl.num_ids());
// delete elements from first block: blocks should not be merged until it falls below 50% occupancy
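// (With a max block size of 5, "below 50% occupancy" appears to mean size < 5/2 == 2
// under integer division, i.e. the merge further below only fires once this block is
// down to a single ID. This is inferred from the assertions, not from posting_list_t docs.)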
pl.erase(1);
ASSERT_EQ(2, pl.num_blocks());
ASSERT_EQ(5, pl.num_ids());
// [0, 2, 3, 4], [5]
for(size_t i = 0; i < pl.get_root()->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl.get_root()->offset_index.at(i));
}
for(size_t i = 0; i < pl.get_root()->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl.get_root()->offsets.at(i));
}
pl.erase(2);
ASSERT_EQ(2, pl.num_blocks());
pl.erase(3);
ASSERT_EQ(3, pl.num_ids());
// [0, 4], [5]
ASSERT_EQ(2, pl.num_blocks());
ASSERT_EQ(2, pl.get_root()->size());
ASSERT_EQ(1, pl.get_root()->next->size());
ASSERT_EQ(pl.get_root(), pl.block_of(4));
ASSERT_EQ(pl.get_root()->next, pl.block_of(5));
for(size_t i = 0; i < pl.get_root()->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl.get_root()->offset_index.at(i));
}
for(size_t i = 0; i < pl.get_root()->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl.get_root()->offsets.at(i));
}
pl.erase(4); // this will trigger the merge
// [0, 5]
// ensure that merge has happened
ASSERT_EQ(2, pl.num_ids());
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(pl.get_root(), pl.block_of(5));
ASSERT_EQ(nullptr, pl.get_root()->next);
ASSERT_EQ(2, pl.get_root()->size());
for(size_t i = 0; i < pl.get_root()->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl.get_root()->offset_index.at(i));
}
for(size_t i = 0; i < pl.get_root()->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl.get_root()->offsets.at(i));
}
}
TEST_F(PostingListTest, RemovalsOnLaterBlocks) {
std::vector<uint32_t> offsets = {0, 1, 3};
posting_list_t pl(5);
// insert until one past max block size
for(size_t i = 0; i < 6; i++) {
pl.upsert(i, offsets);
}
// erase last element of last, non-first block
pl.erase(5);
ASSERT_EQ(5, pl.num_ids());
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(5, pl.get_root()->size());
ASSERT_EQ(4, pl.get_root()->ids.last());
ASSERT_EQ(nullptr, pl.get_root()->next);
for(size_t i = 0; i < pl.get_root()->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl.get_root()->offset_index.at(i));
}
for(size_t i = 0; i < pl.get_root()->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl.get_root()->offsets.at(i));
}
// erase last element of the only block when the block is at least half full
pl.erase(4);
ASSERT_EQ(4, pl.num_ids());
ASSERT_EQ(1, pl.num_blocks());
ASSERT_EQ(4, pl.get_root()->size());
ASSERT_EQ(3, pl.get_root()->ids.last());
ASSERT_EQ(pl.get_root(), pl.block_of(3));
for(size_t i = 4; i < 15; i++) {
pl.upsert(i, offsets);
}
// [0..4], [5..9], [10..14]
pl.erase(5);
pl.erase(6);
pl.erase(7);
ASSERT_EQ(12, pl.num_ids());
for(size_t i = 0; i < pl.get_root()->next->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl.get_root()->next->offset_index.at(i));
}
for(size_t i = 0; i < pl.get_root()->next->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl.get_root()->next->offsets.at(i));
}
for(size_t i = 0; i < pl.get_root()->next->next->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl.get_root()->next->next->offset_index.at(i));
}
for(size_t i = 0; i < pl.get_root()->next->next->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl.get_root()->next->next->offsets.at(i));
}
// only part of the next node contents can be moved over when we delete 8 since (1 + 5) > 5
pl.erase(8);
// [0..4], [9], [10..14] => [0..4], [9,10,11], [12,13,14]
ASSERT_EQ(3, pl.num_blocks());
ASSERT_EQ(11, pl.num_ids());
ASSERT_EQ(3, pl.get_root()->next->size());
ASSERT_EQ(3, pl.get_root()->next->next->size());
ASSERT_EQ(11, pl.get_root()->next->ids.last());
ASSERT_EQ(14, pl.get_root()->next->next->ids.last());
for(size_t i = 0; i < pl.get_root()->next->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl.get_root()->next->offset_index.at(i));
}
for(size_t i = 0; i < pl.get_root()->next->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl.get_root()->next->offsets.at(i));
}
for(size_t i = 0; i < pl.get_root()->next->next->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl.get_root()->next->next->offset_index.at(i));
}
for(size_t i = 0; i < pl.get_root()->next->next->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl.get_root()->next->next->offsets.at(i));
}
}
TEST_F(PostingListTest, OutOfOrderUpserts) {
std::vector<uint32_t> offsets = {0, 1, 3};
posting_list_t pl(5);
for(int i = 5; i > 0; i--) {
pl.upsert(i, offsets);
}
pl.upsert(0, offsets);
pl.upsert(200000, offsets);
ASSERT_EQ(2, pl.num_blocks());
ASSERT_EQ(3, pl.get_root()->size());
ASSERT_EQ(4, pl.get_root()->next->size());
for(size_t i = 0; i < pl.get_root()->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl.get_root()->offset_index.at(i));
}
for(size_t i = 0; i < pl.get_root()->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl.get_root()->offsets.at(i));
}
for(size_t i = 0; i < pl.get_root()->next->offset_index.getLength(); i++) {
ASSERT_EQ(i * 3, pl.get_root()->next->offset_index.at(i));
}
for(size_t i = 0; i < pl.get_root()->next->offsets.getLength(); i++) {
ASSERT_EQ(offsets[i % 3], pl.get_root()->next->offsets.at(i));
}
}
TEST_F(PostingListTest, RandomInsertAndDeletes) {
time_t t;
srand((unsigned) time(&t));
posting_list_t pl(100);
std::vector<uint32_t> offsets1 = {0, 1, 3};
std::vector<uint32_t> offsets2 = {10, 12};
std::vector<uint32_t> ids;
for(size_t i = 0; i < 100000; i++) {
ids.push_back(rand() % 100000);
}
size_t index = 0;
for(auto id: ids) {
const std::vector<uint32_t>& offsets = (index % 2 == 0) ? offsets1 : offsets2;
pl.upsert(id, offsets);
index++;
}
for(size_t i = 0; i < 10000; i++) {
pl.erase(rand() % 100000);
}
ASSERT_GT(pl.num_blocks(), 750);
ASSERT_LT(pl.num_blocks(), 1000);
}
TEST_F(PostingListTest, MergeBasics) {
std::vector<uint32_t> offsets = {0, 1, 3};
std::vector<posting_list_t*> lists;
// [0, 2] [3, 20]
// [1, 3], [5, 10], [20]
// [2, 3], [5, 7], [20]
posting_list_t p1(2);
p1.upsert(0, offsets);
p1.upsert(2, offsets);
p1.upsert(3, offsets);
p1.upsert(20, offsets);
posting_list_t p2(2);
p2.upsert(1, offsets);
p2.upsert(3, offsets);
p2.upsert(5, offsets);
p2.upsert(10, offsets);
p2.upsert(20, offsets);
posting_list_t p3(2);
p3.upsert(2, offsets);
p3.upsert(3, offsets);
p3.upsert(5, offsets);
p3.upsert(7, offsets);
p3.upsert(20, offsets);
lists.push_back(&p1);
lists.push_back(&p2);
lists.push_back(&p3);
std::vector<uint32_t> result_ids;
posting_list_t::merge(lists, result_ids);
std::vector<uint32_t> expected_ids = {0, 1, 2, 3, 5, 7, 10, 20};
ASSERT_EQ(expected_ids.size(), result_ids.size());
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], result_ids[i]);
}
}
TEST_F(PostingListTest, IntersectionBasics) {
std::vector<uint32_t> offsets = {0, 1, 3};
// [0, 2] [3, 20]
// [1, 3], [5, 10], [20]
// [2, 3], [5, 7], [20]
posting_list_t p1(2);
p1.upsert(0, offsets);
p1.upsert(2, offsets);
p1.upsert(3, offsets);
p1.upsert(20, offsets);
posting_list_t p2(2);
p2.upsert(1, offsets);
p2.upsert(3, offsets);
p2.upsert(5, offsets);
p2.upsert(10, offsets);
p2.upsert(20, offsets);
posting_list_t p3(2);
p3.upsert(2, offsets);
p3.upsert(3, offsets);
p3.upsert(5, offsets);
p3.upsert(7, offsets);
p3.upsert(20, offsets);
std::vector<void*> raw_lists = {&p1, &p2, &p3};
std::vector<posting_list_t*> posting_lists = {&p1, &p2, &p3};
std::vector<uint32_t> result_ids;
std::mutex vecm;
posting_list_t::intersect(posting_lists, result_ids);
ASSERT_EQ(2, result_ids.size());
ASSERT_EQ(3, result_ids[0]);
ASSERT_EQ(20, result_ids[1]);
std::vector<posting_list_t::iterator_t> its;
result_iter_state_t iter_state;
result_ids.clear();
posting_t::block_intersector_t(raw_lists, iter_state).intersect([&](auto id, auto& its){
std::unique_lock lk(vecm);
result_ids.push_back(id);
});
std::sort(result_ids.begin(), result_ids.end());
ASSERT_EQ(2, result_ids.size());
ASSERT_EQ(3, result_ids[0]);
ASSERT_EQ(20, result_ids[1]);
// single item intersection
std::vector<posting_list_t*> single_item_list = {&p1};
result_ids.clear();
posting_list_t::intersect(single_item_list, result_ids);
std::vector<uint32_t> expected_ids = {0, 2, 3, 20};
ASSERT_EQ(expected_ids.size(), result_ids.size());
for(size_t i = 0; i < expected_ids.size(); i++) {
ASSERT_EQ(expected_ids[i], result_ids[i]);
}
result_iter_state_t iter_state2;
result_ids.clear();
raw_lists = {&p1};
posting_t::block_intersector_t(raw_lists, iter_state2).intersect([&](auto id, auto& its){
std::unique_lock lk(vecm);
result_ids.push_back(id);
});
std::sort(result_ids.begin(), result_ids.end()); // order is not guaranteed because of concurrent intersection
ASSERT_EQ(4, result_ids.size());
ASSERT_EQ(0, result_ids[0]);
ASSERT_EQ(2, result_ids[1]);
ASSERT_EQ(3, result_ids[2]);
ASSERT_EQ(20, result_ids[3]);
// empty intersection list
std::vector<posting_list_t*> empty_list;
result_ids.clear();
posting_list_t::intersect(empty_list, result_ids);
ASSERT_EQ(0, result_ids.size());
result_iter_state_t iter_state3;
result_ids.clear();
raw_lists.clear();
posting_t::block_intersector_t(raw_lists, iter_state3).intersect([&](auto id, auto& its){
std::unique_lock lk(vecm);
result_ids.push_back(id);
});
ASSERT_EQ(0, result_ids.size());
}
TEST_F(PostingListTest, ResultsAndOffsetsBasics) {
// NOTE: due to the way the stored offsets are parsed, the actual positions are 1 less than the stored offset values
// (to account for the special offset `0`, which indicates the last offset).
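// For example, the stored list {1, 2, 4} below is read back as positions {0, 1, 3},
// i.e. each stored value minus one; this is just an illustration of the note above.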
std::vector<uint32_t> offsets1 = {1, 2, 4};
std::vector<uint32_t> offsets2 = {5, 6};
std::vector<uint32_t> offsets3 = {7};
std::vector<posting_list_t*> lists;
// T1: [0, 2] [3, 20]
// T2: [1, 3], [5, 10], [20]
// T3: [2, 3], [5, 7], [20]
// 3: (0, 1, 3} {4, 5} {6}
// 2: {6} {4, 5} {0, 1, 3}
std::vector<token_positions_t> actual_offsets_3 = {
token_positions_t{false, {0, 1, 3}},
token_positions_t{false, {4, 5}},
token_positions_t{false, {6}},
};
std::vector<token_positions_t> actual_offsets_20 = {
token_positions_t{false, {6}},
token_positions_t{false, {4, 5}},
token_positions_t{false, {0, 1, 3}},
};
posting_list_t p1(2);
p1.upsert(0, offsets1);
p1.upsert(2, offsets1);
p1.upsert(3, offsets1);
p1.upsert(20, offsets3);
posting_list_t p2(2);
p2.upsert(1, offsets1);
p2.upsert(3, offsets2);
p2.upsert(5, offsets1);
p2.upsert(10, offsets1);
p2.upsert(20, offsets2);
posting_list_t p3(2);
p3.upsert(2, offsets1);
p3.upsert(3, offsets3);
p3.upsert(5, offsets1);
p3.upsert(7, offsets1);
p3.upsert(20, offsets1);
lists.push_back(&p1);
lists.push_back(&p2);
lists.push_back(&p3);
/*
std::vector<posting_list_t::iterator_t> its;
result_iter_state_t iter_state;
bool has_more = posting_list_t::block_intersect(lists, 2, its, iter_state);
ASSERT_FALSE(has_more);
std::vector<std::unordered_map<size_t, std::vector<token_positions_t>>> array_token_positions_vec;
posting_list_t::get_offsets(iter_state, array_token_positions_vec);
ASSERT_EQ(2, array_token_positions_vec.size());
ASSERT_EQ(actual_offsets_3[0].positions, array_token_positions_vec[0].at(0)[0].positions);
ASSERT_EQ(actual_offsets_3[1].positions, array_token_positions_vec[0].at(0)[1].positions);
ASSERT_EQ(actual_offsets_3[2].positions, array_token_positions_vec[0].at(0)[2].positions);
ASSERT_EQ(actual_offsets_20[0].positions, array_token_positions_vec[1].at(0)[0].positions);
ASSERT_EQ(actual_offsets_20[1].positions, array_token_positions_vec[1].at(0)[1].positions);
ASSERT_EQ(actual_offsets_20[2].positions, array_token_positions_vec[1].at(0)[2].positions);
*/
}
TEST_F(PostingListTest, IntersectionSkipBlocks) {
std::vector<uint32_t> offsets = {0, 1, 3};
std::vector<posting_list_t*> lists;
std::vector<uint32_t> p1_ids = {9, 11};
std::vector<uint32_t> p2_ids = {1, 2, 3, 4, 5, 6, 7, 8, 9, 11};
std::vector<uint32_t> p3_ids = {2, 3, 8, 9, 11, 20};
// [9, 11]
// [1, 2], [3, 4], [5, 6], [7, 8], [9, 11]
// [2, 3], [8, 9], [11, 20]
posting_list_t p1(2);
posting_list_t p2(2);
posting_list_t p3(2);
for(auto id: p1_ids) {
p1.upsert(id, offsets);
}
for(auto id: p2_ids) {
p2.upsert(id, offsets);
}
for(auto id: p3_ids) {
p3.upsert(id, offsets);
}
lists.push_back(&p1);
lists.push_back(&p2);
lists.push_back(&p3);
std::vector<uint32_t> result_ids;
posting_list_t::intersect(lists, result_ids);
uint32_t* p1_p2_intersected;
uint32_t* final_results;
size_t temp_len = ArrayUtils::and_scalar(&p1_ids[0], p1_ids.size(), &p2_ids[0], p2_ids.size(), &p1_p2_intersected);
size_t final_len = ArrayUtils::and_scalar(&p3_ids[0], p3_ids.size(), p1_p2_intersected, temp_len, &final_results);
ASSERT_EQ(final_len, result_ids.size());
for(size_t i = 0; i < result_ids.size(); i++) {
ASSERT_EQ(final_results[i], result_ids[i]);
}
delete [] p1_p2_intersected;
delete [] final_results;
}
TEST_F(PostingListTest, PostingListContainsAtleastOne) {
// when posting list is larger than target IDs
posting_list_t p1(100);
for(size_t i = 20; i < 1000; i++) {
p1.upsert(i, {1, 2, 3});
}
std::vector<uint32_t> target_ids1 = {200, 300};
std::vector<uint32_t> target_ids2 = {200, 3000};
std::vector<uint32_t> target_ids3 = {2000, 3000};
ASSERT_TRUE(p1.contains_atleast_one(&target_ids1[0], target_ids1.size()));
ASSERT_TRUE(p1.contains_atleast_one(&target_ids2[0], target_ids2.size()));
ASSERT_FALSE(p1.contains_atleast_one(&target_ids3[0], target_ids3.size()));
// when posting list is smaller than target IDs
posting_list_t p2(2);
for(size_t i = 10; i < 20; i++) {
p2.upsert(i, {1, 2, 3});
}
target_ids1.clear();
for(size_t i = 5; i < 1000; i++) {
target_ids1.push_back(i);
}
target_ids2.clear();
for(size_t i = 25; i < 1000; i++) {
target_ids2.push_back(i);
}
ASSERT_TRUE(p2.contains_atleast_one(&target_ids1[0], target_ids1.size()));
ASSERT_FALSE(p2.contains_atleast_one(&target_ids2[0], target_ids2.size()));
}
TEST_F(PostingListTest, PostingListMergeAdjancentBlocks) {
// when posting list is larger than target IDs
posting_list_t p1(6);
for(size_t i = 0; i < 18; i++) {
p1.upsert(i, {2, 3});
}
p1.erase(0);
p1.erase(1);
// IDs:
//   Before: [4] [6] [6]
//   After:  [6] [4] [6]
// Offsets:
//   Before: [8] [12] [12]
//   After:  [12] [8] [12]
posting_list_t::block_t* next_block = p1.get_root()->next;
posting_list_t::merge_adjacent_blocks(p1.get_root(), next_block, 2);
ASSERT_EQ(6, p1.get_root()->ids.getLength());
ASSERT_EQ(6, p1.get_root()->offset_index.getLength());
ASSERT_EQ(12, p1.get_root()->offsets.getLength());
std::vector<uint32_t> ids = {2, 3, 4, 5, 6, 7};
for(size_t i = 0 ; i < ids.size(); i++) {
auto id = ids[i];
ASSERT_EQ(id, p1.get_root()->ids.at(i));
}
for(size_t i = 0; i < p1.get_root()->offset_index.getLength(); i++) {
ASSERT_EQ(i*2, p1.get_root()->offset_index.at(i));
}
for(size_t i = 0; i < p1.get_root()->offsets.getLength(); i++) {
auto expected_offset = (i % 2 == 0) ? 2 : 3;
ASSERT_EQ(expected_offset, p1.get_root()->offsets.at(i));
}
ASSERT_EQ(4, next_block->ids.getLength());
ASSERT_EQ(4, next_block->offset_index.getLength());
ASSERT_EQ(8, next_block->offsets.getLength());
ids = {8, 9, 10, 11};
for(size_t i = 0 ; i < ids.size(); i++) {
auto id = ids[i];
ASSERT_EQ(id, next_block->ids.at(i));
}
for(size_t i = 0; i < next_block->offset_index.getLength(); i++) {
ASSERT_EQ(i*2, next_block->offset_index.at(i));
}
for(size_t i = 0; i < next_block->offsets.getLength(); i++) {
auto expected_offset = (i % 2 == 0) ? 2 : 3;
ASSERT_EQ(expected_offset, next_block->offsets.at(i));
}
// full merge
posting_list_t::block_t* block1 = next_block;
posting_list_t::block_t* block2 = next_block->next;
posting_list_t::merge_adjacent_blocks(block1, block2, 6);
ASSERT_EQ(10, block1->ids.getLength());
ASSERT_EQ(10, block1->offset_index.getLength());
ASSERT_EQ(20, block1->offsets.getLength());
ids = {8, 9, 10, 11, 12, 13, 14, 15, 16, 17};
for(size_t i = 0; i < ids.size(); i++) {
auto id = ids[i];
ASSERT_EQ(id, block1->ids.at(i));
}
for(size_t i = 0; i < block1->offset_index.getLength(); i++) {
ASSERT_EQ(i*2, block1->offset_index.at(i));
}
for(size_t i = 0; i < block1->offsets.getLength(); i++) {
auto expected_offset = (i % 2 == 0) ? 2 : 3;
ASSERT_EQ(expected_offset, block1->offsets.at(i));
}
ASSERT_EQ(0, block2->ids.getLength());
ASSERT_EQ(0, block2->offset_index.getLength());
ASSERT_EQ(0, block2->offsets.getLength());
}
TEST_F(PostingListTest, PostingListSplitBlock) {
posting_list_t p1(6);
for (size_t i = 0; i < 6; i++) {
p1.upsert(i, {2, 3});
}
posting_list_t::block_t* block1 = p1.get_root();
posting_list_t::block_t block2;
posting_list_t::split_block(block1, &block2);
ASSERT_EQ(3, block1->ids.getLength());
ASSERT_EQ(3, block1->offset_index.getLength());
ASSERT_EQ(6, block1->offsets.getLength());
std::vector<uint32_t> ids = {0, 1, 2};
for(size_t i = 0; i < ids.size(); i++) {
ASSERT_EQ(ids[i], block1->ids.at(i));
}
for(size_t i = 0; i < block1->offset_index.getLength(); i++) {
ASSERT_EQ(i*2, block1->offset_index.at(i));
}
for(size_t i = 0; i < block1->offsets.getLength(); i++) {
auto expected_offset = (i % 2 == 0) ? 2 : 3;
ASSERT_EQ(expected_offset, block1->offsets.at(i));
}
ASSERT_EQ(3, block2.ids.getLength());
ASSERT_EQ(3, block2.offset_index.getLength());
ASSERT_EQ(6, block2.offsets.getLength());
ids = {3, 4, 5};
for(size_t i = 0; i < ids.size(); i++) {
ASSERT_EQ(ids[i], block2.ids.at(i));
}
for(size_t i = 0; i < block2.offset_index.getLength(); i++) {
ASSERT_EQ(i*2, block2.offset_index.at(i));
}
for(size_t i = 0; i < block2.offsets.getLength(); i++) {
auto expected_offset = (i % 2 == 0) ? 2 : 3;
ASSERT_EQ(expected_offset, block2.offsets.at(i));
}
}
TEST_F(PostingListTest, CompactPostingListUpsertAppends) {
uint32_t ids[] = {0, 1000, 1002};
uint32_t offset_index[] = {0, 3, 6};
uint32_t offsets[] = {0, 3, 4, 0, 3, 4, 0, 3, 4};
compact_posting_list_t* list = compact_posting_list_t::create(3, ids, offset_index, 9, offsets);
ASSERT_EQ(15, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
ASSERT_TRUE(list->contains(0));
ASSERT_TRUE(list->contains(1000));
ASSERT_TRUE(list->contains(1002));
ASSERT_FALSE(list->contains(500));
ASSERT_FALSE(list->contains(2));
// no-op since the container expects resizing to be done outside
list->upsert(1003, {1, 2});
ASSERT_EQ(15, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
// now resize
void* obj = SET_COMPACT_POSTING(list);
posting_t::upsert(obj, 1003, {1, 2});
ASSERT_EQ(1003, COMPACT_POSTING_PTR(obj)->last_id());
ASSERT_EQ(19, (COMPACT_POSTING_PTR(obj))->length);
ASSERT_EQ(24, (COMPACT_POSTING_PTR(obj))->capacity);
ASSERT_EQ(4, (COMPACT_POSTING_PTR(obj))->ids_length);
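// Sketch of the calling pattern this implies (an inference from the macros used in this
// file, not a documented contract): route mutations through the posting_t wrapper so the
// compact list can be re-allocated, then re-read the handle afterwards, e.g.
//   void* handle = SET_COMPACT_POSTING(list);
//   posting_t::upsert(handle, id, offsets);    // may grow or convert the list
//   auto* fresh = COMPACT_POSTING_PTR(handle); // the old 'list' pointer may be stale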
// insert enough docs to NOT exceed compact posting list threshold
posting_t::upsert(obj, 1004, {1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(1004, COMPACT_POSTING_PTR(obj)->last_id());
posting_t::upsert(obj, 1005, {1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(1005, COMPACT_POSTING_PTR(obj)->last_id());
posting_t::upsert(obj, 1006, {1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(1006, COMPACT_POSTING_PTR(obj)->last_id());
posting_t::upsert(obj, 1007, {1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(1007, COMPACT_POSTING_PTR(obj)->last_id());
ASSERT_TRUE(IS_COMPACT_POSTING(obj));
ASSERT_EQ(1007, COMPACT_POSTING_PTR(obj)->last_id());
ASSERT_EQ(8, (COMPACT_POSTING_PTR(obj))->ids_length);
// next upsert will exceed threshold
posting_t::upsert(obj, 1008, {1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_FALSE(IS_COMPACT_POSTING(obj));
ASSERT_EQ(1, ((posting_list_t*)(obj))->num_blocks());
ASSERT_EQ(9, ((posting_list_t*)(obj))->get_root()->size());
ASSERT_EQ(1008, ((posting_list_t*)(obj))->get_root()->ids.last());
ASSERT_EQ(9, ((posting_list_t*)(obj))->get_root()->ids.getLength());
ASSERT_EQ(9, ((posting_list_t*)(obj))->num_ids());
delete ((posting_list_t*)(obj));
}
TEST_F(PostingListTest, CompactPostingListUpserts) {
uint32_t ids[] = {3, 1000, 1002};
uint32_t offset_index[] = {0, 3, 6};
uint32_t offsets[] = {0, 3, 4, 0, 3, 4, 0, 3, 4};
compact_posting_list_t* list = compact_posting_list_t::create(3, ids, offset_index, 9, offsets);
ASSERT_EQ(15, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
// insert before first ID
void* obj = SET_COMPACT_POSTING(list);
posting_t::upsert(obj, 2, {1, 2});
ASSERT_EQ(1002, COMPACT_POSTING_PTR(obj)->last_id());
ASSERT_EQ(19, COMPACT_POSTING_PTR(obj)->length);
ASSERT_EQ(24, COMPACT_POSTING_PTR(obj)->capacity);
ASSERT_EQ(4, COMPACT_POSTING_PTR(obj)->num_ids());
// insert in the middle
posting_t::upsert(obj, 999, {1, 2});
ASSERT_EQ(1002, COMPACT_POSTING_PTR(obj)->last_id());
ASSERT_EQ(23, COMPACT_POSTING_PTR(obj)->length);
ASSERT_EQ(24, COMPACT_POSTING_PTR(obj)->capacity);
ASSERT_EQ(5, COMPACT_POSTING_PTR(obj)->num_ids());
uint32_t expected_id_offsets[] = {
2, 1, 2, 2,
3, 0, 3, 4, 3,
2, 1, 2, 999,
3, 0, 3, 4, 1000,
3, 0, 3, 4, 1002
};
ASSERT_EQ(23, COMPACT_POSTING_PTR(obj)->length);
for(size_t i = 0; i < COMPACT_POSTING_PTR(obj)->length; i++) {
ASSERT_EQ(expected_id_offsets[i], COMPACT_POSTING_PTR(obj)->id_offsets[i]);
}
free(COMPACT_POSTING_PTR(obj));
}
TEST_F(PostingListTest, CompactPostingListUpdateWithLessOffsets) {
uint32_t ids[] = {0, 1000, 1002};
uint32_t offset_index[] = {0, 3, 6};
uint32_t offsets[] = {0, 3, 4, 0, 3, 4, 0, 3, 4};
compact_posting_list_t* list = compact_posting_list_t::create(3, ids, offset_index, 9, offsets);
ASSERT_EQ(15, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
// update middle
list->upsert(1000, {1, 2});
ASSERT_EQ(14, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
uint32_t expected_id_offsets[] = {3, 0, 3, 4, 0, 2, 1, 2, 1000, 3, 0, 3, 4, 1002};
for(size_t i = 0; i < list->length; i++) {
ASSERT_EQ(expected_id_offsets[i], list->id_offsets[i]);
}
// update start
list->upsert(0, {2, 4});
ASSERT_EQ(13, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
uint32_t expected_id_offsets2[] = {2, 2, 4, 0, 2, 1, 2, 1000, 3, 0, 3, 4, 1002};
for(size_t i = 0; i < list->length; i++) {
ASSERT_EQ(expected_id_offsets2[i], list->id_offsets[i]);
}
// update end
list->upsert(1002, {2, 4});
ASSERT_EQ(12, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
uint32_t expected_id_offsets3[] = {2, 2, 4, 0, 2, 1, 2, 1000, 2, 2, 4, 1002};
for(size_t i = 0; i < list->length; i++) {
ASSERT_EQ(expected_id_offsets3[i], list->id_offsets[i]);
}
free(list);
}
TEST_F(PostingListTest, CompactPostingListUpdateWithMoreOffsets) {
uint32_t ids[] = {0, 1000, 1002};
uint32_t offset_index[] = {0, 3, 6};
uint32_t offsets[] = {0, 3, 4, 0, 3, 4, 0, 3, 4};
compact_posting_list_t* list = compact_posting_list_t::create(3, ids, offset_index, 9, offsets);
ASSERT_EQ(15, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
// update middle
void* obj = SET_COMPACT_POSTING(list);
posting_t::upsert(obj, 1000, {1, 2, 3, 4});
list = COMPACT_POSTING_PTR(obj);
ASSERT_EQ(16, list->length);
ASSERT_EQ(20, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
uint32_t expected_id_offsets[] = {3, 0, 3, 4, 0, 4, 1, 2, 3, 4, 1000, 3, 0, 3, 4, 1002};
for(size_t i = 0; i < list->length; i++) {
ASSERT_EQ(expected_id_offsets[i], list->id_offsets[i]);
}
// update start
list->upsert(0, {1, 2, 3, 4});
ASSERT_EQ(17, list->length);
ASSERT_EQ(20, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
uint32_t expected_id_offsets2[] = {4, 1, 2, 3, 4, 0, 4, 1, 2, 3, 4, 1000, 3, 0, 3, 4, 1002};
for(size_t i = 0; i < list->length; i++) {
ASSERT_EQ(expected_id_offsets2[i], list->id_offsets[i]);
}
// update end
list->upsert(1002, {1, 2, 3, 4});
ASSERT_EQ(18, list->length);
ASSERT_EQ(20, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
uint32_t expected_id_offsets3[] = {4, 1, 2, 3, 4, 0, 4, 1, 2, 3, 4, 1000, 4, 1, 2, 3, 4, 1002};
for(size_t i = 0; i < list->length; i++) {
ASSERT_EQ(expected_id_offsets3[i], list->id_offsets[i]);
}
free(list);
}
TEST_F(PostingListTest, CompactPostingListErase) {
uint32_t ids[] = {0, 1000, 1002};
uint32_t offset_index[] = {0, 3, 6};
uint32_t offsets[] = {0, 3, 4, 0, 3, 4, 0, 3, 4};
compact_posting_list_t* list = compact_posting_list_t::create(3, ids, offset_index, 9, offsets);
list->erase(3); // erase non-existing small ID
ASSERT_EQ(15, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
list->erase(3000); // erase non-existing large ID
ASSERT_EQ(15, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(3, list->num_ids());
list->erase(1000);
ASSERT_EQ(10, list->length);
ASSERT_EQ(15, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(2, list->num_ids());
// deleting using posting wrapper
void* obj = SET_COMPACT_POSTING(list);
posting_t::erase(obj, 1002);
ASSERT_TRUE(IS_COMPACT_POSTING(obj));
ASSERT_EQ(5, (COMPACT_POSTING_PTR(obj))->length);
ASSERT_EQ(7, (COMPACT_POSTING_PTR(obj))->capacity);
ASSERT_EQ(0, (COMPACT_POSTING_PTR(obj))->last_id());
ASSERT_EQ(1, (COMPACT_POSTING_PTR(obj))->num_ids());
// upsert again
posting_t::upsert(obj, 1002, {0, 3, 4});
list = COMPACT_POSTING_PTR(obj);
ASSERT_EQ(10, list->length);
ASSERT_EQ(13, list->capacity);
ASSERT_EQ(1002, list->last_id());
ASSERT_EQ(2, list->num_ids());
free(list);
}
TEST_F(PostingListTest, CompactPostingListContainsAtleastOne) {
uint32_t ids[] = {5, 6, 7, 8};
uint32_t offset_index[] = {0, 3, 6, 9};
uint32_t offsets[] = {0, 3, 4, 0, 3, 4, 0, 3, 4, 0, 3, 4};
std::vector<uint32_t> target_ids1 = {4, 7, 11};
std::vector<uint32_t> target_ids2 = {2, 3, 4, 20};
compact_posting_list_t* list1 = compact_posting_list_t::create(4, ids, offset_index, 12, offsets);
ASSERT_TRUE(list1->contains_atleast_one(&target_ids1[0], target_ids1.size()));
ASSERT_FALSE(list1->contains_atleast_one(&target_ids2[0], target_ids2.size()));
free(list1);
compact_posting_list_t* list2 = static_cast<compact_posting_list_t*>(malloc(sizeof(compact_posting_list_t)));
list2->capacity = list2->ids_length = list2->length = 0;
void* obj = SET_COMPACT_POSTING(list2);
posting_t::upsert(obj, 3, {1, 5});
std::vector<uint32_t> target_ids3 = {1, 2, 3, 4, 100};
std::vector<uint32_t> target_ids4 = {4, 5, 6, 100};
ASSERT_TRUE(COMPACT_POSTING_PTR(obj)->contains_atleast_one(&target_ids3[0], target_ids3.size()));
ASSERT_FALSE(COMPACT_POSTING_PTR(obj)->contains_atleast_one(&target_ids4[0], target_ids4.size()));
std::vector<uint32_t> target_ids5 = {2, 3};
ASSERT_TRUE(COMPACT_POSTING_PTR(obj)->contains_atleast_one(&target_ids5[0], target_ids5.size()));
std::vector<uint32_t> target_ids6 = {0, 1, 2};
ASSERT_FALSE(COMPACT_POSTING_PTR(obj)->contains_atleast_one(&target_ids6[0], target_ids6.size()));
posting_t::destroy_list(obj);
}
TEST_F(PostingListTest, CompactToFullPostingListConversion) {
uint32_t ids[] = {5, 6, 7, 8};
uint32_t offset_index[] = {0, 3, 6, 9};
uint32_t offsets[] = {0, 3, 4, 0, 3, 4, 0, 3, 4, 0, 3, 4};
compact_posting_list_t* c1 = compact_posting_list_t::create(4, ids, offset_index, 12, offsets);
posting_list_t* p1 = c1->to_full_posting_list();
ASSERT_EQ(4, c1->num_ids());
ASSERT_EQ(4, p1->num_ids());
free(c1);
delete p1;
}
TEST_F(PostingListTest, BlockIntersectionOnMixedLists) {
uint32_t ids[] = {5, 6, 7, 8};
uint32_t offset_index[] = {0, 3, 6, 9};
uint32_t offsets[] = {0, 3, 4, 0, 3, 4, 0, 3, 4, 0, 3, 4};
compact_posting_list_t* list1 = compact_posting_list_t::create(4, ids, offset_index, 12, offsets);
posting_list_t p1(2);
std::vector<uint32_t> offsets1 = {2, 4};
p1.upsert(0, offsets1);
p1.upsert(5, offsets1);
p1.upsert(8, offsets1);
p1.upsert(20, offsets1);
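// list1 holds {5, 6, 7, 8} and p1 holds {0, 5, 8, 20}; the expected intersection is {5, 8}.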
std::vector<void*> raw_posting_lists = {SET_COMPACT_POSTING(list1), &p1};
result_iter_state_t iter_state;
std::vector<uint32_t> result_ids;
std::mutex vecm;
posting_t::block_intersector_t(raw_posting_lists, iter_state)
.intersect([&](auto seq_id, auto& its) {
std::unique_lock lock(vecm);
result_ids.push_back(seq_id);
});
std::sort(result_ids.begin(), result_ids.end());
ASSERT_EQ(2, result_ids.size());
ASSERT_EQ(5, result_ids[0]);
ASSERT_EQ(8, result_ids[1]);
free(list1);
}
TEST_F(PostingListTest, InsertAndEraseSequence) {
std::vector<uint32_t> offsets = {0, 1, 3};
posting_list_t pl(5);
pl.upsert(0, offsets);
pl.upsert(2, offsets);
pl.upsert(4, offsets);
pl.upsert(6, offsets);
pl.upsert(8, offsets);
// this will cause a split of the root block
pl.upsert(3, offsets); // 0,2,3 | 4,6,8
pl.erase(0); // 2,3 | 4,6,8
pl.upsert(5, offsets); // 2,3 | 4,5,6,8
pl.upsert(7, offsets); // 2,3 | 4,5,6,7,8
pl.upsert(10, offsets); // 2,3 | 4,5,6,7,8 | 10
// this will cause adjacent block refill
pl.erase(2); // 3,4,5,6,7 | 8 | 10
// deletes second block
pl.erase(8);
// remove all elements
pl.erase(3);
pl.erase(4);
pl.erase(5);
pl.erase(6);
pl.erase(7);
pl.erase(10);
ASSERT_EQ(0, pl.num_ids());
}
TEST_F(PostingListTest, InsertAndEraseSequenceWithBlockSizeTwo) {
std::vector<uint32_t> offsets = {0, 1, 3};
posting_list_t pl(2);
pl.upsert(2, offsets);
pl.upsert(3, offsets);
pl.upsert(1, offsets); // the root block is already full, so this forces a split
// 1 | 2,3
pl.erase(1);
ASSERT_EQ(1, pl.get_root()->size());
ASSERT_EQ(2, pl.num_blocks());
pl.erase(3);
pl.erase(2);
ASSERT_EQ(0, pl.get_root()->size());
}
TEST_F(PostingListTest, PostingListMustHaveAtleast1Element) {
try {
std::vector<uint32_t> offsets = {0, 1, 3};
posting_list_t pl(1);
FAIL() << "Expected std::invalid_argument";
}
catch(std::invalid_argument const & err) {
EXPECT_EQ(err.what(), std::string("max_block_elements must be > 1"));
} catch(...) {
FAIL() << "Expected std::invalid_argument";
}
}
TEST_F(PostingListTest, DISABLED_RandInsertAndErase) {
std::vector<uint32_t> offsets = {0, 1, 3};
posting_list_t pl(5);
time_t t;
srand((unsigned) time(&t));
for(size_t i = 0; i < 10000; i++) {
LOG(INFO) << "i: " << i;
uint32_t add_id = rand() % 15;
pl.upsert(add_id, offsets);
uint32_t del_id = rand() % 15;
LOG(INFO) << "add: " << add_id << ", erase: " << del_id;
pl.erase(del_id);
}
LOG(INFO) << "Num ids: " << pl.num_ids() << ", num bocks: " << pl.num_blocks();
}
TEST_F(PostingListTest, DISABLED_Benchmark) {
std::vector<uint32_t> offsets = {0, 1, 3};
posting_list_t pl(4096);
sorted_array arr;
for(size_t i = 0; i < 500000; i++) {
pl.upsert(i, offsets);
arr.append(i);
}
auto begin = std::chrono::high_resolution_clock::now();
for(size_t i = 250000; i < 250005; i++) {
pl.upsert(i, offsets);
}
long long int timeMicros =
std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - begin).count();
LOG(INFO) << "Time taken for 5 posting list updates: " << timeMicros;
begin = std::chrono::high_resolution_clock::now();
for(size_t i = 250000; i < 250005; i++) {
arr.remove_value(i);
arr.append(i);
}
timeMicros =
std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - begin).count();
LOG(INFO) << "Time taken for 5 sorted array updates: " << timeMicros;
}
TEST_F(PostingListTest, DISABLED_BenchmarkIntersection) {
std::vector<uint32_t> offsets = {0, 1, 3};
time_t t;
srand((unsigned) time(&t));
std::set<uint32_t> rand_ids1;
std::set<uint32_t> rand_ids2;
std::set<uint32_t> rand_ids3;
const size_t list1_size = 100000;
const size_t list2_size = 50000;
const size_t list3_size = 25000;
const size_t num_range = 1000000;
/*const size_t list1_size = 10;
const size_t list2_size = 10;
const size_t num_range = 50;*/
for(size_t i = 0; i < list1_size; i++) {
rand_ids1.insert(rand() % num_range);
}
for(size_t i = 0; i < list2_size; i++) {
rand_ids2.insert(rand() % num_range);
}
for(size_t i = 0; i < list3_size; i++) {
rand_ids3.insert(rand() % num_range);
}
posting_list_t pl1(1024);
posting_list_t pl2(1024);
posting_list_t pl3(1024);
sorted_array arr1;
sorted_array arr2;
sorted_array arr3;
std::string id1_str = "";
std::string id2_str = "";
std::string id3_str = "";
for(auto id: rand_ids1) {
//id1_str += std::to_string(id) + " ";
pl1.upsert(id, offsets);
arr1.append(id);
}
for(auto id: rand_ids2) {
//id2_str += std::to_string(id) + " ";
pl2.upsert(id, offsets);
arr2.append(id);
}
for(auto id: rand_ids3) {
//id2_str += std::to_string(id) + " ";
pl3.upsert(id, offsets);
arr3.append(id);
}
//LOG(INFO) << "id1_str: " << id1_str;
//LOG(INFO) << "id2_str: " << id2_str;
std::vector<uint32_t> result_ids;
auto begin = std::chrono::high_resolution_clock::now();
posting_list_t::intersect({&pl1, &pl2, &pl3}, result_ids);
long long int timeMicros =
std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - begin).count();
LOG(INFO) << "Posting list result len: " << result_ids.size();
LOG(INFO) << "Time taken for posting list intersection: " << timeMicros;
begin = std::chrono::high_resolution_clock::now();
auto a = arr1.uncompress();
auto b = arr2.uncompress();
auto c = arr3.uncompress();
uint32_t* ab;
size_t ab_len = ArrayUtils::and_scalar(a, arr1.getLength(), b, arr2.getLength(), &ab);
uint32_t* abc;
size_t abc_len = ArrayUtils::and_scalar(ab, ab_len, c, arr3.getLength(), &abc);
delete [] a;
delete [] b;
delete [] c;
delete [] ab;
delete [] abc;
timeMicros =
std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - begin).count();
LOG(INFO) << "Sorted array result len: " << abc_len;
LOG(INFO) << "Time taken for sorted array intersection: " << timeMicros;
}
TEST_F(PostingListTest, GetOrIterator) {
std::vector<uint32_t> ids = {1, 3, 5};
std::vector<uint32_t> offset_index = {0, 3, 6};
std::vector<uint32_t> offsets = {0, 3, 4, 0, 3, 4, 0, 3, 4};
compact_posting_list_t* c_list = compact_posting_list_t::create(3, &ids[0], &offset_index[0], 9, &offsets[0]);
void* raw_pointer = SET_COMPACT_POSTING(c_list);
std::vector<or_iterator_t> or_iterators;
std::vector<posting_list_t*> expanded_plists;
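// For a compact posting list, get_or_iterator expands it into a full posting_list_t; the expansion is returned in expanded_plists so the caller can free it.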
posting_t::get_or_iterator(raw_pointer, or_iterators, expanded_plists);
ASSERT_EQ(1, or_iterators.size());
ASSERT_EQ(1, expanded_plists.size());
for (const auto &id: ids) {
ASSERT_TRUE(or_iterators.front().valid());
ASSERT_EQ(id, or_iterators.front().id());
or_iterators.front().next();
}
ASSERT_FALSE(or_iterators.front().valid());
free(c_list);
or_iterators.clear();
for (auto& item: expanded_plists) {
delete item;
}
expanded_plists.clear();
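// A regular posting_list_t can be iterated directly, so no expanded copy is created for it.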
posting_list_t p_list(2);
for (const auto &id: ids) {
p_list.upsert(id, {1, 2, 3});
}
raw_pointer = &p_list;
posting_t::get_or_iterator(raw_pointer, or_iterators, expanded_plists);
ASSERT_EQ(1, or_iterators.size());
ASSERT_TRUE(expanded_plists.empty());
for (const auto &id: ids) {
ASSERT_TRUE(or_iterators.front().valid());
ASSERT_EQ(id, or_iterators.front().id());
or_iterators.front().next();
}
ASSERT_FALSE(or_iterators.front().valid());
or_iterators.clear();
}
// ==== typesense/typesense: test/collection_join_test.cpp ====
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
#include <join.h>
class CollectionJoinTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
std::string state_dir_path = "/tmp/typesense_test/collection_join";
void setupCollection() {
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(CollectionJoinTest, SchemaReferenceField) {
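// Reference fields are validated at schema creation: wildcard fields, non-string references, malformed targets, and nested object fields are rejected.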
nlohmann::json schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "product_.*", "type": "string", "reference": "Products.product_id"}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Wildcard field cannot have a reference.", collection_create_op.error());
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": ".*", "type": "auto", "reference": "Products.product_id"}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Field `.*` cannot be a reference field.", collection_create_op.error());
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "product_id", "type": "string", "reference": 123},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Reference should be a string.", collection_create_op.error());
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "product_id", "type": "string", "reference": "foo"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Invalid reference `foo`.", collection_create_op.error());
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "Object.object.field", "type": "string", "reference": "Products.product_id"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("`Object.object.field` field cannot have a reference. Only the top-level field of an object is allowed.",
collection_create_op.error());
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "product_id", "type": "string", "reference": "Products.product_id"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto collection = collection_create_op.get();
auto schema = collection->get_schema();
ASSERT_EQ(schema.count("customer_name"), 1);
ASSERT_TRUE(schema.at("customer_name").reference.empty());
ASSERT_EQ(schema.count("product_id"), 1);
ASSERT_FALSE(schema.at("product_id").reference.empty());
auto reference_fields = collection->get_reference_fields();
ASSERT_EQ(reference_fields.count("product_id"), 1);
ASSERT_EQ(reference_fields.at("product_id").collection, "Products");
ASSERT_EQ(reference_fields.at("product_id").field, "product_id");
// A `<field>_sequence_id` helper field is added to the schema for every reference field.
ASSERT_EQ(schema.count("product_id_sequence_id"), 1);
ASSERT_TRUE(schema.at("product_id_sequence_id").index);
collectionManager.drop_collection("Customers");
}
TEST_F(CollectionJoinTest, IndexDocumentHavingReferenceField) {
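// Reference values are validated at indexing time: the referenced collection, field, and document must exist, the field must be indexed, and the value must match exactly one document.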
auto customers_schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "reference_id", "type": "string", "reference": "products.product_id"}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(customers_schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto customer_collection = collection_create_op.get();
nlohmann::json customer_json = R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143
})"_json;
auto add_doc_op = customer_collection->add(customer_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Missing the required reference field `reference_id` in the document.", add_doc_op.error());
customer_json = R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"reference_id": "a"
})"_json;
add_doc_op = customer_collection->add(customer_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Referenced collection `products` not found.", add_doc_op.error());
collectionManager.drop_collection("Customers");
customers_schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "reference_id", "type": "string", "reference": "Products.foo"}
]
})"_json;
collection_create_op = collectionManager.create_collection(customers_schema_json);
ASSERT_TRUE(collection_create_op.ok());
customer_collection = collection_create_op.get();
auto products_schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string", "index": false, "optional": true},
{"name": "product_name", "type": "string"},
{"name": "product_description", "type": "string"}
]
})"_json;
collection_create_op = collectionManager.create_collection(products_schema_json);
ASSERT_TRUE(collection_create_op.ok());
add_doc_op = customer_collection->add(customer_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Referenced field `foo` not found in the collection `Products`.", add_doc_op.error());
collectionManager.drop_collection("Customers");
customers_schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "reference_id", "type": "string", "reference": "Products.product_id"}
]
})"_json;
collection_create_op = collectionManager.create_collection(customers_schema_json);
ASSERT_TRUE(collection_create_op.ok());
customer_collection = collection_create_op.get();
add_doc_op = customer_collection->add(customer_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Referenced field `product_id` in the collection `Products` must be indexed.", add_doc_op.error());
collectionManager.drop_collection("Products");
products_schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string"},
{"name": "product_description", "type": "string"}
]
})"_json;
collection_create_op = collectionManager.create_collection(products_schema_json);
ASSERT_TRUE(collection_create_op.ok());
add_doc_op = customer_collection->add(customer_json.dump());
ASSERT_EQ("Reference document having `product_id:= `a`` not found in the collection `Products`.", add_doc_op.error());
std::vector<nlohmann::json> products = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair."
})"_json,
R"({
"product_id": "product_a",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients."
})"_json
};
for (auto const &json: products){
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
customer_json["reference_id"] = "product_a";
add_doc_op = customer_collection->add(customer_json.dump());
ASSERT_EQ("Multiple documents having `product_id:= `product_a`` found in the collection `Products`.", add_doc_op.error());
collectionManager.drop_collection("Products");
products[1]["product_id"] = "product_b";
products_schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string"},
{"name": "product_description", "type": "string"}
]
})"_json;
collection_create_op = collectionManager.create_collection(products_schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: products){
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
collectionManager.drop_collection("Customers");
customers_schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "reference_id", "type": "string", "reference": "Products.product_id"}
]
})"_json;
collection_create_op = collectionManager.create_collection(customers_schema_json);
ASSERT_TRUE(collection_create_op.ok());
customer_collection = collection_create_op.get();
add_doc_op = customer_collection->add(customer_json.dump());
ASSERT_TRUE(add_doc_op.ok());
auto customer_doc = customer_collection->get("0").get();
ASSERT_EQ(0, customer_doc.at("reference_id_sequence_id"));
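// `.ref` lists the reference helper fields stored on the document.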
ASSERT_EQ(1, customer_doc.count(".ref"));
ASSERT_EQ(1, customer_doc[".ref"].size());
ASSERT_EQ("reference_id_sequence_id", customer_doc[".ref"].at(0));
nlohmann::json product_doc;
// Referenced document's sequence_id must be valid.
auto get_op = collectionManager.get_collection("Products")->get_document_from_store(
customer_doc["reference_id_sequence_id"].get<uint32_t>(),
product_doc);
ASSERT_TRUE(get_op.ok());
ASSERT_EQ(product_doc.count("product_id"), 1);
ASSERT_EQ(product_doc["product_id"], "product_a");
ASSERT_EQ(product_doc["product_name"], "shampoo");
auto id_ref_schema_json =
R"({
"name": "id_ref",
"fields": [
{"name": "id_reference", "type": "string", "reference": "Products.id", "optional": true},
{"name": "multi_id_reference", "type": "string[]", "reference": "Products.id", "optional": true}
]
})"_json;
collection_create_op = collectionManager.create_collection(id_ref_schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto id_ref_collection = collection_create_op.get();
auto id_ref_json = R"({
"id_reference": 123
})"_json;
add_doc_op = id_ref_collection->add(id_ref_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `id_reference` must have string value.", add_doc_op.error());
id_ref_json = R"({
"id_reference": "foo"
})"_json;
add_doc_op = id_ref_collection->add(id_ref_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Referenced document having `id: foo` not found in the collection `Products`.", add_doc_op.error());
id_ref_json = R"({
"multi_id_reference": ["0", 1]
})"_json;
add_doc_op = id_ref_collection->add(id_ref_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `multi_id_reference` must have string value.", add_doc_op.error());
id_ref_json = R"({
"multi_id_reference": ["0", "foo"]
})"_json;
add_doc_op = id_ref_collection->add(id_ref_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Referenced document having `id: foo` not found in the collection `Products`.", add_doc_op.error());
collectionManager.drop_collection("id_ref");
id_ref_schema_json =
R"({
"name": "id_ref",
"fields": [
{"name": "id_reference", "type": "string", "reference": "Products.id", "optional": true},
{"name": "multi_id_reference", "type": "string[]", "reference": "Products.id", "optional": true}
]
})"_json;
collection_create_op = collectionManager.create_collection(id_ref_schema_json);
ASSERT_TRUE(collection_create_op.ok());
id_ref_collection = collection_create_op.get();
id_ref_json = R"({
"id_reference": "0"
})"_json;
add_doc_op = id_ref_collection->add(id_ref_json.dump());
ASSERT_TRUE(add_doc_op.ok());
auto doc = id_ref_collection->get("0").get();
ASSERT_EQ(0, doc["id_reference_sequence_id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("id_reference_sequence_id", doc[".ref"].at(0));
id_ref_json = R"({
"multi_id_reference": ["1"]
})"_json;
add_doc_op = id_ref_collection->add(id_ref_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = id_ref_collection->get("1").get();
ASSERT_EQ(1, doc["multi_id_reference_sequence_id"].size());
ASSERT_EQ(1, doc["multi_id_reference_sequence_id"][0]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("multi_id_reference_sequence_id", doc[".ref"][0]);
id_ref_json = R"({
"multi_id_reference": ["0", "1"]
})"_json;
add_doc_op = id_ref_collection->add(id_ref_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = id_ref_collection->get("2").get();
ASSERT_EQ(2, doc["multi_id_reference_sequence_id"].size());
ASSERT_EQ(0, doc["multi_id_reference_sequence_id"][0]);
ASSERT_EQ(1, doc["multi_id_reference_sequence_id"][1]);
id_ref_json = R"({
"id_reference": null
})"_json;
add_doc_op = id_ref_collection->add(id_ref_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = id_ref_collection->get("3").get();
ASSERT_EQ(0, doc.count("id_reference_sequence_id"));
ASSERT_EQ(0, doc.count("multi_id_reference_sequence_id"));
ASSERT_EQ(0, doc.count(".ref"));
id_ref_json = R"({
"multi_id_reference": [null]
})"_json;
add_doc_op = id_ref_collection->add(id_ref_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `multi_id_reference` must be an array of string.", add_doc_op.error());
// Reference helper fields are not returned in the search response.
auto result = id_ref_collection->search("*", {}, "", {}, {}, {0}).get();
ASSERT_EQ(4, result["found"].get<size_t>());
ASSERT_EQ(4, result["hits"].size());
ASSERT_EQ(0, result["hits"][0]["document"].count("id_reference_sequence_id"));
ASSERT_EQ(0, result["hits"][1]["document"].count("multi_id_reference_sequence_id"));
ASSERT_EQ(0, result["hits"][2]["document"].count("multi_id_reference_sequence_id"));
ASSERT_EQ(0, result["hits"][3]["document"].count("id_reference_sequence_id"));
collectionManager.drop_collection("Customers");
collectionManager.drop_collection("Products");
collectionManager.drop_collection("id_ref");
auto schema_json =
R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "string_field", "type": "string", "optional": true},
{"name": "string_array_field", "type": "string[]", "optional": true},
{"name": "int32_field", "type": "int32", "optional": true},
{"name": "int32_array_field", "type": "int32[]", "optional": true},
{"name": "int64_field", "type": "int64", "optional": true},
{"name": "int64_array_field", "type": "int64[]", "optional": true},
{"name": "float_field", "type": "float", "optional": true},
{"name": "float_array_field", "type": "float[]", "optional": true},
{"name": "bool_field", "type": "bool", "optional": true},
{"name": "bool_array_field", "type": "bool[]", "optional": true},
{"name": "geopoint_field", "type": "geopoint", "optional": true},
{"name": "geopoint_array_field", "type": "geopoint[]", "optional": true},
{"name": "object_field", "type": "object", "optional": true},
{"name": "object_array_field", "type": "object[]", "optional": true}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll1 = collection_create_op.get();
schema_json =
R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": "ref_string_field", "type": "string", "optional": true, "reference": "coll1.string_field"},
{"name": "ref_string_array_field", "type": "string[]", "optional": true, "reference": "coll1.string_array_field"},
{"name": "ref_int32_field", "type": "int32", "optional": true, "reference": "coll1.int32_field"},
{"name": "ref_int32_array_field", "type": "int32[]", "optional": true, "reference": "coll1.int32_array_field"},
{"name": "ref_int64_field", "type": "int64", "optional": true, "reference": "coll1.int64_field"},
{"name": "ref_int64_array_field", "type": "int64[]", "optional": true, "reference": "coll1.int64_array_field"},
{"name": "ref_float_field", "type": "float", "optional": true, "reference": "coll1.float_field"},
{"name": "ref_float_array_field", "type": "float[]", "optional": true, "reference": "coll1.float_array_field"},
{"name": "ref_bool_field", "type": "bool", "optional": true, "reference": "coll1.bool_field"},
{"name": "ref_bool_array_field", "type": "bool[]", "optional": true, "reference": "coll1.bool_array_field"},
{"name": "ref_geopoint_field", "type": "geopoint", "optional": true, "reference": "coll1.geopoint_field"},
{"name": "ref_geopoint_array_field", "type": "geopoint[]", "optional": true, "reference": "coll1.geopoint_array_field"},
{"name": "ref_object_field", "type": "object", "optional": true, "reference": "coll1.object_field"},
{"name": "ref_object_array_field", "type": "object[]", "optional": true, "reference": "coll1.object_array_field"},
{"name": "non_indexed_object.ref_field", "type": "string", "optional": true, "reference": "coll1.string_field"},
{"name": "object.ref_field", "type": "string", "optional": true, "reference": "coll1.string_field"},
{"name": "object.ref_array_field", "type": "string[]", "optional": true, "reference": "coll1.string_array_field"},
{"name": "object", "type": "object", "optional": true},
{"name": "object_array.ref_field", "type": "string", "optional": true, "reference": "coll1.string_field"},
{"name": "object_array.ref_array_field", "type": "string[]", "optional": true, "reference": "coll1.string_array_field"},
{"name": "object_array", "type": "object[]", "optional": true}
]
})"_json;
auto temp_json = schema_json;
collection_create_op = collectionManager.create_collection(temp_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll2 = collection_create_op.get();
// string/string[] reference fields
auto doc_json = R"({
"string_field": "a",
"string_array_field": ["b", "c"]
})"_json;
add_doc_op = coll1->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc_json = R"({
"string_field": "d",
"string_array_field": ["e", "f"]
})"_json;
add_doc_op = coll1->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc_json = R"({
"ref_string_field": 1
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_string_field` must have `string` value.", add_doc_op.error());
doc_json = R"({
"ref_string_field": "Tomaten (g`estückelt) "
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Filter value `Tomaten (g`estückelt) ` cannot be parsed.", add_doc_op.error());
doc_json = R"({
"ref_string_field": "Tomaten g`estückelt "
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
// The value is not wrapped in backticks when it already contains a backtick.
ASSERT_EQ("Reference document having `string_field:= Tomaten g`estückelt ` not found in the collection `coll1`.",
add_doc_op.error());
doc_json = R"({
"ref_string_field": "Tomaten (gestückelt) "
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `string_field:= `Tomaten (gestückelt) `` not found in the collection `coll1`.",
add_doc_op.error());
doc_json = R"({
"ref_string_field": "Tomaten && gestückelt"
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `string_field:= `Tomaten && gestückelt`` not found in the collection `coll1`.",
add_doc_op.error());
doc_json = R"({
"ref_string_field": "Tomaten||gestückelt"
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `string_field:= `Tomaten||gestückelt`` not found in the collection `coll1`.",
add_doc_op.error());
doc_json = R"({
"ref_string_field": "Tomaten`||`gestückelt"
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `string_field:= Tomaten`||`gestückelt` not found in the collection `coll1`.",
add_doc_op.error());
doc_json = R"({
"ref_string_array_field": ["a", 1]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_string_array_field` must have `string` value.", add_doc_op.error());
doc_json = R"({
"ref_string_array_field": [null]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_string_array_field` cannot have `null` value.", add_doc_op.error());
doc_json = R"({
"ref_string_array_field": ["foo"]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `string_array_field:= `foo`` not found in the collection `coll1`.",
add_doc_op.error());
collectionManager.drop_collection("coll2");
temp_json = schema_json;
collection_create_op = collectionManager.create_collection(temp_json);
ASSERT_TRUE(collection_create_op.ok());
coll2 = collection_create_op.get();
doc_json = R"({
"ref_string_field": "d"
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("0").get();
ASSERT_EQ(1, doc.count("ref_string_field_sequence_id"));
ASSERT_EQ(1, doc["ref_string_field_sequence_id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("ref_string_field_sequence_id", doc[".ref"][0]);
doc_json = R"({
"ref_string_field": null
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("1").get();
ASSERT_EQ(0, doc.count("ref_string_field_sequence_id"));
ASSERT_EQ(0, doc.count(".ref"));
result = coll2->search("*", {}, "", {}, {}, {0}).get();
ASSERT_EQ(0, result["hits"][0]["document"]["ref_string_array_field_sequence_id"].size());
doc_json = R"({
"ref_string_array_field": ["b"]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("2").get();
ASSERT_EQ(1, doc.count("ref_string_array_field_sequence_id"));
ASSERT_EQ(1, doc["ref_string_array_field_sequence_id"].size());
ASSERT_EQ(0, doc["ref_string_array_field_sequence_id"][0]);
doc_json = R"({
"ref_string_array_field": ["c", "e"]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("3").get();
ASSERT_EQ(1, doc.count("ref_string_array_field_sequence_id"));
ASSERT_EQ(2, doc["ref_string_array_field_sequence_id"].size());
ASSERT_EQ(0, doc["ref_string_array_field_sequence_id"][0]);
ASSERT_EQ(1, doc["ref_string_array_field_sequence_id"][1]);
// int32/int32[] reference fields
doc_json = R"({
"int32_field": 1
})"_json;
add_doc_op = coll1->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc_json = R"({
"int32_field": 1,
"int32_array_field": [2, -2147483648]
})"_json;
add_doc_op = coll1->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc_json = R"({
"int32_field": 4,
"int32_array_field": [5, 2147483647]
})"_json;
add_doc_op = coll1->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc_json = R"({
"ref_int32_field": "1"
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_int32_field` must have `int32` value.", add_doc_op.error());
doc_json = R"({
"ref_int32_field": 2147483648
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_int32_field` must have `int32` value.", add_doc_op.error());
doc_json = R"({
"ref_int32_field": 0
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `int32_field: 0` not found in the collection `coll1`.", add_doc_op.error());
doc_json = R"({
"ref_int32_field": 1
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Multiple documents having `int32_field: 1` found in the collection `coll1`.", add_doc_op.error());
doc_json = R"({
"ref_int32_array_field": [1, "2"]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_int32_array_field` must have `int32` value.", add_doc_op.error());
doc_json = R"({
"ref_int32_array_field": [1, -2147483649]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_int32_array_field` must have `int32` value.", add_doc_op.error());
doc_json = R"({
"ref_int32_array_field": [1, 2147483648]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_int32_array_field` must have `int32` value.", add_doc_op.error());
doc_json = R"({
"ref_int32_array_field": [1]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `int32_array_field: 1` not found in the collection `coll1`.", add_doc_op.error());
collectionManager.drop_collection("coll2");
temp_json = schema_json;
collection_create_op = collectionManager.create_collection(temp_json);
ASSERT_TRUE(collection_create_op.ok());
coll2 = collection_create_op.get();
doc_json = R"({
"ref_int32_field": 4
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("0").get();
ASSERT_EQ(1, doc.count("ref_int32_field_sequence_id"));
ASSERT_EQ(4, doc["ref_int32_field_sequence_id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("ref_int32_field_sequence_id", doc[".ref"][0]);
doc_json = R"({
"ref_int32_array_field": [2]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("1").get();
ASSERT_EQ(1, doc.count("ref_int32_array_field_sequence_id"));
ASSERT_EQ(1, doc["ref_int32_array_field_sequence_id"].size());
ASSERT_EQ(3, doc["ref_int32_array_field_sequence_id"][0]);
doc_json = R"({
"ref_int32_array_field": [2, 5]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("2").get();
ASSERT_EQ(1, doc.count("ref_int32_array_field_sequence_id"));
ASSERT_EQ(2, doc["ref_int32_array_field_sequence_id"].size());
ASSERT_EQ(3, doc["ref_int32_array_field_sequence_id"][0]);
ASSERT_EQ(4, doc["ref_int32_array_field_sequence_id"][1]);
doc_json = R"({
"ref_int32_array_field": [-2147483648]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("3").get();
ASSERT_EQ(1, doc.count("ref_int32_array_field_sequence_id"));
ASSERT_EQ(1, doc["ref_int32_array_field_sequence_id"].size());
ASSERT_EQ(3, doc["ref_int32_array_field_sequence_id"][0]);
// int64/int64[] reference fields
doc_json = R"({
"int64_field": 1
})"_json;
add_doc_op = coll1->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc_json = R"({
"int64_field": 1,
"int64_array_field": [2, -9223372036854775808]
})"_json;
add_doc_op = coll1->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc_json = R"({
"int64_field": 4,
"int64_array_field": [5, 9223372036854775807]
})"_json;
add_doc_op = coll1->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc_json = R"({
"ref_int64_field": "1"
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_int64_field` must have `int64` value.", add_doc_op.error());
doc_json = R"({
"ref_int64_field": 0
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `int64_field: 0` not found in the collection `coll1`.", add_doc_op.error());
doc_json = R"({
"ref_int64_field": 1
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Multiple documents having `int64_field: 1` found in the collection `coll1`.", add_doc_op.error());
doc_json = R"({
"ref_int64_array_field": [1, "2"]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_int64_array_field` must have `int64` value.", add_doc_op.error());
doc_json = R"({
"ref_int64_array_field": [1, -9223372036854775809]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_int64_array_field` must have `int64` value.", add_doc_op.error());
doc_json = R"({
"ref_int64_array_field": [1, 1.5]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `ref_int64_array_field` must have `int64` value.", add_doc_op.error());
doc_json = R"({
"ref_int64_array_field": [1]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `int64_array_field: 1` not found in the collection `coll1`.", add_doc_op.error());
collectionManager.drop_collection("coll2");
temp_json = schema_json;
collection_create_op = collectionManager.create_collection(temp_json);
ASSERT_TRUE(collection_create_op.ok());
coll2 = collection_create_op.get();
doc_json = R"({
"ref_int64_field": 4
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("0").get();
ASSERT_EQ(1, doc.count("ref_int64_field_sequence_id"));
ASSERT_EQ(7, doc["ref_int64_field_sequence_id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("ref_int64_field_sequence_id", doc[".ref"][0]);
doc_json = R"({
"ref_int64_array_field": [2]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("1").get();
ASSERT_EQ(1, doc.count("ref_int64_array_field_sequence_id"));
ASSERT_EQ(1, doc["ref_int64_array_field_sequence_id"].size());
ASSERT_EQ(6, doc["ref_int64_array_field_sequence_id"][0]);
doc_json = R"({
"ref_int64_array_field": [2, 5]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("2").get();
ASSERT_EQ(1, doc.count("ref_int64_array_field_sequence_id"));
ASSERT_EQ(2, doc["ref_int64_array_field_sequence_id"].size());
ASSERT_EQ(6, doc["ref_int64_array_field_sequence_id"][0]);
ASSERT_EQ(7, doc["ref_int64_array_field_sequence_id"][1]);
doc_json = R"({
"ref_int64_array_field": [-9223372036854775808]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("3").get();
ASSERT_EQ(1, doc.count("ref_int64_array_field_sequence_id"));
ASSERT_EQ(1, doc["ref_int64_array_field_sequence_id"].size());
ASSERT_EQ(6, doc["ref_int64_array_field_sequence_id"][0]);
// reference field inside object/object[]
doc_json = R"({
"non_indexed_object": {
"ref_field": "foo"
}
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Could not find `non_indexed_object` object/object[] field in the schema.", add_doc_op.error());
doc_json = R"({
"object": {
"ref_field": 1
}
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `object.ref_field` must have `string` value.", add_doc_op.error());
doc_json = R"({
"object": {
"ref_array_field": [1]
}
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `object.ref_array_field` must have `string` value.", add_doc_op.error());
doc_json = R"({
"object_array": [
{
"ref_field": 1
}
]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `object_array.ref_field` must have `string` value.", add_doc_op.error());
doc_json = R"({
"object_array": [
{
"ref_field": "foo"
}
]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `string_field:= `foo`` not found in the collection `coll1`.", add_doc_op.error());
doc_json = R"({
"object_array": [
{
"ref_field": "a"
}
]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `object_array.ref_field` has an incorrect type."
" Hint: field inside an array of objects must be an array type as well.", add_doc_op.error());
doc_json = R"({
"object_array": [
{
"ref_array_field": "foo"
}
]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `string_array_field:= `foo`` not found in the collection `coll1`.", add_doc_op.error());
doc_json = R"({
"object": {
"ref_array_field": ["foo"]
}
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Reference document having `string_array_field:= `foo`` not found in the collection `coll1`.", add_doc_op.error());
collectionManager.drop_collection("coll2");
temp_json = schema_json;
collection_create_op = collectionManager.create_collection(temp_json);
ASSERT_TRUE(collection_create_op.ok());
coll2 = collection_create_op.get();
doc_json = R"({
"object": {
"ref_field": "d"
}
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("0").get();
ASSERT_EQ(1, doc.count("object.ref_field_sequence_id"));
ASSERT_EQ(1, doc["object.ref_field_sequence_id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("object.ref_field_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, coll2->get_object_reference_helper_fields().count("object.ref_field_sequence_id"));
doc_json = R"({
"object": {
"ref_array_field": ["b"]
}
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("1").get();
ASSERT_EQ(1, doc.count("object.ref_array_field_sequence_id"));
ASSERT_EQ(1, doc["object.ref_array_field_sequence_id"].size());
ASSERT_EQ(0, doc["object.ref_array_field_sequence_id"][0]);
doc_json = R"({
"object_array": [
{
"ref_array_field": "c"
},
{
"ref_array_field": "e"
}
]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll2->get("2").get();
ASSERT_EQ(1, doc.count("object_array.ref_array_field_sequence_id"));
ASSERT_EQ(2, doc["object_array.ref_array_field_sequence_id"].size());
ASSERT_EQ(2, doc["object_array.ref_array_field_sequence_id"][0].size());
ASSERT_EQ(0, doc["object_array.ref_array_field_sequence_id"][0][0]);
ASSERT_EQ(0, doc["object_array.ref_array_field_sequence_id"][0][1]);
ASSERT_EQ(2, doc["object_array.ref_array_field_sequence_id"][1].size());
ASSERT_EQ(1, doc["object_array.ref_array_field_sequence_id"][1][0]);
ASSERT_EQ(1, doc["object_array.ref_array_field_sequence_id"][1][1]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("object_array.ref_array_field_sequence_id", doc[".ref"][0]);
// float/float[] reference fields
doc_json = R"({
"ref_float_field": 1.5
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Cannot add a reference to `coll1.float_field` of type `float`.", add_doc_op.error());
doc_json = R"({
"ref_float_array_field": [1.5]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Cannot add a reference to `coll1.float_array_field` of type `float[]`.", add_doc_op.error());
// bool/bool[] reference fields
doc_json = R"({
"ref_bool_field": "true"
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Cannot add a reference to `coll1.bool_field` of type `bool`.", add_doc_op.error());
doc_json = R"({
"ref_bool_array_field": ["true"]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Cannot add a reference to `coll1.bool_array_field` of type `bool[]`.", add_doc_op.error());
// geopoint/geopoint[] reference fields
doc_json = R"({
"ref_geopoint_field": [13.12631, 80.20252]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Cannot add a reference to `coll1.geopoint_field` of type `geopoint`.", add_doc_op.error());
doc_json = R"({
"ref_geopoint_array_field": [[13.12631, 80.20252]]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Cannot add a reference to `coll1.geopoint_array_field` of type `geopoint[]`.", add_doc_op.error());
// object/object[] reference fields
doc_json = R"({
"ref_object_field": {
"foo": "bar"
}
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Cannot add a reference to `coll1.object_field` of type `object`.", add_doc_op.error());
doc_json = R"({
"ref_object_array_field": [
{
"foo": "bar"
}
]
})"_json;
add_doc_op = coll2->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Cannot add a reference to `coll1.object_array_field` of type `object[]`.", add_doc_op.error());
}
TEST_F(CollectionJoinTest, IndexDocumentHavingAsyncReferenceField) {
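// With async_reference, documents can be indexed before the documents they reference exist; the reference helper field holds a dummy value until the referenced document arrives.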
auto schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "product_id", "type": "string", "reference": "Products.product_id", "async_reference": true}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 73.5,
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 75,
"product_id": "product_a"
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
for (auto i = 0; i < 3; i++) {
auto const doc_id = std::to_string(i);
auto doc = collection_create_op.get()->get(doc_id).get();
ASSERT_EQ(doc_id, doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("product_id_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("product_id_sequence_id"));
// Referenced documents don't exist yet, so a dummy value is present in the reference helper field.
ASSERT_EQ(UINT32_MAX, doc["product_id_sequence_id"]);
}
schema_json =
R"({
"name": "coll1",
"fields": [
{"name": "coll_id", "type": "string"},
{
"name": "object.reference",
"type": "string",
"reference": "Products.product_id",
"optional": true,
"async_reference": true
},
{"name": "object", "type": "object"}
],
"enable_nested_fields": true
})"_json;
documents = {
R"({
"coll_id": "a",
"object": {}
})"_json,
R"({
"coll_id": "b",
"object": {
"reference": "product_b"
}
})"_json,
R"({
"coll_id": "c",
"object": {
"reference": "product_a"
}
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
for (auto i = 0; i < 3; i++) {
auto const doc_id = std::to_string(i);
auto doc = collection_create_op.get()->get(doc_id).get();
ASSERT_EQ(doc_id, doc["id"]);
if (i == 0) {
ASSERT_EQ(0, doc.count(".ref"));
ASSERT_EQ(0, doc.count("object.reference_sequence_id"));
continue;
}
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("object.reference_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("object.reference_sequence_id"));
// Referenced documents don't exist yet, so a dummy value is present in the reference helper field.
ASSERT_EQ(UINT32_MAX, doc["object.reference_sequence_id"]);
}
schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string"},
{"name": "product_description", "type": "string"},
{"name": "rating", "type": "int32"}
]
})"_json;
documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair.",
"rating": "2"
})"_json,
R"({
"product_id": "product_c",
"product_name": "comb",
"product_description": "Experience the natural elegance and gentle care of our handcrafted wooden combs – because your hair deserves the best.",
"rating": "3"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
std::map<std::string, std::string> req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "id:* || $Customers(id:*)"},
{"include_fields", "$Customers(id, strategy:nest_array) as Customers"}
};
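// Every product is returned; products referenced by a customer also get a nested `Customers` array.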
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("1", res_obj["hits"][0]["document"]["id"]);
ASSERT_EQ(0, res_obj["hits"][0]["document"].count("Customers"));
ASSERT_EQ("0", res_obj["hits"][1]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("Customers"));
ASSERT_EQ(2, res_obj["hits"][1]["document"]["Customers"].size());
ASSERT_EQ("0", res_obj["hits"][1]["document"]["Customers"][0]["id"]);
ASSERT_EQ("2", res_obj["hits"][1]["document"]["Customers"][1]["id"]);
req_params = {
{"collection", "coll1"},
{"q", "*"},
{"include_fields", "$Products(product_id)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("2", res_obj["hits"][0]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"].count("Products"));
ASSERT_EQ("product_a", res_obj["hits"][0]["document"]["object"]["Products"]["product_id"]);
ASSERT_EQ("1", res_obj["hits"][1]["document"]["id"]);
ASSERT_EQ(0, res_obj["hits"][1]["document"]["object"].count("Products"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["object"].count("reference"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"]["object"]["reference"]);
ASSERT_EQ(0, res_obj["hits"][1]["document"].count("Products"));
ASSERT_EQ("0", res_obj["hits"][2]["document"]["id"]);
ASSERT_EQ(0, res_obj["hits"][2]["document"].count("Products"));
ASSERT_EQ(0, res_obj["hits"][2]["document"]["object"].count("reference"));
auto doc_json = R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients.",
"rating": "4"
})"_json;
auto add_doc_op = collection_create_op.get()->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "id:* || $Customers(id:*)"},
{"include_fields", "$Customers(id, strategy:nest_array) as Customers"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("2", res_obj["hits"][0]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("Customers"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].size());
ASSERT_EQ("1", res_obj["hits"][0]["document"]["Customers"][0]["id"]);
ASSERT_EQ("1", res_obj["hits"][1]["document"]["id"]);
ASSERT_EQ(0, res_obj["hits"][1]["document"].count("Customers"));
ASSERT_EQ("0", res_obj["hits"][2]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][2]["document"].count("Customers"));
ASSERT_EQ(2, res_obj["hits"][2]["document"]["Customers"].size());
ASSERT_EQ("0", res_obj["hits"][2]["document"]["Customers"][0]["id"]);
ASSERT_EQ("2", res_obj["hits"][2]["document"]["Customers"][1]["id"]);
{
auto const& customers = collectionManager.get_collection_unsafe("Customers");
doc_json = R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_id": "product_b"
})"_json;
add_doc_op = customers->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
auto doc = customers->get("3").get();
ASSERT_EQ("3", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("product_id_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("product_id_sequence_id"));
// When the referenced document is already present, the reference helper field is initialized to its seq_id.
ASSERT_EQ(2, doc["product_id_sequence_id"]);
}
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "id:* || $Customers(id:*)"},
{"include_fields", "$Customers(id, strategy:nest_array) as Customers"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("2", res_obj["hits"][0]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("Customers"));
ASSERT_EQ(2, res_obj["hits"][0]["document"]["Customers"].size());
ASSERT_EQ("1", res_obj["hits"][0]["document"]["Customers"][0]["id"]);
ASSERT_EQ("3", res_obj["hits"][0]["document"]["Customers"][1]["id"]);
ASSERT_EQ("1", res_obj["hits"][1]["document"]["id"]);
ASSERT_EQ(0, res_obj["hits"][1]["document"].count("Customers"));
ASSERT_EQ("0", res_obj["hits"][2]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][2]["document"].count("Customers"));
ASSERT_EQ(2, res_obj["hits"][2]["document"]["Customers"].size());
ASSERT_EQ("0", res_obj["hits"][2]["document"]["Customers"][0]["id"]);
ASSERT_EQ("2", res_obj["hits"][2]["document"]["Customers"][1]["id"]);
{
auto const& coll1 = collectionManager.get_collection_unsafe("coll1");
doc_json = R"({
"coll_id": "d",
"object": {
"reference": "product_d"
}
})"_json;
add_doc_op = coll1->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
auto doc = coll1->get("3").get();
ASSERT_EQ("3", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("object.reference_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("object.reference_sequence_id"));
// product_d doesn't exist yet, so a dummy value is present in the reference helper field.
ASSERT_EQ(UINT32_MAX, doc["object.reference_sequence_id"]);
doc_json = R"({
"product_id": "product_d",
"product_name": "hair oil",
"product_description": "Revitalize your hair with our nourishing hair oil – nature's secret to lustrous, healthy locks.",
"rating": "foo"
})"_json;
add_doc_op = collection_create_op.get()->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
ASSERT_EQ("Field `rating` must be an int32.", add_doc_op.error());
doc = coll1->get("3").get();
ASSERT_EQ("3", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("object.reference_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("object.reference_sequence_id"));
// product_d was not indexed, so the reference helper field should remain unchanged.
ASSERT_EQ(UINT32_MAX, doc["object.reference_sequence_id"]);
doc_json = R"({
"product_id": "product_a",
"product_name": "hair oil",
"product_description": "Revitalize your hair with our nourishing hair oil – nature's secret to lustrous, healthy locks.",
"rating": "4"
})"_json;
add_doc_op = collection_create_op.get()->add(doc_json.dump());
ASSERT_FALSE(add_doc_op.ok());
// A singular reference field can only reference one document.
ASSERT_EQ("Error while updating async reference field `product_id` of collection `Customers`: "
"Document `id: 0` already has a reference to document `0` of `Products` collection, "
"having reference value `product_a`.", add_doc_op.error());
doc = coll1->get("2").get();
ASSERT_EQ("2", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("object.reference_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("object.reference_sequence_id"));
// product_a already existed, so the reference helper field should remain unchanged.
ASSERT_EQ(0, doc["object.reference_sequence_id"]);
doc_json = R"({
"product_id": "product_d",
"product_name": "hair oil",
"product_description": "Revitalize your hair with our nourishing hair oil – nature's secret to lustrous, healthy locks.",
"rating": "4"
})"_json;
add_doc_op = collection_create_op.get()->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = coll1->get("3").get();
ASSERT_EQ("3", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("object.reference_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("object.reference_sequence_id"));
ASSERT_EQ(5, doc["object.reference_sequence_id"]);
}
schema_json =
R"({
"name": "songs",
"fields": [
{ "name": "title", "type": "string" },
{ "name": "genres", "type": "string[]", "reference": "genres.id", "async_reference": true}
]
})"_json;
documents = {
R"({"title":"Dil De Rani", "genres":[]})"_json,
R"({"title":"Corduroy", "genres":["1"]})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
{
auto doc = collection_create_op.get()->get("0").get();
ASSERT_EQ("0", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("genres_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("genres_sequence_id"));
ASSERT_TRUE(doc["genres"].size() == doc["genres_sequence_id"].size());
ASSERT_EQ(0, doc["genres_sequence_id"].size());
doc = collection_create_op.get()->get("1").get();
ASSERT_EQ("1", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("genres_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("genres_sequence_id"));
ASSERT_TRUE(doc["genres"].size() == doc["genres_sequence_id"].size());
ASSERT_EQ(1, doc["genres_sequence_id"].size());
ASSERT_EQ(UINT32_MAX, doc["genres_sequence_id"][0]);
}
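// Create the referenced `genres` collection only now; indexing matching genre docs should resolve the songs' pending references.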
schema_json =
R"({
"name": "genres",
"fields": [
{ "name": "id", "type": "string" },
{ "name": "name", "type": "string" }
]
})"_json;
documents = {
R"({"id":"0","name":"Grunge"})"_json,
R"({"id":"1","name":"Arena rock"})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "songs"},
{"q", "*"},
{"include_fields", "$genres(name, strategy:nest) as genre"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("Corduroy", res_obj["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["genre"].size());
ASSERT_EQ("Arena rock", res_obj["hits"][0]["document"]["genre"][0]["name"]);
ASSERT_EQ("Dil De Rani", res_obj["hits"][1]["document"]["title"].get<std::string>());
ASSERT_EQ(0, res_obj["hits"][1]["document"]["genre"].size());
{
// Insert individual document.
auto const& songs_coll = collectionManager.get_collection_unsafe("songs");
doc_json = R"({"title":"Achilles Last Stand", "genres":["3","0","2"]})"_json;
add_doc_op = songs_coll->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
auto doc = songs_coll->get("2").get();
ASSERT_EQ("2", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("genres_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("genres_sequence_id"));
ASSERT_TRUE(doc["genres"].size() == doc["genres_sequence_id"].size());
ASSERT_EQ(3, doc["genres_sequence_id"].size());
ASSERT_EQ("3", doc["genres"][0]);
ASSERT_EQ(UINT32_MAX, doc["genres_sequence_id"][0]);
ASSERT_EQ("0", doc["genres"][1]);
ASSERT_EQ(0, doc["genres_sequence_id"][1]);
ASSERT_EQ("2", doc["genres"][2]);
ASSERT_EQ(UINT32_MAX, doc["genres_sequence_id"][2]);
auto remove_op = collection_create_op.get()->remove("0");
ASSERT_TRUE(remove_op.ok());
doc = songs_coll->get("2").get();
ASSERT_EQ("2", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("genres_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("genres_sequence_id"));
ASSERT_TRUE(doc["genres"].size() == doc["genres_sequence_id"].size());
ASSERT_EQ(2, doc["genres_sequence_id"].size());
ASSERT_EQ("3", doc["genres"][0]);
ASSERT_EQ(UINT32_MAX, doc["genres_sequence_id"][0]);
ASSERT_EQ("2", doc["genres"][1]);
ASSERT_EQ(UINT32_MAX, doc["genres_sequence_id"][1]);
doc_json = R"({"id":"2","name":"Blues"})"_json;
add_doc_op = collection_create_op.get()->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = songs_coll->get("2").get();
ASSERT_EQ("2", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("genres_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("genres_sequence_id"));
ASSERT_TRUE(doc["genres"].size() == doc["genres_sequence_id"].size());
ASSERT_EQ(2, doc["genres_sequence_id"].size());
ASSERT_EQ("3", doc["genres"][0]);
ASSERT_EQ(UINT32_MAX, doc["genres_sequence_id"][0]);
ASSERT_EQ("2", doc["genres"][1]);
ASSERT_EQ(2, doc["genres_sequence_id"][1]);
}
req_params = {
{"collection", "songs"},
{"q", "*"},
{"include_fields", "$genres(name, strategy:nest) as genre"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("Achilles Last Stand", res_obj["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["genre"].size());
ASSERT_EQ("Blues", res_obj["hits"][0]["document"]["genre"][0]["name"]);
ASSERT_EQ("Corduroy", res_obj["hits"][1]["document"]["title"].get<std::string>());
ASSERT_EQ(1, res_obj["hits"][1]["document"]["genre"].size());
ASSERT_EQ("Arena rock", res_obj["hits"][1]["document"]["genre"][0]["name"]);
ASSERT_EQ("Dil De Rani", res_obj["hits"][2]["document"]["title"].get<std::string>());
ASSERT_EQ(0, res_obj["hits"][2]["document"]["genre"].size());
collectionManager.dispose();
delete store;
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
auto load_op = collectionManager.load(8, 1000);
if(!load_op.ok()) {
LOG(ERROR) << load_op.error();
}
ASSERT_TRUE(load_op.ok());
req_params = {
{"collection", "songs"},
{"q", "*"},
{"include_fields", "$genres(name, strategy:nest) as genre"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("Achilles Last Stand", res_obj["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["genre"].size());
ASSERT_EQ("Blues", res_obj["hits"][0]["document"]["genre"][0]["name"]);
ASSERT_EQ("Corduroy", res_obj["hits"][1]["document"]["title"].get<std::string>());
ASSERT_EQ(1, res_obj["hits"][1]["document"]["genre"].size());
ASSERT_EQ("Arena rock", res_obj["hits"][1]["document"]["genre"][0]["name"]);
ASSERT_EQ("Dil De Rani", res_obj["hits"][2]["document"]["title"].get<std::string>());
ASSERT_EQ(0, res_obj["hits"][2]["document"]["genre"].size());
{
auto const& songs_coll = collectionManager.get_collection_unsafe("songs");
auto doc = songs_coll->get("2").get();
ASSERT_EQ("2", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("genres_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("genres_sequence_id"));
ASSERT_TRUE(doc["genres"].size() == doc["genres_sequence_id"].size());
ASSERT_EQ(2, doc["genres_sequence_id"].size());
ASSERT_EQ("3", doc["genres"][0]);
ASSERT_EQ(UINT32_MAX, doc["genres_sequence_id"][0]);
ASSERT_EQ("2", doc["genres"][1]);
ASSERT_EQ(2, doc["genres_sequence_id"][1]);
auto const& genres_coll = collectionManager.get_collection_unsafe("genres");
doc_json = R"({"id":"3","name":"Metal"})"_json;
add_doc_op = genres_coll->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
doc = songs_coll->get("2").get();
ASSERT_EQ("2", doc["id"]);
ASSERT_EQ(1, doc.count(".ref"));
ASSERT_EQ(1, doc[".ref"].size());
ASSERT_EQ("genres_sequence_id", doc[".ref"][0]);
ASSERT_EQ(1, doc.count("genres_sequence_id"));
ASSERT_TRUE(doc["genres"].size() == doc["genres_sequence_id"].size());
ASSERT_EQ(2, doc["genres_sequence_id"].size());
ASSERT_EQ("3", doc["genres"][0]);
ASSERT_EQ(3, doc["genres_sequence_id"][0]);
ASSERT_EQ("2", doc["genres"][1]);
ASSERT_EQ(2, doc["genres_sequence_id"][1]);
}
}
TEST_F(CollectionJoinTest, UpdateDocumentHavingReferenceField) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string"},
{"name": "product_description", "type": "string"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair."
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients."
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string", "sort": true},
{"name": "product_price", "type": "float"},
{"name": "product_id", "type": "string", "reference": "Products.product_id", "optional": true}
]
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 73.5,
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 75,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_c",
"customer_name": "Jane",
"product_price": 0
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
std::map<std::string, std::string> req_params = {
{"collection", "Customers"},
{"q", "*"},
{"filter_by", "id: 0"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
auto coll = collection_create_op.get();
std::string dirty_values = "REJECT";
auto update_op = coll->update_matching_filter("id: 0", R"({"product_price": 0})", dirty_values);
ASSERT_TRUE(update_op.ok());
req_params = {
{"collection", "Customers"},
{"q", "*"},
{"filter_by", "id: 0"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(0, res_obj["hits"][0]["document"].at("product_price"));
auto doc = coll->get("4").get();
ASSERT_EQ(0, doc.count("product_id_sequence_id"));
update_op = coll->update_matching_filter("id: 4", R"({"product_id": "product_a"})", dirty_values);
ASSERT_TRUE(update_op.ok());
doc = coll->get("4").get();
ASSERT_EQ(1, doc.count("product_id_sequence_id"));
ASSERT_EQ(0, doc["product_id_sequence_id"]);
update_op = coll->update_matching_filter("id: 4", R"({"product_id": "product_b"})", dirty_values);
ASSERT_TRUE(update_op.ok());
doc = coll->get("4").get();
ASSERT_EQ(1, doc.count("product_id_sequence_id"));
ASSERT_EQ(1, doc["product_id_sequence_id"]);
schema_json =
R"({
"name": "Users",
"fields": [
{"name": "name", "type": "string"}
]
})"_json;
documents = {
R"({
"id": "user_a",
"name": "Joe"
})"_json,
R"({
"id": "user_b",
"name": "Dan"
})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Repos",
"fields": [
{"name": "name", "type": "string"},
{"name": "stargazers", "type": "string[]", "reference": "Users.id"}
]
})"_json;
documents = {
R"({
"id": "repo_a",
"name": "Typesense",
"stargazers": ["user_a", "user_b"]
})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "Repos"},
{"q", "*"},
{"include_fields", "$Users(name)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"]["Users"].size());
ASSERT_EQ("Joe", res_obj["hits"][0]["document"]["Users"][0]["name"]);
ASSERT_EQ("Dan", res_obj["hits"][0]["document"]["Users"][1]["name"]);
auto json = R"({
"stargazers": ["user_b"]
})"_json;
auto add_op = collection_create_op.get()->add(json.dump(), index_operation_t::UPDATE, "repo_a", DIRTY_VALUES::REJECT);
ASSERT_TRUE(add_op.ok());
req_params = {
{"collection", "Repos"},
{"q", "*"},
{"include_fields", "$Users(name)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Users"].size());
ASSERT_EQ("Dan", res_obj["hits"][0]["document"]["Users"][0]["name"]);
}
TEST_F(CollectionJoinTest, JoinAfterUpdateOfArrayField) {
auto exercise_schema =
R"({
"name": "exercises",
"enable_nested_fields": true,
"fields": [
{"name":"bodyParts","reference":"bodyParts.uid","type":"string[]"},
{"name":"name","type":"string"}]
})"_json;
auto collection_create_op = collectionManager.create_collection(exercise_schema);
ASSERT_TRUE(collection_create_op.ok());
auto exercise_coll = collection_create_op.get();
auto body_parts_schema =
R"({
"name": "bodyParts",
"enable_nested_fields": true,
"fields": [
{"name":"uid","type":"string"},
{"name":"name","type":"string"}]
})"_json;
collection_create_op = collectionManager.create_collection(body_parts_schema);
ASSERT_TRUE(collection_create_op.ok());
auto part_coll = collection_create_op.get();
nlohmann::json body_part_doc;
body_part_doc["name"] = "Part 1";
body_part_doc["uid"] = "abcd1";
ASSERT_TRUE(part_coll->add(body_part_doc.dump()).ok());
body_part_doc["name"] = "Part 2";
body_part_doc["uid"] = "abcd2";
ASSERT_TRUE(part_coll->add(body_part_doc.dump()).ok());
body_part_doc["name"] = "Part 3";
body_part_doc["uid"] = "abcd3";
ASSERT_TRUE(part_coll->add(body_part_doc.dump()).ok());
nlohmann::json exercise_doc;
exercise_doc["id"] = "0";
exercise_doc["name"] = "Example 1";
exercise_doc["bodyParts"] = {"abcd1", "abcd2", "abcd3"};
ASSERT_TRUE(exercise_coll->add(exercise_doc.dump()).ok());
// search for the document
std::map<std::string, std::string> req_params = {
{"collection", "exercises"},
{"q", "*"},
{"include_fields", "$bodyParts(uid, name, strategy:nest) as parts"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res["hits"][0]["document"]["bodyParts"].size());
ASSERT_EQ(3, res["hits"][0]["document"]["parts"].size());
// now update document to remove an array element
exercise_doc = R"({
"id": "0",
"bodyParts": ["abcd1", "abcd3"]
})"_json;
ASSERT_TRUE(exercise_coll->add(exercise_doc.dump(), UPDATE).ok());
req_params = {
{"collection", "exercises"},
{"q", "*"},
{"include_fields", "$bodyParts(uid, name, strategy:nest) as parts"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res["hits"][0]["document"]["bodyParts"].size());
ASSERT_EQ(2, res["hits"][0]["document"]["parts"].size());
// remove both elements
exercise_doc["bodyParts"] = nullptr;
ASSERT_TRUE(exercise_coll->add(exercise_doc.dump(), UPDATE).ok());
req_params = {
{"collection", "exercises"},
{"q", "*"},
{"include_fields", "$bodyParts(uid, name, strategy:nest) as parts"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res = nlohmann::json::parse(json_res);
ASSERT_EQ(0, res["hits"][0]["document"]["bodyParts"].size());
ASSERT_EQ(0, res["hits"][0]["document"]["parts"].size());
exercise_doc["bodyParts"] = {"abcd1"};
ASSERT_TRUE(exercise_coll->add(exercise_doc.dump(), UPDATE).ok());
req_params = {
{"collection", "exercises"},
{"q", "*"},
{"include_fields", "$bodyParts(uid, name, strategy:nest) as parts"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res["hits"][0]["document"]["bodyParts"].size());
ASSERT_EQ(1, res["hits"][0]["document"]["parts"].size());
exercise_doc["bodyParts"] = nlohmann::json::array();
ASSERT_TRUE(exercise_coll->add(exercise_doc.dump(), UPDATE).ok());
req_params = {
{"collection", "exercises"},
{"q", "*"},
{"include_fields", "$bodyParts(uid, name, strategy:nest) as parts"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res = nlohmann::json::parse(json_res);
ASSERT_EQ(0, res["hits"][0]["document"]["bodyParts"].size());
ASSERT_EQ(0, res["hits"][0]["document"]["parts"].size());
}
TEST_F(CollectionJoinTest, FilterByReference_SingleMatch) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string"},
{"name": "product_description", "type": "string"},
{"name": "rating", "type": "int32"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair.",
"rating": "2"
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients.",
"rating": "4"
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "product_id", "type": "string", "reference": "Products.product_id"}
]
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 73.5,
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 75,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_id": "product_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Dummy",
"fields": [
{"name": "dummy_id", "type": "string"}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
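// Malformed or unknown reference filters should fail with descriptive errors.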
auto coll = collectionManager.get_collection_unsafe("Products");
auto search_op = coll->search("s", {"product_name"}, "$foo:=customer_a", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ(search_op.error(), "Could not parse the reference filter: `$foo:=customer_a`.");
search_op = coll->search("s", {"product_name"}, "$foo(:=customer_a", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ(search_op.error(), "Could not parse the reference filter: `$foo(:=customer_a`.");
search_op = coll->search("s", {"product_name"}, "$foo(:=customer_a)", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ(search_op.error(), "Referenced collection `foo` not found.");
search_op = coll->search("s", {"product_name"}, "$Dummy(dummy_id:=customer_a)", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ(search_op.error(), "Failed to join on `Dummy`: No reference field found.");
search_op = coll->search("s", {"product_name"}, "$Customers(foo:=customer_a)", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ(search_op.error(), "Failed to join on `Customers` collection: Could not find a filter "
"field named `foo` in the schema.");
search_op = coll->search("s", {"product_name"}, "$Customers (customer_id:=customer_a) && $Customers(product_price:<100)", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ(search_op.error(), "More than one joins found for collection `Customers` in the `filter_by`. Instead of "
"providing separate join conditions like `$customer_product_prices(customer_id:=customer_a)"
" && $customer_product_prices(custom_price:<100)`, the join condition should be"
" provided as a single filter expression like `$customer_product_prices(customer_id:=customer_a"
" && custom_price:<100)`");
auto result = coll->search("s", {"product_name"}, "$Customers(customer_id:=customer_a && product_price:<100)", {},
{}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD).get();
ASSERT_EQ(1, result["found"].get<size_t>());
ASSERT_EQ(1, result["hits"].size());
ASSERT_EQ("soap", result["hits"][0]["document"]["product_name"].get<std::string>());
std::map<std::string, std::string> req_params = {
{"collection", "Customers"},
{"q", "Dan"},
{"query_by", "customer_name"},
{"filter_by", "$Products(foo:>3)"},
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op_bool = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op_bool.ok());
ASSERT_EQ(search_op_bool.error(), "Failed to join on `Products` collection: Could not find a filter "
"field named `foo` in the schema.");
req_params = {
{"collection", "Customers"},
{"q", "Dan"},
{"query_by", "customer_name"},
{"filter_by", "$Products(rating:>3)"},
{"include_fields", "$Products(*, strategy:merge)"},
};
search_op_bool = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op_bool.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ("soap", res_obj["hits"][0]["document"]["product_name"].get<std::string>());
req_params = {
{"collection", "Customers"},
{"q", "Dan"},
{"query_by", "customer_name"},
{"filter_by", "$Products(id:*) && product_price:>100"},
{"include_fields", "$Products(*, strategy:merge)"},
};
search_op_bool = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op_bool.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ("soap", res_obj["hits"][0]["document"]["product_name"].get<std::string>());
collectionManager.drop_collection("Customers");
collectionManager.drop_collection("Products");
}
TEST_F(CollectionJoinTest, FilterByReference_MultipleMatch) {
auto schema_json =
R"({
"name": "Users",
"fields": [
{"name": "user_id", "type": "string"},
{"name": "user_name", "type": "string"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"user_id": "user_a",
"user_name": "Roshan"
})"_json,
R"({
"user_id": "user_b",
"user_name": "Ruby"
})"_json,
R"({
"user_id": "user_c",
"user_name": "Joe"
})"_json,
R"({
"user_id": "user_d",
"user_name": "Aby"
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Repos",
"fields": [
{"name": "repo_id", "type": "string"},
{"name": "repo_content", "type": "string"}
]
})"_json;
documents = {
R"({
"repo_id": "repo_a",
"repo_content": "body1"
})"_json,
R"({
"repo_id": "repo_b",
"repo_content": "body2"
})"_json,
R"({
"repo_id": "repo_c",
"repo_content": "body3"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Links",
"fields": [
{"name": "repo_id", "type": "string", "reference": "Repos.repo_id"},
{"name": "user_id", "type": "string", "reference": "Users.user_id"}
]
})"_json;
documents = {
R"({
"repo_id": "repo_a",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_a",
"user_id": "user_c"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_a"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_d"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_a"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_c"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_d"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
auto coll = collectionManager.get_collection_unsafe("Users");
// Search for users linked to repo_b
auto result = coll->search("R", {"user_name"}, "$Links(repo_id:=repo_b)", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD).get();
ASSERT_EQ(2, result["found"].get<size_t>());
ASSERT_EQ(2, result["hits"].size());
ASSERT_EQ("user_b", result["hits"][0]["document"]["user_id"].get<std::string>());
ASSERT_EQ("user_a", result["hits"][1]["document"]["user_id"].get<std::string>());
collectionManager.drop_collection("Users");
collectionManager.drop_collection("Repos");
collectionManager.drop_collection("Links");
}
TEST_F(CollectionJoinTest, AndFilterResults_NoReference) {
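// Intersect two plain filter results (no reference metadata) and verify only the common doc ids remain.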
filter_result_t a;
a.count = 9;
a.docs = new uint32_t[a.count];
for (size_t i = 0; i < a.count; i++) {
a.docs[i] = i;
}
filter_result_t b;
b.count = 0;
uint32_t limit = 10;
b.docs = new uint32_t[limit];
for (size_t i = 2; i < limit; i++) {
if (i % 3 == 0) {
b.docs[b.count++] = i;
}
}
// a.docs: [0..8], b.docs: [3, 6, 9]
filter_result_t result;
filter_result_t::and_filter_results(a, b, result);
ASSERT_EQ(2, result.count);
ASSERT_EQ(nullptr, result.coll_to_references);
std::vector<uint32_t> docs = {3, 6};
for(size_t i = 0; i < result.count; i++) {
ASSERT_EQ(docs[i], result.docs[i]);
}
}
TEST_F(CollectionJoinTest, AndFilterResults_WithReferences) {
filter_result_t a;
a.count = 9;
a.docs = new uint32_t[a.count];
a.coll_to_references = new std::map<std::string, reference_filter_result_t>[a.count] {};
for (size_t i = 0; i < a.count; i++) {
a.docs[i] = i;
auto& reference = a.coll_to_references[i];
// Each document has only one reference, for brevity.
auto reference_docs = new uint32_t[1];
reference_docs[0] = 10 - i;
reference["foo"] = reference_filter_result_t(1, reference_docs);
}
filter_result_t b;
b.count = 0;
uint32_t limit = 10;
b.docs = new uint32_t[limit];
b.coll_to_references = new std::map<std::string, reference_filter_result_t>[limit] {};
for (size_t i = 2; i < limit; i++) {
if (i % 3 == 0) {
b.docs[b.count] = i;
auto& reference = b.coll_to_references[b.count++];
auto reference_docs = new uint32_t[1];
reference_docs[0] = 2 * i;
reference["bar"] = reference_filter_result_t(1, reference_docs);
}
}
// a.docs: [0..8], b.docs: [3, 6, 9]
filter_result_t result;
filter_result_t::and_filter_results(a, b, result);
ASSERT_EQ(2, result.count);
ASSERT_EQ(2, result.coll_to_references[0].size());
ASSERT_EQ(1, result.coll_to_references[0].count("foo"));
ASSERT_EQ(1, result.coll_to_references[0].count("bar"));
std::vector<uint32_t> docs = {3, 6}, foo_reference = {7, 4}, bar_reference = {6, 12};
for(size_t i = 0; i < result.count; i++) {
ASSERT_EQ(docs[i], result.docs[i]);
// The result should contain the correct references to the foo and bar collections.
ASSERT_EQ(1, result.coll_to_references[i].at("foo").count);
ASSERT_EQ(foo_reference[i], result.coll_to_references[i].at("foo").docs[0]);
ASSERT_EQ(1, result.coll_to_references[i].at("bar").count);
ASSERT_EQ(bar_reference[i], result.coll_to_references[i].at("bar").docs[0]);
}
}
TEST_F(CollectionJoinTest, OrFilterResults_NoReference) {
filter_result_t a, b;
a.count = 0;
uint32_t limit = 10;
a.docs = new uint32_t[limit];
for (size_t i = 2; i < limit; i++) {
if (i % 3 == 0) {
a.docs[a.count++] = i;
}
}
// a.docs: [3, 6, 9], b.docs: []
filter_result_t result1;
filter_result_t::or_filter_results(a, b, result1);
ASSERT_EQ(3, result1.count);
ASSERT_EQ(nullptr, result1.coll_to_references);
std::vector<uint32_t> expected = {3, 6, 9};
for (size_t i = 0; i < result1.count; i++) {
ASSERT_EQ(expected[i], result1.docs[i]);
}
b.count = 9;
b.docs = new uint32_t[b.count];
for (size_t i = 0; i < b.count; i++) {
b.docs[i] = i;
}
// a.docs: [3, 6, 9], b.docs: [0..8]
filter_result_t result2;
filter_result_t::or_filter_results(a, b, result2);
ASSERT_EQ(10, result2.count);
ASSERT_EQ(nullptr, result2.coll_to_references);
expected = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
for (size_t i = 0; i < result2.count; i++) {
ASSERT_EQ(expected[i], result2.docs[i]);
}
filter_result_t c, result3;
std::vector<uint32_t> vec = {0, 4, 5};
c.count = vec.size();
c.docs = new uint32_t[vec.size()];
auto j = 0;
for(auto i: vec) {
c.docs[j++] = i;
}
// b.docs: [0..8], c.docs: [0, 4, 5]
filter_result_t::or_filter_results(b, c, result3);
ASSERT_EQ(9, result3.count);
ASSERT_EQ(nullptr, result3.coll_to_references);
expected = {0, 1, 2, 3, 4, 5, 6, 7, 8};
for(size_t i = 0; i < result3.count; i++) {
ASSERT_EQ(expected[i], result3.docs[i]);
}
}
TEST_F(CollectionJoinTest, OrFilterResults_WithReferences) {
filter_result_t a, b;
uint32_t limit = 10;
a.count = 0;
a.docs = new uint32_t[limit];
a.coll_to_references = new std::map<std::string, reference_filter_result_t>[limit] {};
for (size_t i = 2; i < limit; i++) {
if (i % 3 == 0) {
a.docs[a.count] = i;
auto& reference = a.coll_to_references[a.count++];
auto reference_docs = new uint32_t[1];
reference_docs[0] = 2 * i;
reference["foo"] = reference_filter_result_t(1, reference_docs);
}
}
// a.docs: [3, 6, 9], b.docs: []
filter_result_t result1;
filter_result_t::or_filter_results(a, b, result1);
ASSERT_EQ(3, result1.count);
ASSERT_EQ(1, result1.coll_to_references[0].size());
ASSERT_EQ(1, result1.coll_to_references[0].count("foo"));
std::vector<uint32_t> expected = {3, 6, 9}, foo_reference = {6, 12, 18};
for (size_t i = 0; i < result1.count; i++) {
ASSERT_EQ(expected[i], result1.docs[i]);
ASSERT_EQ(1, result1.coll_to_references[i].at("foo").count);
ASSERT_EQ(foo_reference[i], result1.coll_to_references[i].at("foo").docs[0]);
}
b.count = 9;
b.docs = new uint32_t[b.count];
b.coll_to_references = new std::map<std::string, reference_filter_result_t>[b.count] {};
for (size_t i = 0; i < b.count; i++) {
b.docs[i] = i;
auto& reference = b.coll_to_references[i];
auto reference_docs = new uint32_t[1];
reference_docs[0] = 10 - i;
reference["bar"] = reference_filter_result_t(1, reference_docs);
}
// a.docs: [3, 6, 9], b.docs: [0..8]
filter_result_t result2;
filter_result_t::or_filter_results(a, b, result2);
ASSERT_EQ(10, result2.count);
expected = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
// doc_id -> reference_id
std::map<uint32_t, uint32_t> foo_map = {{3, 6}, {6, 12}, {9, 18}}, bar_map = {{0, 10}, {1, 9}, {2, 8}, {3, 7},
{4, 6}, {5, 5}, {6, 4}, {7, 3}, {8, 2}};
for (size_t i = 0; i < result2.count; i++) {
ASSERT_EQ(expected[i], result2.docs[i]);
if (foo_map.count(i) != 0) {
ASSERT_EQ(1, result2.coll_to_references[i].at("foo").count);
ASSERT_EQ(foo_map[i], result2.coll_to_references[i].at("foo").docs[0]);
} else {
// foo didn't have any reference to the current doc.
ASSERT_EQ(0, result2.coll_to_references[i].count("foo"));
}
if (bar_map.count(i) != 0) {
ASSERT_EQ(1, result2.coll_to_references[i].at("bar").count);
ASSERT_EQ(bar_map[i], result2.coll_to_references[i].at("bar").docs[0]);
} else {
ASSERT_EQ(0, result2.coll_to_references[i].count("bar"));
}
}
filter_result_t c, result3;
std::map<uint32_t, uint32_t> baz_map = {{0, 2}, {4, 0}, {5, 8}};
c.count = baz_map.size();
c.docs = new uint32_t[baz_map.size()];
c.coll_to_references = new std::map<std::string, reference_filter_result_t>[baz_map.size()] {};
auto j = 0;
for(auto i: baz_map) {
c.docs[j] = i.first;
auto& reference = c.coll_to_references[j++];
auto reference_docs = new uint32_t[1];
reference_docs[0] = i.second;
reference["baz"] = reference_filter_result_t(1, reference_docs);
}
// b.docs: [0..8], c.docs: [0, 4, 5]
filter_result_t::or_filter_results(b, c, result3);
ASSERT_EQ(9, result3.count);
expected = {0, 1, 2, 3, 4, 5, 6, 7, 8};
for (size_t i = 0; i < result3.count; i++) {
ASSERT_EQ(expected[i], result3.docs[i]);
if (bar_map.count(i) != 0) {
ASSERT_EQ(1, result3.coll_to_references[i].at("bar").count);
ASSERT_EQ(bar_map[i], result3.coll_to_references[i].at("bar").docs[0]);
} else {
ASSERT_EQ(0, result3.coll_to_references[i].count("bar"));
}
if (baz_map.count(i) != 0) {
ASSERT_EQ(1, result3.coll_to_references[i].at("baz").count);
ASSERT_EQ(baz_map[i], result3.coll_to_references[i].at("baz").docs[0]);
} else {
ASSERT_EQ(0, result3.coll_to_references[i].count("baz"));
}
}
}
TEST_F(CollectionJoinTest, FilterByNReferences) {
auto schema_json =
R"({
"name": "Users",
"fields": [
{"name": "user_id", "type": "string"},
{"name": "user_name", "type": "string"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"user_id": "user_a",
"user_name": "Roshan"
})"_json,
R"({
"user_id": "user_b",
"user_name": "Ruby"
})"_json,
R"({
"user_id": "user_c",
"user_name": "Joe"
})"_json,
R"({
"user_id": "user_d",
"user_name": "Aby"
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Repos",
"fields": [
{"name": "repo_id", "type": "string"},
{"name": "repo_content", "type": "string"},
{"name": "repo_stars", "type": "int32"},
{"name": "repo_is_private", "type": "bool"}
]
})"_json;
documents = {
R"({
"repo_id": "repo_a",
"repo_content": "body1",
"repo_stars": 431,
"repo_is_private": true
})"_json,
R"({
"repo_id": "repo_b",
"repo_content": "body2",
"repo_stars": 4562,
"repo_is_private": false
})"_json,
R"({
"repo_id": "repo_c",
"repo_content": "body3",
"repo_stars": 945,
"repo_is_private": false
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Links",
"fields": [
{"name": "repo_id", "type": "string", "reference": "Repos.repo_id"},
{"name": "user_id", "type": "string", "reference": "Users.user_id"}
]
})"_json;
documents = {
R"({
"repo_id": "repo_a",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_a",
"user_id": "user_c"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_a"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_d"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_a"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_c"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_d"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Organizations",
"fields": [
{"name": "org_id", "type": "string"},
{"name": "org_name", "type": "string"}
]
})"_json;
documents = {
R"({
"org_id": "org_a",
"org_name": "Typesense"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Participants",
"fields": [
{"name": "user_id", "type": "string", "reference": "Users.user_id"},
{"name": "org_id", "type": "string", "reference": "Organizations.org_id"}
]
})"_json;
documents = {
R"({
"user_id": "user_a",
"org_id": "org_a"
})"_json,
R"({
"user_id": "user_b",
"org_id": "org_a"
})"_json,
R"({
"user_id": "user_d",
"org_id": "org_a"
})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
auto coll = collectionManager.get_collection_unsafe("Users");
// Search for users within an organization with access to a particular repo.
auto result = coll->search("R", {"user_name"}, "$Participants(org_id:=org_a) && $Links(repo_id:=repo_b)", {}, {}, {0},
10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD).get();
ASSERT_EQ(2, result["found"].get<size_t>());
ASSERT_EQ(2, result["hits"].size());
ASSERT_EQ("user_b", result["hits"][0]["document"]["user_id"].get<std::string>());
ASSERT_EQ("user_a", result["hits"][1]["document"]["user_id"].get<std::string>());
collectionManager.drop_collection("Users");
collectionManager.drop_collection("Repos");
collectionManager.drop_collection("Links");
}
TEST_F(CollectionJoinTest, FilterByNestedReferences) {
auto schema_json =
R"({
"name": "Coll_A",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"title": "coll_a_0"
})"_json,
R"({
"title": "coll_a_1"
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Coll_B",
"fields": [
{"name": "title", "type": "string"},
{"name": "ref_coll_a", "type": "string", "reference": "Coll_A.id"}
]
})"_json;
documents = {
R"({
"title": "coll_b_0",
"ref_coll_a": "1"
})"_json,
R"({
"title": "coll_b_1",
"ref_coll_a": "0"
})"_json,
R"({
"title": "coll_b_2",
"ref_coll_a": "0"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Coll_C",
"fields": [
{"name": "title", "type": "string"},
{"name": "ref_coll_b", "type": "string[]", "reference": "Coll_B.id"}
]
})"_json;
documents = {
R"({
"title": "coll_c_0",
"ref_coll_b": ["0"]
})"_json,
R"({
"title": "coll_c_1",
"ref_coll_b": ["1"]
})"_json,
R"({
"title": "coll_c_2",
"ref_coll_b": ["0", "1"]
})"_json,
R"({
"title": "coll_c_3",
"ref_coll_b": ["2"]
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
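// Nested join: filter Coll_A through Coll_B documents that are themselves referenced by the given Coll_C documents.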
std::map<std::string, std::string> req_params = {
{"collection", "Coll_A"},
{"q", "*"},
{"filter_by", "$Coll_B($Coll_C(id: [1, 3]))"},
{"include_fields", "title, $Coll_B(title, $Coll_C(title))"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
nlohmann::json res_obj = nlohmann::json::parse(json_res);
// coll_b_1 <- coll_c_1
// coll_a_0 <
// coll_b_2 <- coll_c_3
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ("coll_a_0", res_obj["hits"][0]["document"]["title"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["Coll_B"].size());
ASSERT_EQ("coll_b_1", res_obj["hits"][0]["document"]["Coll_B"][0]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_B"][0]["Coll_C"].size());
ASSERT_EQ("coll_c_1", res_obj["hits"][0]["document"]["Coll_B"][0]["Coll_C"][0]["title"]);
ASSERT_EQ("coll_b_2", res_obj["hits"][0]["document"]["Coll_B"][1]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_B"][1]["Coll_C"].size());
ASSERT_EQ("coll_c_3", res_obj["hits"][0]["document"]["Coll_B"][1]["Coll_C"][0]["title"]);
req_params = {
{"collection", "Coll_A"},
{"q", "*"},
{"filter_by", "$Coll_B($Coll_C(id: != 0))"},
{"include_fields", "title, $Coll_B(title, $Coll_C(title), strategy:nest_array)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
// coll_a_1 <- coll_b_0 <- coll_c_2
//
// coll_b_1 <- coll_c_1, coll_c_2
// coll_a_0 <
// coll_b_2 <- coll_c_3
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ("coll_a_1", res_obj["hits"][0]["document"]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_B"].size());
ASSERT_EQ("coll_b_0", res_obj["hits"][0]["document"]["Coll_B"][0]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_B"][0]["Coll_C"].size());
ASSERT_EQ("coll_c_2", res_obj["hits"][0]["document"]["Coll_B"][0]["Coll_C"][0]["title"]);
ASSERT_EQ("coll_a_0", res_obj["hits"][1]["document"]["title"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"]["Coll_B"].size());
ASSERT_EQ("coll_b_1", res_obj["hits"][1]["document"]["Coll_B"][0]["title"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"]["Coll_B"][0]["Coll_C"].size());
ASSERT_EQ("coll_c_1", res_obj["hits"][1]["document"]["Coll_B"][0]["Coll_C"][0]["title"]);
ASSERT_EQ("coll_c_2", res_obj["hits"][1]["document"]["Coll_B"][0]["Coll_C"][1]["title"]);
ASSERT_EQ("coll_b_2", res_obj["hits"][1]["document"]["Coll_B"][1]["title"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"]["Coll_B"][1]["Coll_C"].size());
ASSERT_EQ("coll_c_3", res_obj["hits"][1]["document"]["Coll_B"][1]["Coll_C"][0]["title"]);
req_params = {
{"collection", "Coll_C"},
{"q", "*"},
{"filter_by", "$Coll_B($Coll_A(id: 0))"},
{"include_fields", "title, $Coll_B(title, $Coll_A(title))"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
// coll_c_3 -> coll_b_2 -> coll_a_0
//
// coll_c_2 -> coll_b_1 -> coll_a_0
//
// coll_c_1 -> coll_b_1 -> coll_a_0
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ("coll_c_3", res_obj["hits"][0]["document"]["title"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["Coll_B"].size());
ASSERT_EQ("coll_b_2", res_obj["hits"][0]["document"]["Coll_B"]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_B"]["Coll_A"].size());
ASSERT_EQ("coll_a_0", res_obj["hits"][0]["document"]["Coll_B"]["Coll_A"]["title"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"].size());
ASSERT_EQ("coll_c_2", res_obj["hits"][1]["document"]["title"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"]["Coll_B"].size());
ASSERT_EQ("coll_b_1", res_obj["hits"][1]["document"]["Coll_B"]["title"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"]["Coll_B"]["Coll_A"].size());
ASSERT_EQ("coll_a_0", res_obj["hits"][1]["document"]["Coll_B"]["Coll_A"]["title"]);
ASSERT_EQ(2, res_obj["hits"][2]["document"].size());
ASSERT_EQ("coll_c_1", res_obj["hits"][2]["document"]["title"]);
ASSERT_EQ(2, res_obj["hits"][2]["document"]["Coll_B"].size());
ASSERT_EQ("coll_b_1", res_obj["hits"][2]["document"]["Coll_B"]["title"]);
ASSERT_EQ(1, res_obj["hits"][2]["document"]["Coll_B"]["Coll_A"].size());
ASSERT_EQ("coll_a_0", res_obj["hits"][2]["document"]["Coll_B"]["Coll_A"]["title"]);
schema_json =
R"({
"name": "Coll_D",
"fields": [
{"name": "title", "type": "string"},
{"name": "ref_coll_c", "type": "string[]", "reference": "Coll_C.id"}
]
})"_json;
documents = {
R"({
"title": "coll_d_0",
"ref_coll_c": []
})"_json,
R"({
"title": "coll_d_1",
"ref_coll_c": ["1", "3"]
})"_json,
R"({
"title": "coll_d_2",
"ref_coll_c": ["2", "3"]
})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "Coll_B"},
{"q", "*"},
{"filter_by", "$Coll_C($Coll_D(id: *))"},
{"include_fields", "title, $Coll_C(title, $Coll_D(title, strategy:nest_array), strategy:nest_array)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
// coll_b_2 <- coll_c_3 <- coll_d_1, coll_d_2
//
// coll_c_1 <- coll_d_1
// coll_b_1 <
// coll_c_2 <- coll_d_2
//
// coll_b_0 <- coll_c_2 <- coll_d_2
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ("coll_b_2", res_obj["hits"][0]["document"]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_C"].size());
ASSERT_EQ("coll_c_3", res_obj["hits"][0]["document"]["Coll_C"][0]["title"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["Coll_C"][0]["Coll_D"].size());
ASSERT_EQ("coll_d_1", res_obj["hits"][0]["document"]["Coll_C"][0]["Coll_D"][0]["title"]);
ASSERT_EQ("coll_d_2", res_obj["hits"][0]["document"]["Coll_C"][0]["Coll_D"][1]["title"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"].size());
ASSERT_EQ("coll_b_1", res_obj["hits"][1]["document"]["title"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"]["Coll_C"].size());
ASSERT_EQ("coll_c_1", res_obj["hits"][1]["document"]["Coll_C"][0]["title"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"]["Coll_C"][0]["Coll_D"].size());
ASSERT_EQ("coll_d_1", res_obj["hits"][1]["document"]["Coll_C"][0]["Coll_D"][0]["title"]);
ASSERT_EQ("coll_c_2", res_obj["hits"][1]["document"]["Coll_C"][1]["title"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"]["Coll_C"][1]["Coll_D"].size());
ASSERT_EQ("coll_d_2", res_obj["hits"][1]["document"]["Coll_C"][1]["Coll_D"][0]["title"]);
ASSERT_EQ(2, res_obj["hits"][2]["document"].size());
ASSERT_EQ("coll_b_0", res_obj["hits"][2]["document"]["title"]);
ASSERT_EQ(1, res_obj["hits"][2]["document"]["Coll_C"].size());
ASSERT_EQ("coll_c_2", res_obj["hits"][2]["document"]["Coll_C"][0]["title"]);
ASSERT_EQ(1, res_obj["hits"][2]["document"]["Coll_C"][0]["Coll_D"].size());
ASSERT_EQ("coll_d_2", res_obj["hits"][2]["document"]["Coll_C"][0]["Coll_D"][0]["title"]);
req_params = {
{"collection", "Coll_D"},
{"q", "*"},
{"filter_by", "$Coll_C($Coll_B(id: [0, 1]))"},
{"include_fields", "title, $Coll_C(title, $Coll_B(title, strategy:nest_array), strategy:nest_array)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
// coll_d_2 -> coll_c_2 -> coll_b_0, coll_b_1
//
// coll_d_1 -> coll_c_1 -> coll_b_1
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ("coll_d_2", res_obj["hits"][0]["document"]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_C"].size());
ASSERT_EQ("coll_c_2", res_obj["hits"][0]["document"]["Coll_C"][0]["title"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["Coll_C"][0]["Coll_B"].size());
ASSERT_EQ("coll_b_0", res_obj["hits"][0]["document"]["Coll_C"][0]["Coll_B"][0]["title"]);
ASSERT_EQ("coll_b_1", res_obj["hits"][0]["document"]["Coll_C"][0]["Coll_B"][1]["title"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"].size());
ASSERT_EQ("coll_d_1", res_obj["hits"][1]["document"]["title"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"]["Coll_C"].size());
ASSERT_EQ("coll_c_1", res_obj["hits"][1]["document"]["Coll_C"][0]["title"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"]["Coll_C"][0]["Coll_B"].size());
ASSERT_EQ("coll_b_1", res_obj["hits"][1]["document"]["Coll_C"][0]["Coll_B"][0]["title"]);
auto doc = R"({
"title": "coll_b_3",
"ref_coll_a": "0"
})"_json;
auto doc_add_op = collectionManager.get_collection("Coll_B")->add(doc.dump());
if (!doc_add_op.ok()) {
LOG(INFO) << doc_add_op.error();
}
ASSERT_TRUE(doc_add_op.ok());
doc = R"({
"title": "coll_c_4",
"ref_coll_b": ["3"]
})"_json;
doc_add_op = collectionManager.get_collection("Coll_C")->add(doc.dump());
if (!doc_add_op.ok()) {
LOG(INFO) << doc_add_op.error();
}
ASSERT_TRUE(doc_add_op.ok());
doc = R"({
"title": "coll_d_3",
"ref_coll_c": ["4"]
})"_json;
doc_add_op = collectionManager.get_collection("Coll_D")->add(doc.dump());
if (!doc_add_op.ok()) {
LOG(INFO) << doc_add_op.error();
}
ASSERT_TRUE(doc_add_op.ok());
req_params = {
{"collection", "Coll_D"},
{"q", "coll_d_3"},
{"query_by", "title"},
{"filter_by", "$Coll_C(id:*)"},
// We will be able to include the Coll_A document since we join on Coll_C, which references Coll_B, which in
// turn references Coll_A.
{"include_fields", "title, $Coll_C(title), $Coll_B(title, $Coll_A(title))"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
// coll_d_3 -> coll_c_4 -> coll_b_3 -> coll_a_0
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ("coll_d_3", res_obj["hits"][0]["document"]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_C"].size());
ASSERT_EQ("coll_c_4", res_obj["hits"][0]["document"]["Coll_C"]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_B"].size());
ASSERT_EQ("coll_b_3", res_obj["hits"][0]["document"]["Coll_B"][0]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_B"][0].count("Coll_A"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_B"][0]["Coll_A"].size());
ASSERT_EQ("coll_a_0", res_obj["hits"][0]["document"]["Coll_B"][0]["Coll_A"]["title"]);
schema_json =
R"({
"name": "Coll_E",
"fields": [
{"name": "title", "type": "string"},
{"name": "ref_coll_b", "type": "string", "reference": "Coll_B.id"}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
doc = R"({
"title": "coll_e_0",
"ref_coll_b": "3"
})"_json;
doc_add_op = collectionManager.get_collection("Coll_E")->add(doc.dump());
if (!doc_add_op.ok()) {
LOG(INFO) << doc_add_op.error();
}
ASSERT_TRUE(doc_add_op.ok());
req_params = {
{"collection", "Coll_D"},
{"q", "coll_d_3"},
{"query_by", "title"},
{"filter_by", "$Coll_C(id:*)"},
// We won't be able to include the Coll_E document since we neither join on it nor have any reference to it.
{"include_fields", "title, $Coll_C(title), $Coll_B(title, $Coll_E(title))"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ("coll_d_3", res_obj["hits"][0]["document"]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_C"].size());
ASSERT_EQ("coll_c_4", res_obj["hits"][0]["document"]["Coll_C"]["title"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Coll_B"].size());
ASSERT_EQ("coll_b_3", res_obj["hits"][0]["document"]["Coll_B"][0]["title"]);
ASSERT_EQ(0, res_obj["hits"][0]["document"]["Coll_B"][0].count("Coll_E"));
schema_json =
R"({
"name": "products",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
documents = {
R"({
"title": "shampoo"
})"_json,
R"({
"title": "soap"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "product_variants",
"fields": [
{"name": "title", "type": "string"},
{"name": "product_id", "type": "string", "reference": "products.id"}
]
})"_json;
documents = {
R"({
"title": "panteen",
"product_id": "0"
})"_json,
R"({
"title": "loreal",
"product_id": "0"
})"_json,
R"({
"title": "pears",
"product_id": "1"
})"_json,
R"({
"title": "lifebuoy",
"product_id": "1"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "retailers",
"fields": [
{"name": "title", "type": "string"},
{"name": "location", "type": "geopoint"}
]
})"_json;
documents = {
R"({
"title": "retailer 1",
"location": [48.872576479306765, 2.332291112241466]
})"_json,
R"({
"title": "retailer 2",
"location": [48.888286721920934, 2.342340862419206]
})"_json,
R"({
"title": "retailer 3",
"location": [48.87538726829884, 2.296113163780903]
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "inventory",
"fields": [
{"name": "qty", "type": "int32"},
{"name": "retailer_id", "type": "string", "reference": "retailers.id"},
{"name": "product_variant_id", "type": "string", "reference": "product_variants.id"}
]
})"_json;
documents = {
R"({
"qty": "1",
"retailer_id": "0",
"product_variant_id": "0"
})"_json,
R"({
"qty": "2",
"retailer_id": "0",
"product_variant_id": "1"
})"_json,
R"({
"qty": "3",
"retailer_id": "0",
"product_variant_id": "2"
})"_json,
R"({
"qty": "4",
"retailer_id": "0",
"product_variant_id": "3"
})"_json,
R"({
"qty": "5",
"retailer_id": "1",
"product_variant_id": "0"
})"_json,
R"({
"qty": "6",
"retailer_id": "1",
"product_variant_id": "1"
})"_json,
R"({
"qty": "7",
"retailer_id": "1",
"product_variant_id": "2"
})"_json,
R"({
"qty": "8",
"retailer_id": "1",
"product_variant_id": "3"
})"_json,
R"({
"qty": "9",
"retailer_id": "2",
"product_variant_id": "0"
})"_json,
R"({
"qty": "10",
"retailer_id": "2",
"product_variant_id": "1"
})"_json,
R"({
"qty": "11",
"retailer_id": "2",
"product_variant_id": "2"
})"_json,
R"({
"qty": "12",
"retailer_id": "2",
"product_variant_id": "3"
})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
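// Filter products by retailer location through the three-level nested reference and include selected fields from each level.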
req_params = {
{"collection", "products"},
{"q", "*"},
{"filter_by", "$product_variants($inventory($retailers(location:(48.87538726829884, 2.296113163780903,1 km))))"},
{"include_fields", "$product_variants(id,$inventory(qty,sku,$retailers(id,title)))"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("1", res_obj["hits"][0]["document"]["id"]);
ASSERT_EQ("soap", res_obj["hits"][0]["document"]["title"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["product_variants"].size());
ASSERT_EQ("2", res_obj["hits"][0]["document"]["product_variants"][0]["id"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["product_variants"][0]["inventory"].size());
ASSERT_EQ(11, res_obj["hits"][0]["document"]["product_variants"][0]["inventory"]["qty"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["product_variants"][0]["inventory"]["retailers"].size());
ASSERT_EQ("2", res_obj["hits"][0]["document"]["product_variants"][0]["inventory"]["retailers"]["id"]);
ASSERT_EQ("retailer 3", res_obj["hits"][0]["document"]["product_variants"][0]["inventory"]["retailers"]["title"]);
ASSERT_EQ("3", res_obj["hits"][0]["document"]["product_variants"][1]["id"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["product_variants"][1]["inventory"].size());
ASSERT_EQ(12, res_obj["hits"][0]["document"]["product_variants"][1]["inventory"]["qty"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["product_variants"][1]["inventory"]["retailers"].size());
ASSERT_EQ("2", res_obj["hits"][0]["document"]["product_variants"][1]["inventory"]["retailers"]["id"]);
ASSERT_EQ("retailer 3", res_obj["hits"][0]["document"]["product_variants"][1]["inventory"]["retailers"]["title"]);
ASSERT_EQ("0", res_obj["hits"][1]["document"]["id"]);
ASSERT_EQ("shampoo", res_obj["hits"][1]["document"]["title"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"]["product_variants"].size());
ASSERT_EQ("0", res_obj["hits"][1]["document"]["product_variants"][0]["id"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"]["product_variants"][0]["inventory"].size());
ASSERT_EQ(9, res_obj["hits"][1]["document"]["product_variants"][0]["inventory"]["qty"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"]["product_variants"][0]["inventory"]["retailers"].size());
ASSERT_EQ("2", res_obj["hits"][1]["document"]["product_variants"][0]["inventory"]["retailers"]["id"]);
ASSERT_EQ("retailer 3", res_obj["hits"][1]["document"]["product_variants"][0]["inventory"]["retailers"]["title"]);
ASSERT_EQ("1", res_obj["hits"][1]["document"]["product_variants"][1]["id"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"]["product_variants"][1]["inventory"].size());
ASSERT_EQ(10, res_obj["hits"][1]["document"]["product_variants"][1]["inventory"]["qty"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"]["product_variants"][1]["inventory"]["retailers"].size());
ASSERT_EQ("2", res_obj["hits"][1]["document"]["product_variants"][1]["inventory"]["retailers"]["id"]);
ASSERT_EQ("retailer 3", res_obj["hits"][1]["document"]["product_variants"][1]["inventory"]["retailers"]["title"]);
req_params = {
{"collection", "products"},
{"q", "*"},
{"filter_by", "$product_variants($inventory($retailers(id: [0, 1]) && qty: [4..5]))"},
{"include_fields", "$product_variants(id,$inventory(qty,sku,$retailers(id,title)))"},
{"exclude_fields", "$product_variants($inventory($retailers(id)))"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("1", res_obj["hits"][0]["document"]["id"]);
ASSERT_EQ("soap", res_obj["hits"][0]["document"]["title"]);
ASSERT_EQ("3", res_obj["hits"][0]["document"]["product_variants"]["id"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["product_variants"]["inventory"].size());
ASSERT_EQ(4, res_obj["hits"][0]["document"]["product_variants"]["inventory"]["qty"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["product_variants"]["inventory"]["retailers"].size());
ASSERT_EQ("retailer 1", res_obj["hits"][0]["document"]["product_variants"]["inventory"]["retailers"]["title"]);
ASSERT_EQ("0", res_obj["hits"][1]["document"]["id"]);
ASSERT_EQ("shampoo", res_obj["hits"][1]["document"]["title"]);
ASSERT_EQ("0", res_obj["hits"][1]["document"]["product_variants"]["id"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"]["product_variants"]["inventory"].size());
ASSERT_EQ(5, res_obj["hits"][1]["document"]["product_variants"]["inventory"]["qty"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"]["product_variants"]["inventory"]["retailers"].size());
ASSERT_EQ("retailer 2", res_obj["hits"][1]["document"]["product_variants"]["inventory"]["retailers"]["title"]);
}
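// Fixture for the include/exclude field tests below: sets up Products/Customers (Products has an embedding field),
// Users/Repos/Links, Organizations/Participants, and two customer collections with optional references.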
class JoinIncludeExcludeFieldsTest : public ::testing::Test {
protected:
Store* store = nullptr;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
std::string state_dir_path = "/tmp/typesense_test/collection_join";
Collection* products = nullptr;
Collection* customers = nullptr;
std::map<std::string, std::string> req_params;
nlohmann::json embedded_params;
std::string json_res;
long now_ts = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
void setupCollection() {
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string", "infix": true},
{"name": "product_description", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_description"], "model_config": {"model_name": "ts/e5-small"}}},
{"name": "rating", "type": "int32"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair.",
"rating": "2"
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients.",
"rating": "4"
})"_json
};
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
products = collection_create_op.get();
for (auto const &json: documents) {
auto add_op = products->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "product_id", "type": "string", "reference": "Products.product_id"}
]
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 73.5,
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 75,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_id": "product_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
customers = collection_create_op.get();
for (auto const &json: documents) {
auto add_op = customers->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Users",
"fields": [
{"name": "user_id", "type": "string"},
{"name": "user_name", "type": "string"}
]
})"_json;
documents = {
R"({
"user_id": "user_a",
"user_name": "Roshan"
})"_json,
R"({
"user_id": "user_b",
"user_name": "Ruby"
})"_json,
R"({
"user_id": "user_c",
"user_name": "Joe"
})"_json,
R"({
"user_id": "user_d",
"user_name": "Aby"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Repos",
"fields": [
{"name": "repo_id", "type": "string"},
{"name": "repo_content", "type": "string"},
{"name": "repo_stars", "type": "int32"},
{"name": "repo_is_private", "type": "bool"}
]
})"_json;
documents = {
R"({
"repo_id": "repo_a",
"repo_content": "body1",
"repo_stars": 431,
"repo_is_private": true
})"_json,
R"({
"repo_id": "repo_b",
"repo_content": "body2",
"repo_stars": 4562,
"repo_is_private": false
})"_json,
R"({
"repo_id": "repo_c",
"repo_content": "body3",
"repo_stars": 945,
"repo_is_private": false
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Links",
"fields": [
{"name": "repo_id", "type": "string", "reference": "Repos.repo_id"},
{"name": "user_id", "type": "string", "reference": "Users.user_id"}
]
})"_json;
documents = {
R"({
"repo_id": "repo_a",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_a",
"user_id": "user_c"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_a"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_d"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_a"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_c"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_d"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Organizations",
"fields": [
{"name": "org_id", "type": "string"},
{"name": "name", "type": "object"},
{"name": "name.first", "type": "string"},
{"name": "name.last", "type": "string"}
],
"enable_nested_fields": true
})"_json;
documents = {
R"({
"org_id": "org_a",
"name": {
"first": "type",
"last": "sense"
}
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Participants",
"fields": [
{"name": "user_id", "type": "string", "reference": "Users.user_id"},
{"name": "org_id", "type": "string", "reference": "Organizations.org_id"}
]
})"_json;
documents = {
R"({
"user_id": "user_a",
"org_id": "org_a"
})"_json,
R"({
"user_id": "user_b",
"org_id": "org_a"
})"_json,
R"({
"user_id": "user_d",
"org_id": "org_a"
})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Customers_Optional_Reference",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float", "optional": true},
{"name": "product_id", "type": "string", "reference": "Products.product_id", "optional": true}
]
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_id": "product_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Customers_Object_Optional_Reference",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product", "type": "object", "optional": true},
{"name": "product.price", "type": "float", "optional": true},
{"name": "product.id", "type": "string", "reference": "Products.product_id", "optional": true}
],
"enable_nested_fields": true
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product": {
"price": 143,
"id": "product_a"
}
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product": {
"price": 140,
"id": "product_b"
}
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(JoinIncludeExcludeFieldsTest, ErrorHandling) {
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "$foo.bar"}
};
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Invalid reference `$foo.bar` in include_fields/exclude_fields, expected `$CollectionName(fieldA, ...)`.",
search_op.error());
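// Unbalanced parentheses in a reference include should also be rejected.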
req_params["include_fields"] = "$foo(bar";
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Invalid reference `$foo(bar` in include_fields/exclude_fields, expected `$CollectionName(fieldA, ...)`.",
search_op.error());
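// A well-formed reference to a collection that does not exist should fail with a not-found error.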
req_params["include_fields"] = "$foo(bar)";
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Referenced collection `foo` in `include_fields` not found.", search_op.error());
}
TEST_F(JoinIncludeExcludeFieldsTest, IncludeStrategies) {
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
};
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
nlohmann::json res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// No fields are mentioned in `include_fields`, should include all fields of Products and Customers by default.
ASSERT_EQ(7, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("embedding"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("rating"));
// Default strategy of reference includes is nest. No alias was provided, collection name becomes the field name.
ASSERT_EQ(5, res_obj["hits"][0]["document"]["Customers"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("customer_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("product_price"));
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "*, $Customers(*, strategy:nest_array) as Customers"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// `include_fields` uses wildcards for both Products and Customers, so all fields of both should be present.
ASSERT_EQ(7, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("embedding"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("rating"));
// In nest_array strategy we return the referenced docs in an array.
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"][0].count("customer_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"][0].count("customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"][0].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"][0].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"][0].count("product_price"));
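// With the merge strategy, the referenced fields are flattened into the parent document, prefixed with the alias.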
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "*, $Customers(*, strategy:merge) as Customers"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(11, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("embedding"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("rating"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("Customers.customer_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("Customers.customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("Customers.id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("Customers.product_price"));
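// Requesting a non-existent field (`bar`) from the referenced collection merges nothing extra.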
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "$Customers(bar, strategy:merge)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// No fields of Products collection are mentioned in `include_fields`, should include all of its fields by default.
ASSERT_EQ(6, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("embedding"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("rating"));
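// Merging a single referenced field adds exactly one key to the Products document.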
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "$Customers(product_price, strategy:merge)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(7, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "$Customers(product_price, customer_id, strategy:merge)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(8, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("customer_id"));
ASSERT_EQ("customer_a", res_obj["hits"][0]["document"].at("customer_id"));
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "*, $Customers(product_price, customer_id, strategy:merge)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// 6 fields in Products document and 2 fields from Customers document
ASSERT_EQ(8, res_obj["hits"][0]["document"].size());
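// A field-name prefix (`product*`) can be used inside a reference include; here only product_price ends up merged.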
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "$Customers(product*, strategy:merge)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// 6 fields in Products document and 1 field from Customers document
ASSERT_EQ(7, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
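// Exclude `product_id_sequence_id` (presumably the internal field backing the `product_id` reference) from the merged Customers fields.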
req_params = {
{"collection", "Products"},
{"q", "s"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "$Customers(product*, strategy:merge)"},
{"exclude_fields", "$Customers(product_id_sequence_id)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// 6 fields in Products document and 1 field from Customers document
ASSERT_EQ(7, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
req_params = {
{"collection", "Customers"},
{"q", "Dan"},
{"query_by", "customer_name"},
{"filter_by", "$Products(rating:>3)"},
{"include_fields", "$Products(product_name, strategy:merge), product_price"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(140, res_obj["hits"][0]["document"].at("product_price"));
// Reference include_by without join
req_params = {
{"collection", "Customers"},
{"q", "Joe"},
{"query_by", "customer_name"},
{"filter_by", "product_price:<100"},
{"include_fields", "$Products(product_name, strategy: merge), product_price"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
}
TEST_F(JoinIncludeExcludeFieldsTest, Alias) {
// Add alias using `as`
req_params = {
{"collection", "Products"},
{"q", "soap"},
{"query_by", "product_name"},
{"filter_by", "$Customers(id:*)"},
{"include_fields", "id, $Customers(id , strategy:merge)"}
};
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Could not include the value of `id` key of the reference document of `Customers` collection."
" Expected `id` to be an array. Try adding an alias.", search_op.error());
req_params = {
{"collection", "Products"},
{"q", "soap"},
{"query_by", "product_name"},
{"filter_by", "$Customers(id:*)"},
{"include_fields", "id, $Customers(id , strategy:nest) as id"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Could not include the reference document of `Customers` collection."
" Expected `id` to be an array. Try renaming the alias.", search_op.error());
req_params = {
{"collection", "Customers"},
{"q", "Joe"},
{"query_by", "customer_name"},
{"filter_by", "product_price:<100"},
// With merge, alias is prepended
{"include_fields", "$Products(product_name, strategy:merge) as prod, product_price"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("prod.product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("prod.product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
req_params = {
{"collection", "Customers"},
{"q", "Joe"},
{"query_by", "customer_name"},
{"filter_by", "product_price:<100"},
// With nest, alias becomes the key
{"include_fields", "$Products(product_name, strategy:nest) as prod, product_price"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("prod"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["prod"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"]["prod"].at("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
req_params = {
{"collection", "Products"},
{"q", "soap"},
{"query_by", "product_name"},
{"filter_by", "$Customers(id:*)"},
// With nest, alias becomes the key
{"include_fields", "$Customers(customer_name, product_price , strategy:nest) as CustomerPrices, product_name"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"]["product_name"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("CustomerPrices"));
ASSERT_EQ(2, res_obj["hits"][0]["document"]["CustomerPrices"].size());
ASSERT_EQ("Joe", res_obj["hits"][0]["document"]["CustomerPrices"].at(0)["customer_name"]);
ASSERT_EQ(73.5, res_obj["hits"][0]["document"]["CustomerPrices"].at(0)["product_price"]);
ASSERT_EQ("Dan", res_obj["hits"][0]["document"]["CustomerPrices"].at(1)["customer_name"]);
ASSERT_EQ(140, res_obj["hits"][0]["document"]["CustomerPrices"].at(1)["product_price"]);
}
TEST_F(JoinIncludeExcludeFieldsTest, IntegrationWithOtherFeatures) {
// Exclude token search
req_params = {
{"collection", "Products"},
{"q", "-shampoo"},
{"query_by", "product_name"},
{"filter_by", "$Customers(product_price:<100)"}, // This filter will match both shampoo and soap.
{"include_fields", "product_name"},
{"exclude_fields", "$Customers(*)"}
};
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("product_name"));
// Phrase search
req_params = {
{"collection", "Products"},
{"q", R"("soap")"},
{"query_by", "product_name"},
{"filter_by", "$Customers(product_price:<100)"}, // This filter will match both shampoo and soap.
{"include_fields", "product_name"},
{"exclude_fields", "$Customers(*)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("product_name"));
// Combining normal and reference filter
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "product_name:soap && $Customers(product_price:>100)"},
{"include_fields", "product_name, $Customers(product_price, strategy:merge)"},
{"exclude_fields", ""}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(140, res_obj["hits"][0]["document"].at("product_price"));
// Multiple references
req_params = {
{"collection", "Products"},
{"q", "soap"},
{"query_by", "product_name"},
{"filter_by", "$Customers(product_price: >0)"},
{"include_fields", "product_name, $Customers(customer_name, product_price, strategy:merge)"},
{"exclude_fields", ""}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("customer_name"));
ASSERT_EQ("Joe", res_obj["hits"][0]["document"].at("customer_name").at(0));
ASSERT_EQ("Dan", res_obj["hits"][0]["document"].at("customer_name").at(1));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price").at(0));
ASSERT_EQ(140, res_obj["hits"][0]["document"].at("product_price").at(1));
// Vector search
req_params = {
{"collection", "Products"},
{"q", "natural products"},
{"query_by", "embedding"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "product_name, $Customers(product_price, strategy:merge)"},
{"exclude_fields", ""}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
nlohmann::json model_config = R"({
"model_name": "ts/e5-small"
})"_json;
auto query_embedding = EmbedderManager::get_instance().get_text_embedder(model_config).get()->Embed("natural products");
std::string vec_string = "[";
for (auto const& i : query_embedding.embedding) {
vec_string += std::to_string(i);
vec_string += ",";
}
vec_string[vec_string.size() - 1] = ']';
req_params = {
{"collection", "Products"},
{"q", "*"},
{"vector_query", "embedding:(" + vec_string + ", flat_search_cutoff: 0)"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "product_name, $Customers(product_price, strategy : merge)"},
{"exclude_fields", ""}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
// Hybrid search - Both text match and vector match
req_params = {
{"collection", "Products"},
{"q", "soap"},
{"query_by", "product_name, embedding"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "product_name, $Customers(product_price, strategy: merge)"},
{"exclude_fields", ""}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_NE(0, res_obj["hits"][0].at("text_match"));
ASSERT_NE(0, res_obj["hits"][0].at("vector_distance"));
// Hybrid search - Only vector match
req_params = {
{"collection", "Products"},
{"q", "natural products"},
{"query_by", "product_name, embedding"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "product_name, $Customers(product_price , strategy:merge)"},
{"exclude_fields", ""}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ(0, res_obj["hits"][0].at("text_match"));
ASSERT_NE(0, res_obj["hits"][0].at("vector_distance"));
// Infix search
req_params = {
{"collection", "Products"},
{"q", "ap"},
{"query_by", "product_name"},
{"infix", "always"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "product_name, $Customers(product_price, strategy:merge)"},
{"exclude_fields", ""}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
}
TEST_F(JoinIncludeExcludeFieldsTest, MultipleJoins) {
// Search for users within an organization with access to a particular repo.
req_params = {
{"collection", "Users"},
{"q", "R"},
{"query_by", "user_name"},
{"filter_by", "$Participants(org_id:=org_a) && $Links(repo_id:=repo_b)"},
{"include_fields", "user_id, user_name, $Repos(repo_content, strategy:merge), $Organizations(name, strategy:merge) as org"},
{"exclude_fields", "$Participants(*), $Links(*), "}
};
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(4, res_obj["hits"][0]["document"].size());
ASSERT_EQ("user_b", res_obj["hits"][0]["document"].at("user_id"));
ASSERT_EQ("Ruby", res_obj["hits"][0]["document"].at("user_name"));
ASSERT_EQ("body2", res_obj["hits"][0]["document"].at("repo_content"));
ASSERT_EQ("type", res_obj["hits"][0]["document"]["org.name"].at("first"));
ASSERT_EQ("sense", res_obj["hits"][0]["document"]["org.name"].at("last"));
ASSERT_EQ("user_a", res_obj["hits"][1]["document"].at("user_id"));
ASSERT_EQ("Roshan", res_obj["hits"][1]["document"].at("user_name"));
ASSERT_EQ("body2", res_obj["hits"][1]["document"].at("repo_content"));
ASSERT_EQ("type", res_obj["hits"][0]["document"]["org.name"].at("first"));
ASSERT_EQ("sense", res_obj["hits"][0]["document"]["org.name"].at("last"));
}
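// Documents that omit the optional reference should still appear in the results, just without the joined Products fields.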
TEST_F(JoinIncludeExcludeFieldsTest, OptionalReferenceField) {
req_params = {
{"collection", "Customers_Optional_Reference"},
{"q", "*"},
{"include_fields", "$Products(product_name, strategy: merge), customer_name"}
};
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("customer_name"));
ASSERT_EQ("Dan", res_obj["hits"][0]["document"].at("customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("product_name"));
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("customer_name"));
ASSERT_EQ("Dan", res_obj["hits"][1]["document"].at("customer_name"));
ASSERT_EQ(0, res_obj["hits"][1]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][2]["document"].count("customer_name"));
ASSERT_EQ("Joe", res_obj["hits"][2]["document"].at("customer_name"));
ASSERT_EQ(0, res_obj["hits"][2]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][3]["document"].count("customer_name"));
ASSERT_EQ("Joe", res_obj["hits"][3]["document"].at("customer_name"));
ASSERT_EQ(1, res_obj["hits"][3]["document"].count("product_name"));
ASSERT_EQ("shampoo", res_obj["hits"][3]["document"].at("product_name"));
req_params = {
{"collection", "Customers_Object_Optional_Reference"},
{"q", "*"},
{"include_fields", "$Products(product_name, strategy: merge), customer_name"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("customer_name"));
ASSERT_EQ("Dan", res_obj["hits"][0]["document"].at("customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["product"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][0]["document"]["product"].at("product_name"));
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("customer_name"));
ASSERT_EQ("Dan", res_obj["hits"][1]["document"].at("customer_name"));
ASSERT_EQ(0, res_obj["hits"][1]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][2]["document"].count("customer_name"));
ASSERT_EQ("Joe", res_obj["hits"][2]["document"].at("customer_name"));
ASSERT_EQ(0, res_obj["hits"][2]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][3]["document"].count("customer_name"));
ASSERT_EQ("Joe", res_obj["hits"][3]["document"].at("customer_name"));
ASSERT_EQ(1, res_obj["hits"][3]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][3]["document"]["product"].count("product_name"));
ASSERT_EQ("shampoo", res_obj["hits"][3]["document"]["product"].at("product_name"));
}
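// A song references multiple genres via a string[] reference field; the join should resolve in both directions.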
TEST_F(CollectionJoinTest, FilterByReferenceArrayField) {
auto schema_json =
R"({
"name": "genres",
"fields": [
{ "name": "id", "type": "string" },
{ "name": "name", "type": "string" }
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({"id":"0","name":"Grunge"})"_json,
R"({"id":"1","name":"Arena rock"})"_json,
R"({"id":"2","name":"Blues"})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "songs",
"fields": [
{ "name": "title", "type": "string" },
{ "name": "genres", "type": "string[]", "reference": "genres.id"}
]
})"_json;
documents = {
R"({"title":"Dil De Rani", "genres":[]})"_json,
R"({"title":"Corduroy", "genres":["0"]})"_json,
R"({"title":"Achilles Last Stand", "genres":["1","2"]})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
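// Include genre names into each song; a song with an empty genres array gets an empty `genre.name` array.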
std::map<std::string, std::string> req_params = {
{"collection", "songs"},
{"q", "*"},
{"include_fields", "$genres(name, strategy:merge) as genre"},
{"exclude_fields", "genres_sequence_id"},
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op_bool = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op_bool.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("Achilles Last Stand", res_obj["hits"][0]["document"]["title"].get<std::string>());
ASSERT_EQ(2, res_obj["hits"][0]["document"]["genre.name"].size());
ASSERT_EQ("Arena rock", res_obj["hits"][0]["document"]["genre.name"][0]);
ASSERT_EQ("Blues", res_obj["hits"][0]["document"]["genre.name"][1]);
ASSERT_EQ("Corduroy", res_obj["hits"][1]["document"]["title"].get<std::string>());
ASSERT_EQ(1, res_obj["hits"][1]["document"]["genre.name"].size());
ASSERT_EQ("Grunge", res_obj["hits"][1]["document"]["genre.name"][0]);
ASSERT_EQ("Dil De Rani", res_obj["hits"][2]["document"]["title"].get<std::string>());
ASSERT_EQ(0, res_obj["hits"][2]["document"]["genre.name"].size());
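// Reverse join: list genres that are referenced by at least one song and include the referencing song titles.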
req_params = {
{"collection", "genres"},
{"q", "*"},
{"filter_by", "$songs(id: *)"},
{"include_fields", "$songs(title, strategy:merge) as song"},
};
search_op_bool = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op_bool.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ("Blues", res_obj["hits"][0]["document"]["name"].get<std::string>());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["song.title"].size());
ASSERT_EQ("Achilles Last Stand", res_obj["hits"][0]["document"]["song.title"][0]);
ASSERT_EQ("Arena rock", res_obj["hits"][1]["document"]["name"].get<std::string>());
ASSERT_EQ(1, res_obj["hits"][1]["document"]["song.title"].size());
ASSERT_EQ("Achilles Last Stand", res_obj["hits"][1]["document"]["song.title"][0]);
ASSERT_EQ("Grunge", res_obj["hits"][2]["document"]["name"].get<std::string>());
ASSERT_EQ(1, res_obj["hits"][2]["document"]["song.title"].size());
ASSERT_EQ("Corduroy", res_obj["hits"][2]["document"]["song.title"][0]);
}
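// References declared on nested object fields (`object.reference`, `object.reference_array`) should join like top-level ones.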
TEST_F(CollectionJoinTest, FilterByObjectReferenceField) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "price", "type": "int32"},
{"name": "name", "type": "string"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"price": 50,
"name": "soap"
})"_json,
R"({
"product_id": "product_b",
"price": 10,
"name": "shampoo"
})"_json,
R"({
"product_id": "product_c",
"price": 120,
"name": "milk"
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "coll1",
"fields": [
{"name": "coll_id", "type": "string"},
{"name": "object.reference", "type": "string", "reference": "Products.product_id", "optional": true},
{"name": "object", "type": "object"}
],
"enable_nested_fields": true
})"_json;
documents = {
R"({
"coll_id": "a",
"object": {}
})"_json,
R"({
"coll_id": "b",
"object": {
"reference": "product_c"
}
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
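// With the default nest strategy, the joined Products doc is nested under `object`, next to the reference key.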
std::map<std::string, std::string> req_params = {
{"collection", "coll1"},
{"q", "*"},
{"include_fields", "$Products(product_id)"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ("b", res_obj["hits"][0]["document"]["coll_id"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["object"].size());
ASSERT_EQ("product_c", res_obj["hits"][0]["document"]["object"]["reference"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"].count("Products"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"]["Products"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"]["Products"].count("product_id"));
ASSERT_EQ("product_c", res_obj["hits"][0]["document"]["object"]["Products"]["product_id"]);
ASSERT_EQ(3, res_obj["hits"][1]["document"].size());
ASSERT_EQ("a", res_obj["hits"][1]["document"]["coll_id"]);
ASSERT_EQ(0, res_obj["hits"][1]["document"]["object"].size());
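// Excluding `object` drops its own keys, but the joined Products doc is still nested under it.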
req_params = {
{"collection", "coll1"},
{"q", "*"},
{"include_fields", "$Products(product_id)"},
{"exclude_fields", "object"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ("1", res_obj["hits"][0]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("object"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"].count("Products"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"]["Products"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"]["Products"].count("product_id"));
ASSERT_EQ("product_c", res_obj["hits"][0]["document"]["object"]["Products"]["product_id"]);
ASSERT_EQ(3, res_obj["hits"][1]["document"].size());
ASSERT_EQ("0", res_obj["hits"][1]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("object"));
ASSERT_EQ(0, res_obj["hits"][1]["document"]["object"].size());
req_params = {
{"collection", "Products"},
{"q", "*"},
{"filter_by", "$coll1(id: *)"},
{"include_fields", "$coll1(coll_id)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(5, res_obj["hits"][0]["document"].size());
ASSERT_EQ("product_c", res_obj["hits"][0]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("coll1"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["coll1"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["coll1"].count("coll_id"));
ASSERT_EQ("b", res_obj["hits"][0]["document"]["coll1"]["coll_id"]);
schema_json =
R"({
"name": "coll2",
"fields": [
{"name": "coll_id", "type": "string"},
{"name": "object.reference_array", "type": "string[]", "reference": "Products.product_id", "optional": true},
{"name": "object", "type": "object"}
],
"enable_nested_fields": true
})"_json;
documents = {
R"({
"coll_id": "a",
"object": {}
})"_json,
R"({
"coll_id": "b",
"object": {
"reference_array": ["product_a", "product_b"]
}
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "coll2"},
{"q", "*"},
{"include_fields", "$Products(product_id)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ("b", res_obj["hits"][0]["document"]["coll_id"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["object"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"]["object"]["reference_array"][0]);
ASSERT_EQ("product_b", res_obj["hits"][0]["document"]["object"]["reference_array"][1]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"].count("Products"));
ASSERT_EQ(2, res_obj["hits"][0]["document"]["object"]["Products"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"]["Products"][0].count("product_id"));
ASSERT_EQ("product_a", res_obj["hits"][0]["document"]["object"]["Products"][0]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"]["Products"][1].count("product_id"));
ASSERT_EQ("product_b", res_obj["hits"][0]["document"]["object"]["Products"][1]["product_id"]);
ASSERT_EQ(3, res_obj["hits"][1]["document"].size());
ASSERT_EQ("a", res_obj["hits"][1]["document"]["coll_id"]);
ASSERT_EQ(0, res_obj["hits"][1]["document"]["object"].size());
req_params = {
{"collection", "coll2"},
{"q", "*"},
{"include_fields", "$Products(product_id)"},
{"exclude_fields", "object"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ("1", res_obj["hits"][0]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("object"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"].count("Products"));
ASSERT_EQ(2, res_obj["hits"][0]["document"]["object"]["Products"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"]["Products"][0].count("product_id"));
ASSERT_EQ("product_a", res_obj["hits"][0]["document"]["object"]["Products"][0]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"]["Products"][1].count("product_id"));
ASSERT_EQ("product_b", res_obj["hits"][0]["document"]["object"]["Products"][1]["product_id"]);
ASSERT_EQ(3, res_obj["hits"][1]["document"].size());
ASSERT_EQ("0", res_obj["hits"][1]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("object"));
ASSERT_EQ(0, res_obj["hits"][1]["document"]["object"].size());
req_params = {
{"collection", "Products"},
{"q", "*"},
{"filter_by", "$coll2(id: *)"},
{"include_fields", "$coll2(coll_id)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(5, res_obj["hits"][0]["document"].size());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("coll2"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["coll2"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["coll2"][0].count("coll_id"));
ASSERT_EQ("b", res_obj["hits"][0]["document"]["coll2"][0]["coll_id"]);
ASSERT_EQ("product_a", res_obj["hits"][1]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("coll2"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["coll2"].size());
ASSERT_EQ(1, res_obj["hits"][1]["document"]["coll2"][0].count("coll_id"));
ASSERT_EQ("b", res_obj["hits"][1]["document"]["coll2"][0]["coll_id"]);
schema_json =
R"({
"name": "coll3",
"fields": [
{"name": "coll_id", "type": "string"},
{"name": "object.reference_array", "type": "string[]", "reference": "Products.id", "optional": true},
{"name": "object", "type": "object"}
],
"enable_nested_fields": true
})"_json;
documents = {
R"({
"coll_id": "a",
"object": {}
})"_json,
R"({
"coll_id": "b",
"object": {
"reference_array": ["0", "1"]
}
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "coll3"},
{"q", "*"},
{"include_fields", "$Products(product_id)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ("b", res_obj["hits"][0]["document"]["coll_id"]);
ASSERT_EQ(2, res_obj["hits"][0]["document"]["object"].size());
ASSERT_EQ("0", res_obj["hits"][0]["document"]["object"]["reference_array"][0]);
ASSERT_EQ("1", res_obj["hits"][0]["document"]["object"]["reference_array"][1]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"].count("Products"));
ASSERT_EQ(2, res_obj["hits"][0]["document"]["object"]["Products"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"]["Products"][0].count("product_id"));
ASSERT_EQ("product_a", res_obj["hits"][0]["document"]["object"]["Products"][0]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["object"]["Products"][1].count("product_id"));
ASSERT_EQ("product_b", res_obj["hits"][0]["document"]["object"]["Products"][1]["product_id"]);
ASSERT_EQ(3, res_obj["hits"][1]["document"].size());
ASSERT_EQ("a", res_obj["hits"][1]["document"]["coll_id"]);
ASSERT_EQ(0, res_obj["hits"][1]["document"]["object"].size());
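// Reverse join: filter Products on coll3 references and include coll3's `coll_id`.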
req_params = {
{"collection", "Products"},
{"q", "*"},
{"filter_by", "$coll3(id: *)"},
{"include_fields", "$coll3(coll_id)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(5, res_obj["hits"][0]["document"].size());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("coll3"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["coll3"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["coll3"][0].count("coll_id"));
ASSERT_EQ("b", res_obj["hits"][0]["document"]["coll3"][0]["coll_id"]);
ASSERT_EQ("product_a", res_obj["hits"][1]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("coll3"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["coll3"].size());
ASSERT_EQ(1, res_obj["hits"][1]["document"]["coll3"][0].count("coll_id"));
ASSERT_EQ("b", res_obj["hits"][1]["document"]["coll3"][0]["coll_id"]);
schema_json =
R"({
"name": "Portions",
"fields": [
{"name": "portion_id", "type": "string"},
{"name": "quantity", "type": "int32"},
{"name": "unit", "type": "string"}
]
})"_json;
documents = {
R"({
"portion_id": "portion_a",
"quantity": 500,
"unit": "g"
})"_json,
R"({
"portion_id": "portion_b",
"quantity": 1,
"unit": "lt"
})"_json,
R"({
"portion_id": "portion_c",
"quantity": 500,
"unit": "ml"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Foods",
"fields": [
{"name": "name", "type": "string"},
{"name": "portions", "type": "object[]"},
{"name": "portions.portion_id", "type": "string[]", "reference": "Portions.portion_id", "optional": true}
],
"enable_nested_fields": true
})"_json;
documents = {
R"({
"name": "Bread",
"portions": [
{
"portion_id": "portion_a",
"count": 10
}
]
})"_json,
R"({
"name": "Milk",
"portions": [
{
"portion_id": "portion_b",
"count": 3
},
{
"count": 3
},
{
"portion_id": "portion_c",
"count": 1
}
]
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump(), CREATE, "", DIRTY_VALUES::REJECT);
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
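// Search Foods, merging every referenced Portions field into the corresponding portion object.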
req_params = {
{"collection", "Foods"},
{"q", "*"},
{"include_fields", "$Portions(*, strategy:merge)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("name"));
ASSERT_EQ("Milk", res_obj["hits"][0]["document"]["name"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("portions"));
ASSERT_EQ(3, res_obj["hits"][0]["document"]["portions"].size());
ASSERT_EQ(5, res_obj["hits"][0]["document"]["portions"][0].size());
ASSERT_EQ("portion_b", res_obj["hits"][0]["document"]["portions"][0].at("portion_id"));
ASSERT_EQ(1 , res_obj["hits"][0]["document"]["portions"][0].at("quantity"));
ASSERT_EQ("lt", res_obj["hits"][0]["document"]["portions"][0].at("unit"));
ASSERT_EQ(3 , res_obj["hits"][0]["document"]["portions"][0].at("count"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["portions"][1].size());
ASSERT_EQ(3 , res_obj["hits"][0]["document"]["portions"][1].at("count"));
ASSERT_EQ(5, res_obj["hits"][0]["document"]["portions"][2].size());
ASSERT_EQ("portion_c", res_obj["hits"][0]["document"]["portions"][2].at("portion_id"));
ASSERT_EQ(500 , res_obj["hits"][0]["document"]["portions"][2].at("quantity"));
ASSERT_EQ("ml", res_obj["hits"][0]["document"]["portions"][2].at("unit"));
ASSERT_EQ(1 , res_obj["hits"][0]["document"]["portions"][2].at("count"));
ASSERT_EQ("Bread", res_obj["hits"][1]["document"]["name"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("portions"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["portions"].size());
ASSERT_EQ(5, res_obj["hits"][1]["document"]["portions"][0].size());
ASSERT_EQ("portion_a", res_obj["hits"][1]["document"]["portions"][0].at("portion_id"));
ASSERT_EQ(500 , res_obj["hits"][1]["document"]["portions"][0].at("quantity"));
ASSERT_EQ("g", res_obj["hits"][1]["document"]["portions"][0].at("unit"));
ASSERT_EQ(10 , res_obj["hits"][1]["document"]["portions"][0].at("count"));
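// Repeat the search while excluding the `portions` field of the Foods documents.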
req_params = {
{"collection", "Foods"},
{"q", "*"},
{"include_fields", "$Portions(*, strategy:merge)"},
{"exclude_fields", "portions"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ("1", res_obj["hits"][0]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("portions"));
ASSERT_EQ(3, res_obj["hits"][0]["document"]["portions"].size());
ASSERT_EQ(4, res_obj["hits"][0]["document"]["portions"][0].size());
ASSERT_EQ("portion_b", res_obj["hits"][0]["document"]["portions"][0].at("portion_id"));
ASSERT_EQ(1 , res_obj["hits"][0]["document"]["portions"][0].at("quantity"));
ASSERT_EQ("lt", res_obj["hits"][0]["document"]["portions"][0].at("unit"));
ASSERT_EQ(0, res_obj["hits"][0]["document"]["portions"][1].size());
ASSERT_EQ(4, res_obj["hits"][0]["document"]["portions"][2].size());
ASSERT_EQ("portion_c", res_obj["hits"][0]["document"]["portions"][2].at("portion_id"));
ASSERT_EQ(500 , res_obj["hits"][0]["document"]["portions"][2].at("quantity"));
ASSERT_EQ("ml", res_obj["hits"][0]["document"]["portions"][2].at("unit"));
ASSERT_EQ("0", res_obj["hits"][1]["document"]["id"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("portions"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["portions"].size());
ASSERT_EQ(4, res_obj["hits"][1]["document"]["portions"][0].size());
ASSERT_EQ("portion_a", res_obj["hits"][1]["document"]["portions"][0].at("portion_id"));
ASSERT_EQ(500 , res_obj["hits"][1]["document"]["portions"][0].at("quantity"));
ASSERT_EQ("g", res_obj["hits"][1]["document"]["portions"][0].at("unit"));
// Recreate the collection manager to ensure that it initializes `object_reference_helper_fields` correctly.
collectionManager.dispose();
delete store;
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
auto load_op = collectionManager.load(8, 1000);
if(!load_op.ok()) {
LOG(ERROR) << load_op.error();
}
ASSERT_TRUE(load_op.ok());
req_params = {
{"collection", "Foods"},
{"q", "*"},
{"include_fields", "$Portions(*, strategy:merge)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("name"));
ASSERT_EQ("Milk", res_obj["hits"][0]["document"]["name"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("portions"));
ASSERT_EQ(3, res_obj["hits"][0]["document"]["portions"].size());
ASSERT_EQ(5, res_obj["hits"][0]["document"]["portions"][0].size());
ASSERT_EQ("portion_b", res_obj["hits"][0]["document"]["portions"][0].at("portion_id"));
ASSERT_EQ(1 , res_obj["hits"][0]["document"]["portions"][0].at("quantity"));
ASSERT_EQ("lt", res_obj["hits"][0]["document"]["portions"][0].at("unit"));
ASSERT_EQ(3 , res_obj["hits"][0]["document"]["portions"][0].at("count"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["portions"][1].size());
ASSERT_EQ(3 , res_obj["hits"][0]["document"]["portions"][1].at("count"));
ASSERT_EQ(5, res_obj["hits"][0]["document"]["portions"][2].size());
ASSERT_EQ("portion_c", res_obj["hits"][0]["document"]["portions"][2].at("portion_id"));
ASSERT_EQ(500 , res_obj["hits"][0]["document"]["portions"][2].at("quantity"));
ASSERT_EQ("ml", res_obj["hits"][0]["document"]["portions"][2].at("unit"));
ASSERT_EQ(1 , res_obj["hits"][0]["document"]["portions"][2].at("count"));
ASSERT_EQ("Bread", res_obj["hits"][1]["document"]["name"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("portions"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["portions"].size());
ASSERT_EQ(5, res_obj["hits"][1]["document"]["portions"][0].size());
ASSERT_EQ("portion_a", res_obj["hits"][1]["document"]["portions"][0].at("portion_id"));
ASSERT_EQ(500 , res_obj["hits"][1]["document"]["portions"][0].at("quantity"));
ASSERT_EQ("g", res_obj["hits"][1]["document"]["portions"][0].at("unit"));
ASSERT_EQ(10 , res_obj["hits"][1]["document"]["portions"][0].at("count"));
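// Update the Milk document to reference only portion_c and verify the merged search result.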
auto doc = R"({
"name": "Milk",
"portions": [
{
"portion_id": "portion_c",
"count": 1
}
]
})"_json;
auto add_op = collectionManager.get_collection_unsafe("Foods")->add(doc.dump(), index_operation_t::UPDATE, "1",
DIRTY_VALUES::REJECT);
ASSERT_TRUE(add_op.ok());
req_params = {
{"collection", "Foods"},
{"q", "*"},
{"include_fields", "$Portions(*, strategy:merge)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("name"));
ASSERT_EQ("Milk", res_obj["hits"][0]["document"]["name"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("portions"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["portions"].size());
ASSERT_EQ(5, res_obj["hits"][0]["document"]["portions"][0].size());
ASSERT_EQ("portion_c", res_obj["hits"][0]["document"]["portions"][0].at("portion_id"));
ASSERT_EQ(500 , res_obj["hits"][0]["document"]["portions"][0].at("quantity"));
ASSERT_EQ("ml", res_obj["hits"][0]["document"]["portions"][0].at("unit"));
ASSERT_EQ(1 , res_obj["hits"][0]["document"]["portions"][0].at("count"));
ASSERT_EQ("Bread", res_obj["hits"][1]["document"]["name"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("portions"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["portions"].size());
ASSERT_EQ(5, res_obj["hits"][1]["document"]["portions"][0].size());
ASSERT_EQ("portion_a", res_obj["hits"][1]["document"]["portions"][0].at("portion_id"));
ASSERT_EQ(500 , res_obj["hits"][1]["document"]["portions"][0].at("quantity"));
ASSERT_EQ("g", res_obj["hits"][1]["document"]["portions"][0].at("unit"));
ASSERT_EQ(10 , res_obj["hits"][1]["document"]["portions"][0].at("count"));
}
TEST_F(CollectionJoinTest, CascadeDeletion) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_idx", "type": "string"},
{"name": "product_name", "type": "string", "infix": true},
{"name": "product_description", "type": "string"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_idx": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair."
})"_json,
R"({
"product_idx": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients."
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Users",
"fields": [
{"name": "user_id", "type": "string"},
{"name": "user_name", "type": "string"}
]
})"_json;
documents = {
R"({
"user_id": "user_a",
"user_name": "Joe"
})"_json,
R"({
"user_id": "user_b",
"user_name": "Dan"
})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "CustomerProductPrices",
"fields": [
{"name": "product_price", "type": "float"},
{"name": "user_id", "type": "string", "reference": "Users.user_id"},
{"name": "product_id", "type": "string", "reference": "Products.product_idx"}
]
})"_json;
documents = {
R"({
"user_id": "user_a",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"user_id": "user_a",
"product_price": 73.5,
"product_id": "product_b"
})"_json,
R"({
"user_id": "user_b",
"product_price": 75,
"product_id": "product_a"
})"_json,
R"({
"user_id": "user_b",
"product_price": 140,
"product_id": "product_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
std::map<std::string, std::string> req_params = {
{"collection", "Products"},
{"q", "*"},
{"filter_by", "$CustomerProductPrices(user_id:= user_a)"},
{"include_fields", "$CustomerProductPrices(product_price)"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
nlohmann::json res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"].at("product_idx"));
ASSERT_EQ("product_a", res_obj["hits"][1]["document"].at("product_idx"));
req_params = {
{"collection", "CustomerProductPrices"},
{"q", "*"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ("product_a", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ("product_b", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ("product_a", res_obj["hits"][1]["document"].at("product_id"));
req_params = {
{"collection", "Products"},
{"q", "*"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"].at("product_idx"));
ASSERT_EQ("product_a", res_obj["hits"][1]["document"].at("product_idx"));
collectionManager.get_collection_unsafe("Products")->remove("0");
req_params = {
{"collection", "Products"},
{"q", "*"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"].at("product_idx"));
req_params = {
{"collection", "CustomerProductPrices"},
{"q", "*"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
collectionManager.get_collection_unsafe("Users")->remove("1");
req_params = {
{"collection", "Users"},
{"q", "*"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ("user_a", res_obj["hits"][0]["document"].at("user_id"));
req_params = {
{"collection", "CustomerProductPrices"},
{"q", "*"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ("user_a", res_obj["hits"][0]["document"].at("user_id"));
schema_json =
R"({
"name": "document",
"fields": [
{"name": "name", "type": "string"}
]
})"_json;
documents = {
R"({
"id": "1",
"name": "doc_1"
})"_json,
R"({
"id": "2",
"name": "doc_2"
})"_json,
R"({
"id": "3",
"name": "doc_3"
})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "lead",
"fields": [
{"name": "name", "type": "string"}
]
})"_json;
documents = {
R"({
"id": "1",
"name": "lead_1"
})"_json,
R"({
"id": "2",
"name": "lead_2"
})"_json,
R"({
"id": "3",
"name": "lead_3"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "lead_document",
"fields": [
{"name": "leadId", "type": "string", "reference":"lead.id"},
{"name": "documentId", "type": "string", "reference":"document.id"}
]
})"_json;
documents = {
R"({
"id": "1",
"leadId": "1",
"documentId": "1"
})"_json,
R"({
"id": "2",
"leadId": "2",
"documentId": "2"
})"_json,
R"({
"id": "3",
"leadId": "3",
"documentId": "2"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "lead_document"},
{"q", "*"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ("3", res_obj["hits"][0]["document"].at("leadId"));
ASSERT_EQ("2", res_obj["hits"][0]["document"].at("documentId"));
ASSERT_EQ("2", res_obj["hits"][1]["document"].at("leadId"));
ASSERT_EQ("2", res_obj["hits"][1]["document"].at("documentId"));
ASSERT_EQ("1", res_obj["hits"][2]["document"].at("leadId"));
ASSERT_EQ("1", res_obj["hits"][2]["document"].at("documentId"));
collectionManager.get_collection_unsafe("document")->remove("1");
req_params = {
{"collection", "lead_document"},
{"q", "*"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ("3", res_obj["hits"][0]["document"].at("leadId"));
ASSERT_EQ("2", res_obj["hits"][0]["document"].at("documentId"));
ASSERT_EQ("2", res_obj["hits"][1]["document"].at("leadId"));
ASSERT_EQ("2", res_obj["hits"][1]["document"].at("documentId"));
auto doc = R"({
"id": "1",
"leadId": "1",
"documentId": "3"
})"_json;
auto add_doc_op = collectionManager.get_collection_unsafe("lead_document")->add(doc.dump());
ASSERT_TRUE(add_doc_op.ok());
req_params = {
{"collection", "lead_document"},
{"q", "*"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ("1", res_obj["hits"][0]["document"].at("leadId"));
ASSERT_EQ("3", res_obj["hits"][0]["document"].at("documentId"));
ASSERT_EQ("3", res_obj["hits"][1]["document"].at("leadId"));
ASSERT_EQ("2", res_obj["hits"][1]["document"].at("documentId"));
ASSERT_EQ("2", res_obj["hits"][2]["document"].at("leadId"));
ASSERT_EQ("2", res_obj["hits"][2]["document"].at("documentId"));
collectionManager.get_collection_unsafe("lead")->remove("1");
req_params = {
{"collection", "lead_document"},
{"q", "*"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ("3", res_obj["hits"][0]["document"].at("leadId"));
ASSERT_EQ("2", res_obj["hits"][0]["document"].at("documentId"));
ASSERT_EQ("2", res_obj["hits"][1]["document"].at("leadId"));
ASSERT_EQ("2", res_obj["hits"][1]["document"].at("documentId"));
schema_json =
R"({
"name": "split_members",
"fields": [
{ "name": "user_id", "type": "string" }
]
})"_json;
documents = {
R"({"user_id": "user_a"})"_json,
R"({"user_id": "user_b"})"_json,
R"({"user_id": "user_c"})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "splits",
"fields": [
{ "name": "name", "type": "string" },
{ "name": "members", "type": "string[]", "reference": "split_members.user_id" }
]
})"_json;
documents = {
R"({
"name": "foo",
"members": ["user_a", "user_b", "user_c"]
})"_json,
R"({
"name": "bar",
"members": ["user_b"]
})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "splits"},
{"q", "*"},
{"include_fields", "$split_members(*)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ("bar", res_obj["hits"][0]["document"].at("name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].at("split_members").size());
ASSERT_EQ("user_b", res_obj["hits"][0]["document"]["split_members"][0].at("user_id"));
ASSERT_EQ("foo", res_obj["hits"][1]["document"].at("name"));
ASSERT_EQ(3, res_obj["hits"][1]["document"].at("split_members").size());
ASSERT_EQ("user_a", res_obj["hits"][1]["document"]["split_members"][0].at("user_id"));
ASSERT_EQ("user_b", res_obj["hits"][1]["document"]["split_members"][1].at("user_id"));
ASSERT_EQ("user_c", res_obj["hits"][1]["document"]["split_members"][2].at("user_id"));
// Remove `user_b`.
collectionManager.get_collection_unsafe("split_members")->remove("1");
req_params = {
{"collection", "splits"},
{"q", "*"},
{"include_fields", "$split_members(*)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ("foo", res_obj["hits"][0]["document"].at("name"));
ASSERT_EQ(2, res_obj["hits"][0]["document"].at("split_members").size());
ASSERT_EQ("user_a", res_obj["hits"][0]["document"]["split_members"][0].at("user_id"));
ASSERT_EQ("user_c", res_obj["hits"][0]["document"]["split_members"][1].at("user_id"));
collectionManager.drop_collection("Users");
schema_json =
R"({
"name": "Users",
"fields": [
{"name": "user_id", "type": "string"},
{"name": "user_name", "type": "string"}
]
})"_json;
documents = {
R"({
"user_id": "user_a",
"user_name": "Roshan"
})"_json,
R"({
"user_id": "user_b",
"user_name": "Ruby"
})"_json,
R"({
"user_id": "user_c",
"user_name": "Joe"
})"_json,
R"({
"user_id": "user_d",
"user_name": "Aby"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Repos",
"fields": [
{"name": "repo_id", "type": "string"},
{"name": "repo_content", "type": "string"}
]
})"_json;
documents = {
R"({
"repo_id": "repo_a",
"repo_content": "body1"
})"_json,
R"({
"repo_id": "repo_b",
"repo_content": "body2"
})"_json,
R"({
"repo_id": "repo_c",
"repo_content": "body3"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Links",
"fields": [
{"name": "repo_id", "type": "string", "reference": "Repos.repo_id", "optional": true},
{"name": "user_id", "type": "string", "reference": "Users.user_id"}
]
})"_json;
documents = {
R"({
"repo_id": "repo_a",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_a",
"user_id": "user_c"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_a"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_d"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_a"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_c"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_d"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
auto links_collection = collectionManager.get_collection_unsafe("Links");
auto links_doc = links_collection->get("0").get();
ASSERT_EQ(1, links_doc.count(".ref"));
ASSERT_EQ(2, links_doc[".ref"].size());
ASSERT_EQ("user_id_sequence_id", links_doc[".ref"][0]);
ASSERT_EQ("repo_id_sequence_id", links_doc[".ref"][1]);
ASSERT_EQ(1, links_doc.count("user_id_sequence_id"));
ASSERT_EQ(1, links_doc["user_id_sequence_id"]);
ASSERT_EQ(1, links_doc.count("repo_id_sequence_id"));
ASSERT_EQ(0, links_doc["repo_id_sequence_id"]);
links_doc = links_collection->get("1").get();
ASSERT_EQ(1, links_doc.count(".ref"));
ASSERT_EQ(2, links_doc[".ref"].size());
ASSERT_EQ("user_id_sequence_id", links_doc[".ref"][0]);
ASSERT_EQ("repo_id_sequence_id", links_doc[".ref"][1]);
ASSERT_EQ(1, links_doc.count("user_id_sequence_id"));
ASSERT_EQ(2, links_doc["user_id_sequence_id"]);
ASSERT_EQ(1, links_doc.count("repo_id_sequence_id"));
ASSERT_EQ(0, links_doc["repo_id_sequence_id"]);
collectionManager.get_collection_unsafe("Repos")->remove("0");
// Only the optional reference to Repos was removed, so the Links documents are not deleted.
links_collection = collectionManager.get_collection_unsafe("Links");
links_doc = links_collection->get("0").get();
ASSERT_EQ(1, links_doc.count(".ref"));
ASSERT_EQ(1, links_doc[".ref"].size());
ASSERT_EQ("user_id_sequence_id", links_doc[".ref"][0]);
ASSERT_EQ(1, links_doc.count("user_id_sequence_id"));
ASSERT_EQ(1, links_doc["user_id_sequence_id"]);
ASSERT_EQ(0, links_doc.count("repo_id_sequence_id"));
links_doc = links_collection->get("1").get();
ASSERT_EQ(1, links_doc.count(".ref"));
ASSERT_EQ(1, links_doc[".ref"].size());
ASSERT_EQ("user_id_sequence_id", links_doc[".ref"][0]);
ASSERT_EQ(1, links_doc.count("user_id_sequence_id"));
ASSERT_EQ(2, links_doc["user_id_sequence_id"]);
ASSERT_EQ(0, links_doc.count("repo_id_sequence_id"));
collectionManager.get_collection_unsafe("Users")->remove("2");
links_doc = links_collection->get("0").get();
ASSERT_EQ(1, links_doc.count(".ref"));
ASSERT_EQ(1, links_doc[".ref"].size());
ASSERT_EQ("user_id_sequence_id", links_doc[".ref"][0]);
ASSERT_EQ(1, links_doc.count("user_id_sequence_id"));
ASSERT_EQ(1, links_doc["user_id_sequence_id"]);
// The required reference to Users was removed, so the referencing documents are deleted.
auto get_op = links_collection->get("1");
ASSERT_FALSE(get_op.ok());
ASSERT_EQ("Could not find a document with id: 1", get_op.error());
get_op = links_collection->get("7");
ASSERT_FALSE(get_op.ok());
ASSERT_EQ("Could not find a document with id: 7", get_op.error());
schema_json =
R"({
"name": "Links_2",
"fields": [
{"name": "repo_id", "type": "string", "reference": "Repos.repo_id", "optional": true},
{"name": "user_id", "type": "string", "reference": "Users.user_id", "optional": true}
]
})"_json;
documents = {
R"({
"repo_id": "repo_b",
"user_id": "user_b"
})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
auto links_2_collection = collectionManager.get_collection_unsafe("Links_2");
auto links_2_doc = links_2_collection->get("0").get();
ASSERT_EQ(1, links_2_doc.count(".ref"));
ASSERT_EQ(2, links_2_doc[".ref"].size());
ASSERT_EQ("user_id_sequence_id", links_2_doc[".ref"][0]);
ASSERT_EQ("repo_id_sequence_id", links_2_doc[".ref"][1]);
ASSERT_EQ(1, links_2_doc.count("user_id_sequence_id"));
ASSERT_EQ(1, links_2_doc["user_id_sequence_id"]);
ASSERT_EQ(1, links_2_doc.count("repo_id_sequence_id"));
ASSERT_EQ(1, links_2_doc["repo_id_sequence_id"]);
collectionManager.get_collection_unsafe("Users")->remove("1");
links_2_doc = links_2_collection->get("0").get();
ASSERT_EQ(1, links_2_doc.count(".ref"));
ASSERT_EQ(1, links_2_doc[".ref"].size());
ASSERT_EQ("repo_id_sequence_id", links_2_doc[".ref"][0]);
ASSERT_EQ(0, links_2_doc.count("user_id_sequence_id"));
ASSERT_EQ(1, links_2_doc.count("repo_id_sequence_id"));
ASSERT_EQ(1, links_2_doc["repo_id_sequence_id"]);
collectionManager.get_collection_unsafe("Repos")->remove("1");
// All references were deleted, so the document is removed.
get_op = links_2_collection->get("0");
ASSERT_FALSE(get_op.ok());
ASSERT_EQ("Could not find a document with id: 0", get_op.error());
}
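// Test fixture that indexes the collections used by the join sort tests.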
class JoinSortTest : public ::testing::Test {
protected:
Store* store = nullptr;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
std::string state_dir_path = "/tmp/typesense_test/collection_join";
Collection* products = nullptr;
Collection* customers = nullptr;
std::map<std::string, std::string> req_params;
nlohmann::json embedded_params;
std::string json_res;
long now_ts = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
void setupCollection() {
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string", "sort": true, "infix": true},
{"name": "product_description", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_description"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair."
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients."
})"_json
};
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
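// Customers references Products and carries the numeric, bool and geopoint fields used for sorting.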
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string", "sort": true},
{"name": "product_price", "type": "float"},
{"name": "product_available", "type": "bool"},
{"name": "product_location", "type": "geopoint"},
{"name": "product_id", "type": "string", "reference": "Products.product_id", "sort": true}
]
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_available": true,
"product_location": [48.872576479306765, 2.332291112241466],
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 73.5,
"product_available": false,
"product_location": [48.888286721920934, 2.342340862419206],
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 75,
"product_available": true,
"product_location": [48.872576479306765, 2.332291112241466],
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_available": false,
"product_location": [48.888286721920934, 2.342340862419206],
"product_id": "product_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Users",
"fields": [
{"name": "user_id", "type": "string"},
{"name": "user_name", "type": "string", "sort": true}
]
})"_json;
documents = {
R"({
"user_id": "user_a",
"user_name": "Roshan"
})"_json,
R"({
"id": "foo",
"user_id": "user_b",
"user_name": "Ruby"
})"_json,
R"({
"user_id": "user_c",
"user_name": "Joe"
})"_json,
R"({
"user_id": "user_d",
"user_name": "Aby"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Repos",
"fields": [
{"name": "repo_id", "type": "string"},
{"name": "repo_content", "type": "string"},
{"name": "repo_stars", "type": "int32"},
{"name": "repo_is_private", "type": "bool"},
{"name": "repo_location", "type": "geopoint", "optional": true}
]
})"_json;
documents = {
R"({
"repo_id": "repo_a",
"repo_content": "body1",
"repo_stars": 431,
"repo_is_private": true,
"repo_location": [13.22112, 80.30511]
})"_json,
R"({
"repo_id": "repo_b",
"repo_content": "body2",
"repo_stars": 4562,
"repo_is_private": false,
"repo_location": [12.98973, 80.23095]
})"_json,
R"({
"repo_id": "repo_c",
"repo_content": "body3",
"repo_stars": 945,
"repo_is_private": false
})"_json,
R"({
"repo_id": "repo_d",
"repo_content": "body4",
"repo_stars": 95,
"repo_is_private": true,
"repo_location": [13.12752, 79.90136]
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Links",
"fields": [
{"name": "repo_id", "type": "string", "reference": "Repos.repo_id"},
{"name": "user_id", "type": "string", "reference": "Users.user_id"}
]
})"_json;
documents = {
R"({
"repo_id": "repo_a",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_a",
"user_id": "user_c"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_a"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_b",
"user_id": "user_d"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_a"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_b"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_c"
})"_json,
R"({
"repo_id": "repo_c",
"user_id": "user_d"
})"_json,
R"({
"repo_id": "repo_d",
"user_id": "user_d"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
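// Structures, Ads and Candidates: Candidates holds two optional references.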
schema_json =
R"({
"name": "Structures",
"fields": [
{"name": "id", "type": "string"},
{"name": "name", "type": "string", "sort": true}
]
})"_json;
documents = {
R"({
"id": "struct_a",
"name": "foo"
})"_json,
R"({
"id": "struct_b",
"name": "bar"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Ads",
"fields": [
{"name": "id", "type": "string"},
{"name": "structure", "type": "string", "reference": "Structures.id"}
]
})"_json;
documents = {
R"({
"id": "ad_a",
"structure": "struct_b"
})"_json,
R"({
"id": "ad_b",
"structure": "struct_a"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Candidates",
"fields": [
{"name": "structure", "type": "string", "reference": "Structures.id", "optional": true},
{"name": "ad", "type": "string", "reference": "Ads.id", "optional": true}
]
})"_json;
documents = {
R"({
"structure": "struct_b"
})"_json,
R"({
"ad": "ad_a"
})"_json,
R"({
"structure": "struct_a"
})"_json,
R"({
"ad": "ad_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
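// product and stock: stock references product.entity_id and exposes sortable `store_.*` bool fields.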
schema_json =
R"({
"name": "product",
"fields": [
{"name": "entity_id", "type": "string"},
{"name": "name", "type": "string", "sort": true},
{"name": "location", "type": "geopoint", "optional": true}
]
})"_json;
documents = {
R"({"entity_id": "P0", "name": "Generic brand Tablet", "location": [12.98973, 80.23095]})"_json,
R"({"entity_id": "P1", "name": "Tablet from samsung"})"_json,
R"({"entity_id": "P2", "name": "Tablet from apple", "location": [13.22112, 80.30511]})"_json,
R"({"entity_id": "P3", "name": "Tablet from oppo", "location": [12.98973, 80.23095]})"_json,
R"({"entity_id": "P4", "name": "Tablet from vivo"})"_json,
R"({"entity_id": "P5", "name": "Phone from samsung"})"_json,
R"({"entity_id": "P6", "name": "Tablet from xiaomi", "location": [13.12752, 79.90136]})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "stock",
"fields": [
{"name": "entity_id", "type": "string", "reference": "product.entity_id"},
{"name": "store_.*", "type": "bool", "sort": true}
]
})"_json;
documents = {
R"({"entity_id": "P0", "store_1": true, "store_2": true})"_json,
R"({"entity_id": "P1", "store_1": false, "store_2": false})"_json,
R"({"entity_id": "P2", "store_1": false, "store_2": true})"_json,
R"({"entity_id": "P4", "store_1": true, "store_2": true})"_json,
R"({"entity_id": "P6", "store_1": false, "store_2": false})"_json,
R"({"entity_id": "P3", "store_1": true, "store_2": false})"_json,
R"({"entity_id": "P5", "store_1": true, "store_2": true})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
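// skus, prices and collections: both prices and collections reference skus.id.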
schema_json =
R"({
"name": "skus",
"fields": [
{"name": "id", "type": "string"},
{"name": "sku_id", "type": "string"},
{"name": "sku_name", "type": "string"},
{"name": "seller_id", "type": "string"}
]
})"_json;
documents = {
R"({"id": "x_y_1_1", "sku_id": "1", "sku_name": "Sku Name 1", "seller_id": "1"})"_json,
R"({"id": "x_y_1_2", "sku_id": "2", "sku_name": "Sku Name 2", "seller_id": "1"})"_json,
R"({"id": "x_y_1_3", "sku_id": "3", "sku_name": "Sku Name 3", "seller_id": "1"})"_json,
R"({"id": "x_y_2_1", "sku_id": "1", "sku_name": "Sku Name 1", "seller_id": "2"})"_json,
R"({"id": "x_y_2_2", "sku_id": "2", "sku_name": "Sku Name 2", "seller_id": "2"})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "prices",
"fields": [
{"name": "id", "type": "string"},
{"name": "reference", "type": "string", "reference": "skus.id"},
{"name": "price", "type": "float"},
{"name": "sku_id", "type": "string"},
{"name": "seller_id", "type": "string"},
{"name": "cluster", "type": "string"},
{"name": "payment", "type": "string"}
]
})"_json;
documents = {
R"({"id": "x_y_1_1_null_null", "reference": "x_y_1_1", "sku_id": "1", "seller_id": "1", "price": 1, "cluster": "null", "payment": "null"})"_json,
R"({"id": "x_y_1_1_c1_p1", "reference": "x_y_1_1", "sku_id": "1", "seller_id": "1", "price": 1.1, "cluster": "c1", "payment": "p1"})"_json,
R"({"id": "x_y_1_1_c2_p2", "reference": "x_y_1_1", "sku_id": "1", "seller_id": "1", "price": 1.2, "cluster": "c2", "payment": "p2"})"_json,
R"({"id": "x_y_1_2_null_null", "reference": "x_y_1_2", "sku_id": "2", "seller_id": "1", "price": 2, "cluster": "null", "payment": "null" })"_json,
R"({"id": "x_y_1_3_null_null", "reference": "x_y_1_3", "sku_id": "3", "seller_id": "1", "price": 3, "cluster": "null", "payment": "null" })"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "collections",
"fields": [
{"name": "id", "type": "string"},
{"name": "sku_id", "type": "string"},
{"name": "collection_id", "type": "string"},
{"name": "collection_name", "type": "string"},
{"name": "sort_in_collection", "type": "int32", "sort": true},
{"name": "reference", "type": "string", "reference": "skus.id"}
]
})"_json;
documents = {
R"({"id": "1", "sku_id": "1", "collection_name": "Summer Collection", "collection_id": "abc-abc", "sort_in_collection": 1, "reference": "x_y_1_1"})"_json,
R"({"id": "2", "sku_id": "2", "collection_name": "Summer Collection", "collection_id": "abc-abc", "sort_in_collection": 2, "reference": "x_y_1_2"})"_json,
R"({"id": "3", "sku_id": "3", "collection_name": "Summer Collection", "collection_id": "abc-abc", "sort_in_collection": 3, "reference": "x_y_1_3"})"_json,
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(JoinSortTest, ErrorHandling) {
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$foo(product_price:asc"}
};
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Parameter `sort_by` is malformed.", search_op.error());
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$Customers(product_price)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Reference `sort_by` is malformed.", search_op.error());
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$foo(product_price:asc)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Referenced collection `foo` in `sort_by` not found.", search_op.error());
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$Customers(foo:asc)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Referenced collection `Customers`: Could not find a field named `foo` in the schema for sorting.",
search_op.error());
// Sort by reference with optional filtering (`_eval`): malformed eval expressions should be rejected.
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(id: *)"},
{"sort_by", "$Customers(_eval(product_available):asc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Referenced collection `Customers`: Error parsing eval expression in sort_by clause.", search_op.error());
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(id: *)"},
{"sort_by", "$Customers(_eval([(): 3]):asc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Referenced collection `Customers`: The eval expression in sort_by is empty.", search_op.error());
req_params = {
{"collection", "product"},
{"q", "tablet"},
{"query_by", "name"},
{"filter_by", "$stock(id: *)"},
{"sort_by", "_eval($stock(store_1:true || store_2:true)):desc"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Parameter `sort_by` is malformed.", search_op.error());
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(id: *)"},
{"sort_by", "$Customers(_eval([(customer_name: Dan && product_price: > 100): 3, (customer_name): 2]):asc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Referenced collection `Customers`: Error parsing eval expression in sort_by clause.", search_op.error());
req_params = {
{"collection", "Users"},
{"q", "*"},
{"filter_by", "$Links(repo_id:=[repo_a, repo_b, repo_d])"},
{"include_fields", "user_id, user_name, $Repos(repo_content, repo_stars, strategy:merge), "},
{"exclude_fields", "$Links(*), "},
{"sort_by", "$Repos(repo_location(13.12631, 80.20252): asc), user_name:desc"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("`Users` collection's `id: foo` document references multiple documents of `Links` collection.",
search_op.error());
req_params = {
{"collection", "Users"},
{"q", "*"},
{"filter_by", "user_id: != user_b && $Links(repo_id:=[repo_a, repo_b, repo_d])"},
{"include_fields", "user_id, user_name, $Repos(repo_content, repo_stars, strategy:merge), "},
{"exclude_fields", "$Links(*), "},
{"sort_by", "$Repos(repo_location(13.12631, 80.20252): asc), user_name:desc"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("`Users` collection's `id: 3` document references multiple documents of `Links` collection.",
search_op.error());
// Multiple references - Wildcard search
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(product_price: >0)"},
{"sort_by", "$Customers(product_price:desc)"},
{"include_fields", "product_id, $Customers(product_price)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Error while sorting on `Customers.product_price: `Products` collection's `id: 0` document references multiple"
" documents of `Customers` collection.", search_op.error());
// Multiple references - Text search
req_params = {
{"collection", "Products"},
{"q", "s"},
{"query_by", "product_name"},
{"filter_by", "$Customers(product_price: >0)"},
{"sort_by", "$Customers(product_price:desc)"},
{"include_fields", "product_id, $Customers(product_price)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Error while sorting on `Customers.product_price: `Products` collection's `id: 1` document references multiple"
" documents of `Customers` collection.", search_op.error());
// Multiple references - Phrase search
req_params = {
{"collection", "Products"},
{"q", R"("our")"},
{"query_by", "product_description"},
{"filter_by", "$Customers(product_price: >0)"},
{"include_fields", "product_id, $Customers(product_price)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Error while sorting on `Customers.product_price: `Products` collection's `id: 0` document references multiple"
" documents of `Customers` collection.", search_op.error());
// Multiple references - Vector search
req_params = {
{"collection", "Products"},
{"q", "natural products"},
{"query_by", "embedding"},
{"filter_by", "$Customers(product_price:>0)"},
{"include_fields", "product_name, $Customers(product_price)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Error while sorting on `Customers.product_price: `Products` collection's `id: 0` document references multiple"
" documents of `Customers` collection.", search_op.error());
nlohmann::json model_config = R"({
"model_name": "ts/e5-small"
})"_json;
auto query_embedding = EmbedderManager::get_instance().get_text_embedder(model_config).get()->Embed("natural products");
std::string vec_string = "[";
for (auto const& i : query_embedding.embedding) {
vec_string += std::to_string(i);
vec_string += ",";
}
vec_string[vec_string.size() - 1] = ']';
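// Multiple references - Vector query with a precomputed embedding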
req_params = {
{"collection", "Products"},
{"q", "*"},
{"vector_query", "embedding:(" + vec_string + ", flat_search_cutoff: 0)"},
{"filter_by", "$Customers(product_price: >0)"},
{"include_fields", "product_id, $Customers(product_price)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Error while sorting on `Customers.product_price: `Products` collection's `id: 0` document references multiple"
" documents of `Customers` collection.", search_op.error());
// Multiple references - Hybrid search
req_params = {
{"collection", "Products"},
{"q", "soap"},
{"query_by", "product_name, embedding"},
{"filter_by", "$Customers(product_price: >0)"},
{"include_fields", "product_id, $Customers(product_price)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Error while sorting on `Customers.product_price: `Products` collection's `id: 1` document references multiple"
" documents of `Customers` collection.", search_op.error());
req_params = {
{"collection", "Products"},
{"q", "natural products"},
{"query_by", "product_name, embedding"},
{"filter_by", "$Customers(product_price: >0)"},
{"include_fields", "product_id, $Customers(product_price)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Error while sorting on `Customers.product_price: `Products` collection's `id: 0` document references multiple"
" documents of `Customers` collection.", search_op.error());
// Multiple references - Infix search
req_params = {
{"collection", "Products"},
{"q", "p"},
{"query_by", "product_name"},
{"infix", "always"},
{"filter_by", "$Customers(product_price: >0)"},
{"include_fields", "product_id, $Customers(product_price)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Error while sorting on `Customers.product_price: `Products` collection's `id: 0` document references multiple"
" documents of `Customers` collection.", search_op.error());
}
TEST_F(JoinSortTest, SortByReferencedCollField) {
// Sort by reference numeric field
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$Customers(product_price:asc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_a", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][1]["document"].at("product_price"));
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$Customers(product_price:desc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
// Sort by reference string field
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$Customers(product_id:asc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
// Sort by reference geopoint field
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$Customers(product_location(48.87709, 2.33495, precision: 1km):asc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ(1, res_obj["hits"][0].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][0]["geo_distance_meters"].count("product_location"));
ASSERT_EQ(538, res_obj["hits"][0]["geo_distance_meters"]["product_location"]);
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
ASSERT_EQ(1356, res_obj["hits"][1]["geo_distance_meters"]["product_location"]);
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$Customers(product_location(48.87709, 2.33495, precision: 1km):desc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ(1, res_obj["hits"][0].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][0]["geo_distance_meters"].count("product_location"));
ASSERT_EQ(1356, res_obj["hits"][0]["geo_distance_meters"]["product_location"]);
ASSERT_EQ("product_a", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][1]["document"].at("product_price"));
ASSERT_EQ(538, res_obj["hits"][1]["geo_distance_meters"]["product_location"]);
// Sort by reference optional filtering (_eval).
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$Customers(_eval(product_available:true):asc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_b", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_a", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][1]["document"].at("product_price"));
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$Customers(_eval(product_available:true):desc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id: customer_a)"},
{"sort_by", "_eval(id:!foo):desc, $Customers(_eval(product_location:(48.87709, 2.33495, 1km)):desc)"}, // Closer to product_a
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
// Reference sort_by without join
req_params = {
{"collection", "Customers"},
{"q", "*"},
{"filter_by", "customer_name:= [Joe, Dan] && product_price:<100"},
{"include_fields", "$Products(product_name, strategy:merge), product_price"},
{"sort_by", "$Products(product_name:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("product_name"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("shampoo", res_obj["hits"][1]["document"].at("product_name"));
ASSERT_EQ(75, res_obj["hits"][1]["document"].at("product_price"));
req_params = {
{"collection", "Customers"},
{"q", "*"},
{"filter_by", "customer_name:= [Joe, Dan] && product_price:<100"},
{"include_fields", "$Products(product_name, strategy:merge), product_price"},
{"sort_by", "$Products(product_name:asc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ("shampoo", res_obj["hits"][0]["document"].at("product_name"));
ASSERT_EQ(75, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("soap", res_obj["hits"][1]["document"].at("product_name"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
req_params = {
{"collection", "Customers"},
{"q", "*"},
{"include_fields", "$Products(product_name, strategy:merge), customer_name, id"},
{"sort_by", "$Products(product_name:asc), customer_name:desc"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ(3, res_obj["hits"][0]["document"].size());
ASSERT_EQ("0", res_obj["hits"][0]["document"].at("id"));
ASSERT_EQ("Joe", res_obj["hits"][0]["document"].at("customer_name"));
ASSERT_EQ("shampoo", res_obj["hits"][0]["document"].at("product_name"));
ASSERT_EQ(3, res_obj["hits"][1]["document"].size());
ASSERT_EQ("2", res_obj["hits"][1]["document"].at("id"));
ASSERT_EQ("Dan", res_obj["hits"][1]["document"].at("customer_name"));
ASSERT_EQ("shampoo", res_obj["hits"][1]["document"].at("product_name"));
ASSERT_EQ(3, res_obj["hits"][2]["document"].size());
ASSERT_EQ("1", res_obj["hits"][2]["document"].at("id"));
ASSERT_EQ("Joe", res_obj["hits"][2]["document"].at("customer_name"));
ASSERT_EQ("soap", res_obj["hits"][2]["document"].at("product_name"));
ASSERT_EQ(3, res_obj["hits"][3]["document"].size());
ASSERT_EQ("3", res_obj["hits"][3]["document"].at("id"));
ASSERT_EQ("Dan", res_obj["hits"][3]["document"].at("customer_name"));
ASSERT_EQ("soap", res_obj["hits"][3]["document"].at("product_name"));
req_params = {
{"collection", "Candidates"},
{"q", "*"},
{"filter_by", "$Ads(id:*) || $Structures(id:*)"},
{"sort_by", "$Structures(name: asc)"},
{"include_fields", "$Ads($Structures(*))"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ("0", res_obj["hits"][0]["document"].at("id"));
ASSERT_EQ("bar", res_obj["hits"][0]["document"]["Structures"].at("name"));
ASSERT_EQ(0, res_obj["hits"][0]["document"].count("Ads"));
ASSERT_EQ("2", res_obj["hits"][1]["document"].at("id"));
ASSERT_EQ("foo", res_obj["hits"][1]["document"]["Structures"].at("name"));
ASSERT_EQ(0, res_obj["hits"][1]["document"].count("Ads"));
ASSERT_EQ("3", res_obj["hits"][2]["document"].at("id"));
ASSERT_EQ(0, res_obj["hits"][2]["document"].count("Structures"));
ASSERT_EQ(1, res_obj["hits"][2]["document"].count("Ads"));
ASSERT_EQ(1, res_obj["hits"][2]["document"]["Ads"].count("Structures"));
ASSERT_EQ("foo", res_obj["hits"][2]["document"]["Ads"]["Structures"]["name"]);
ASSERT_EQ("1", res_obj["hits"][3]["document"].at("id"));
ASSERT_EQ(0, res_obj["hits"][3]["document"].count("Structures"));
ASSERT_EQ(1, res_obj["hits"][3]["document"].count("Ads"));
ASSERT_EQ(1, res_obj["hits"][3]["document"]["Ads"].count("Structures"));
ASSERT_EQ("bar", res_obj["hits"][3]["document"]["Ads"]["Structures"]["name"]);
req_params = {
{"collection", "product"},
{"q", "tablet"},
{"query_by", "name"},
{"filter_by", "$stock(id: *)"},
{"sort_by", "$stock(_eval(store_1:true || store_2:true):desc)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(6, res_obj["found"].get<size_t>());
ASSERT_EQ(6, res_obj["hits"].size());
ASSERT_EQ("4", res_obj["hits"][0]["document"].at("id"));
ASSERT_EQ("3", res_obj["hits"][1]["document"].at("id"));
ASSERT_EQ("2", res_obj["hits"][2]["document"].at("id"));
ASSERT_EQ("0", res_obj["hits"][3]["document"].at("id"));
ASSERT_EQ("6", res_obj["hits"][4]["document"].at("id"));
ASSERT_EQ("1", res_obj["hits"][5]["document"].at("id"));
req_params = {
{"collection", "product"},
{"q", "tablet"},
{"query_by", "name"},
{"filter_by", "$stock(id: *)"},
{"sort_by", "$stock(_eval([(store_1:true && store_2:true):3, (store_1:true || store_2:true):2 ]):desc)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(6, res_obj["found"].get<size_t>());
ASSERT_EQ(6, res_obj["hits"].size());
ASSERT_EQ("4", res_obj["hits"][0]["document"].at("id"));
ASSERT_EQ("0", res_obj["hits"][1]["document"].at("id"));
ASSERT_EQ("3", res_obj["hits"][2]["document"].at("id"));
ASSERT_EQ("2", res_obj["hits"][3]["document"].at("id"));
ASSERT_EQ("6", res_obj["hits"][4]["document"].at("id"));
ASSERT_EQ("1", res_obj["hits"][5]["document"].at("id"));
req_params = {
{"collection", "product"},
{"q", "tablet"},
{"query_by", "name"},
{"filter_by", "$stock(id: *)"},
{"sort_by", "_text_match:desc, $stock(_eval([(store_1:true && store_2:true):3, (store_1:true || store_2:true):2 ]):desc)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(6, res_obj["found"].get<size_t>());
ASSERT_EQ(6, res_obj["hits"].size());
ASSERT_EQ("4", res_obj["hits"][0]["document"].at("id"));
ASSERT_EQ("0", res_obj["hits"][1]["document"].at("id"));
ASSERT_EQ("3", res_obj["hits"][2]["document"].at("id"));
ASSERT_EQ("2", res_obj["hits"][3]["document"].at("id"));
ASSERT_EQ("6", res_obj["hits"][4]["document"].at("id"));
ASSERT_EQ("1", res_obj["hits"][5]["document"].at("id"));
// Sort by reference geopoint field
req_params = {
{"collection", "stock"},
{"q", "*"},
{"filter_by", "entity_id: [P0, P2, P3, P6]"},
{"sort_by", "$product(location(13.12631, 80.20252):asc, name: desc) "},
{"include_fields", "$product(name)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ("2", res_obj["hits"][0]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["product"].size());
ASSERT_EQ("Tablet from apple", res_obj["hits"][0]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][0].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][0]["geo_distance_meters"].size());
ASSERT_EQ(15310, res_obj["hits"][0]["geo_distance_meters"]["location"]);
ASSERT_EQ("5", res_obj["hits"][1]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["product"].size());
ASSERT_EQ("Tablet from oppo", res_obj["hits"][1]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][1].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][1]["geo_distance_meters"].size());
ASSERT_EQ(15492, res_obj["hits"][1]["geo_distance_meters"]["location"]);
ASSERT_EQ("0", res_obj["hits"][2]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][2]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][2]["document"]["product"].size());
ASSERT_EQ("Generic brand Tablet", res_obj["hits"][2]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][2].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][2]["geo_distance_meters"].size());
ASSERT_EQ(15492, res_obj["hits"][2]["geo_distance_meters"]["location"]);
ASSERT_EQ("4", res_obj["hits"][3]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][3]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][3]["document"]["product"].size());
ASSERT_EQ("Tablet from xiaomi", res_obj["hits"][3]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][3].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][3]["geo_distance_meters"].size());
ASSERT_EQ(32605, res_obj["hits"][3]["geo_distance_meters"]["location"]);
req_params = {
{"collection", "stock"},
{"q", "*"},
{"sort_by", "$product(location(13.12631, 80.20252, precision: 15km):asc, name: desc) "},
{"include_fields", "$product(name)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(7, res_obj["found"].get<size_t>());
ASSERT_EQ(7, res_obj["hits"].size());
ASSERT_EQ("5", res_obj["hits"][0]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["product"].size());
ASSERT_EQ("Tablet from oppo", res_obj["hits"][0]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][0].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][0]["geo_distance_meters"].size());
ASSERT_EQ(15492, res_obj["hits"][0]["geo_distance_meters"]["location"]);
ASSERT_EQ("2", res_obj["hits"][1]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["product"].size());
ASSERT_EQ("Tablet from apple", res_obj["hits"][1]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][1].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][1]["geo_distance_meters"].size());
ASSERT_EQ(15310, res_obj["hits"][1]["geo_distance_meters"]["location"]);
ASSERT_EQ("0", res_obj["hits"][2]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][2]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][2]["document"]["product"].size());
ASSERT_EQ("Generic brand Tablet", res_obj["hits"][2]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][2].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][2]["geo_distance_meters"].size());
ASSERT_EQ(15492, res_obj["hits"][2]["geo_distance_meters"]["location"]);
ASSERT_EQ("4", res_obj["hits"][3]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][3]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][3]["document"]["product"].size());
ASSERT_EQ("Tablet from xiaomi", res_obj["hits"][3]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][3].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][3]["geo_distance_meters"].size());
ASSERT_EQ(32605, res_obj["hits"][3]["geo_distance_meters"]["location"]);
ASSERT_EQ("4", res_obj["hits"][3]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][3]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][3]["document"]["product"].size());
ASSERT_EQ("Tablet from xiaomi", res_obj["hits"][3]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][3].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][3]["geo_distance_meters"].size());
ASSERT_EQ(32605, res_obj["hits"][3]["geo_distance_meters"]["location"]);
ASSERT_EQ("3", res_obj["hits"][4]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][4]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][4]["document"]["product"].size());
ASSERT_EQ("Tablet from vivo", res_obj["hits"][4]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][4].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][4]["geo_distance_meters"].size());
ASSERT_EQ(2147483647, res_obj["hits"][4]["geo_distance_meters"]["location"]);
ASSERT_EQ("1", res_obj["hits"][5]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][5]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][5]["document"]["product"].size());
ASSERT_EQ("Tablet from samsung", res_obj["hits"][5]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][5].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][5]["geo_distance_meters"].size());
ASSERT_EQ(2147483647, res_obj["hits"][5]["geo_distance_meters"]["location"]);
ASSERT_EQ("6", res_obj["hits"][6]["document"].at("id"));
ASSERT_EQ(1, res_obj["hits"][6]["document"].count("product"));
ASSERT_EQ(1, res_obj["hits"][6]["document"]["product"].size());
ASSERT_EQ("Phone from samsung", res_obj["hits"][6]["document"]["product"]["name"]);
ASSERT_EQ(1, res_obj["hits"][6].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][6]["geo_distance_meters"].size());
ASSERT_EQ(2147483647, res_obj["hits"][6]["geo_distance_meters"]["location"]);
}
TEST_F(JoinSortTest, IntegrationWithOtherFeatures) {
// Text search
req_params = {
{"collection", "Products"},
{"q", "s"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"sort_by", "$Customers(product_price:desc)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
};
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
// Phrase search
req_params = {
{"collection", "Products"},
{"q", R"("our")"},
{"query_by", "product_description"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
// Vector search
req_params = {
{"collection", "Products"},
{"q", "natural products"},
{"query_by", "embedding"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
auto product_a_score = res_obj["hits"][0].at("vector_distance");
auto product_b_score = res_obj["hits"][1].at("vector_distance");
// product_b is a better match for the vector query but sort_by overrides the order.
ASSERT_TRUE(product_b_score < product_a_score);
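// Build an explicit vector_query from the same embedding model and verify the reference sort still overrides vector distance.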
nlohmann::json model_config = R"({
"model_name": "ts/e5-small"
})"_json;
auto query_embedding = EmbedderManager::get_instance().get_text_embedder(model_config).get()->Embed("natural products");
std::string vec_string = "[";
for (auto const& i : query_embedding.embedding) {
vec_string += std::to_string(i);
vec_string += ",";
}
vec_string[vec_string.size() - 1] = ']';
req_params = {
{"collection", "Products"},
{"q", "*"},
{"vector_query", "embedding:(" + vec_string + ", flat_search_cutoff: 0)"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
product_a_score = res_obj["hits"][0].at("vector_distance");
product_b_score = res_obj["hits"][1].at("vector_distance");
// product_b is a better match for the vector query but sort_by overrides the order.
ASSERT_TRUE(product_b_score < product_a_score);
// Hybrid search - Both text match and vector match
req_params = {
{"collection", "Products"},
{"q", "soap"},
{"query_by", "product_name, embedding"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
product_a_score = res_obj["hits"][0].at("text_match");
product_b_score = res_obj["hits"][1].at("text_match");
ASSERT_TRUE(product_b_score > product_a_score);
product_a_score = res_obj["hits"][0].at("vector_distance");
product_b_score = res_obj["hits"][1].at("vector_distance");
ASSERT_TRUE(product_b_score < product_a_score);
// Hybrid search - Only vector match
req_params = {
{"collection", "Products"},
{"q", "natural products"},
{"query_by", "product_name, embedding"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
product_a_score = res_obj["hits"][0].at("vector_distance");
product_b_score = res_obj["hits"][1].at("vector_distance");
// product_b is a better match for the vector query but sort_by overrides the order.
ASSERT_TRUE(product_b_score < product_a_score);
// Infix search
req_params = {
{"collection", "Products"},
{"q", "p"},
{"query_by", "product_name"},
{"infix", "always"},
{"filter_by", "$Customers(customer_id:=customer_a)"},
{"include_fields", "product_id, $Customers(product_price, strategy:merge)"},
{"sort_by", "$Customers(product_price:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product_id"));
ASSERT_EQ(143, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"].at("product_price"));
}
TEST_F(JoinSortTest, SortByNestedReferencedCollField) {
req_params = {
{"collection", "Users"},
{"q", "*"},
{"filter_by", "$Links(repo_id:=[repo_a, repo_d])"},
{"include_fields", "user_id, user_name, $Repos(repo_content, repo_stars, strategy:merge), "},
{"exclude_fields", "$Links(*), "},
{"sort_by", "$Repos(repo_stars: asc)"}
};
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ(4, res_obj["hits"][0]["document"].size());
ASSERT_EQ("user_d", res_obj["hits"][0]["document"].at("user_id"));
ASSERT_EQ("Aby", res_obj["hits"][0]["document"].at("user_name"));
ASSERT_EQ("body4", res_obj["hits"][0]["document"].at("repo_content"));
ASSERT_EQ(95, res_obj["hits"][0]["document"].at("repo_stars"));
ASSERT_EQ("user_c", res_obj["hits"][1]["document"].at("user_id"));
ASSERT_EQ("Joe", res_obj["hits"][1]["document"].at("user_name"));
ASSERT_EQ("body1", res_obj["hits"][1]["document"].at("repo_content"));
ASSERT_EQ(431, res_obj["hits"][1]["document"].at("repo_stars"));
ASSERT_EQ("user_b", res_obj["hits"][2]["document"].at("user_id"));
ASSERT_EQ("Ruby", res_obj["hits"][2]["document"].at("user_name"));
ASSERT_EQ("body1", res_obj["hits"][2]["document"].at("repo_content"));
ASSERT_EQ(431, res_obj["hits"][2]["document"].at("repo_stars"));
req_params = {
{"collection", "Users"},
{"q", "*"},
{"filter_by", "$Links(repo_id:=[repo_a, repo_d])"},
{"include_fields", "user_id, user_name, $Repos(repo_content, repo_stars, strategy:merge), "},
{"exclude_fields", "$Links(*), "},
{"sort_by", "$Repos(repo_stars: desc), user_name:desc"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ(4, res_obj["hits"][0]["document"].size());
ASSERT_EQ("user_b", res_obj["hits"][0]["document"].at("user_id"));
ASSERT_EQ("Ruby", res_obj["hits"][0]["document"].at("user_name"));
ASSERT_EQ("body1", res_obj["hits"][0]["document"].at("repo_content"));
ASSERT_EQ(431, res_obj["hits"][0]["document"].at("repo_stars"));
ASSERT_EQ("user_c", res_obj["hits"][1]["document"].at("user_id"));
ASSERT_EQ("Joe", res_obj["hits"][1]["document"].at("user_name"));
ASSERT_EQ("body1", res_obj["hits"][1]["document"].at("repo_content"));
ASSERT_EQ(431, res_obj["hits"][1]["document"].at("repo_stars"));
ASSERT_EQ("user_d", res_obj["hits"][2]["document"].at("user_id"));
ASSERT_EQ("Aby", res_obj["hits"][2]["document"].at("user_name"));
ASSERT_EQ("body4", res_obj["hits"][2]["document"].at("repo_content"));
ASSERT_EQ(95, res_obj["hits"][2]["document"].at("repo_stars"));
// Sort by nested reference geopoint field
req_params = {
{"collection", "Users"},
{"q", "*"},
{"filter_by", "$Links(repo_id:=[repo_a, repo_d])"},
{"include_fields", "user_id, user_name, $Repos(repo_content, repo_stars, strategy:merge), "},
{"exclude_fields", "$Links(*), "},
{"sort_by", "$Repos(repo_location(13.12631, 80.20252): asc), user_name:desc"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ(4, res_obj["hits"][0]["document"].size());
ASSERT_EQ("user_b", res_obj["hits"][0]["document"].at("user_id"));
ASSERT_EQ("Ruby", res_obj["hits"][0]["document"].at("user_name"));
ASSERT_EQ("body1", res_obj["hits"][0]["document"].at("repo_content"));
ASSERT_EQ(1, res_obj["hits"][0].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][0]["geo_distance_meters"].count("repo_location"));
ASSERT_EQ(15310, res_obj["hits"][0]["geo_distance_meters"]["repo_location"]);
ASSERT_EQ(4, res_obj["hits"][1]["document"].size());
ASSERT_EQ("user_c", res_obj["hits"][1]["document"].at("user_id"));
ASSERT_EQ("Joe", res_obj["hits"][1]["document"].at("user_name"));
ASSERT_EQ("body1", res_obj["hits"][1]["document"].at("repo_content"));
ASSERT_EQ(1, res_obj["hits"][1].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][1]["geo_distance_meters"].count("repo_location"));
ASSERT_EQ(15310, res_obj["hits"][1]["geo_distance_meters"]["repo_location"]);
ASSERT_EQ(4, res_obj["hits"][2]["document"].size());
ASSERT_EQ("user_d", res_obj["hits"][2]["document"].at("user_id"));
ASSERT_EQ("Aby", res_obj["hits"][2]["document"].at("user_name"));
ASSERT_EQ("body4", res_obj["hits"][2]["document"].at("repo_content"));
ASSERT_EQ(1, res_obj["hits"][2].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][2]["geo_distance_meters"].count("repo_location"));
ASSERT_EQ(32605, res_obj["hits"][2]["geo_distance_meters"]["repo_location"]);
{
auto const& users_coll = collectionManager.get_collection_unsafe("Users");
nlohmann::json doc_json = R"({
"user_id": "user_e",
"user_name": "Andy"
})"_json;
auto add_doc_op = users_coll->add(doc_json.dump());
ASSERT_TRUE(add_doc_op.ok());
auto const& links_coll = collectionManager.get_collection_unsafe("Links");
auto documents = {
R"({
"repo_id": "repo_c",
"user_id": "user_e"
})"_json,
R"({
"repo_id": "repo_d",
"user_id": "user_e"
})"_json,
};
for (auto const& doc: documents) {
add_doc_op = links_coll->add(doc.dump());
ASSERT_TRUE(add_doc_op.ok());
}
}
req_params = {
{"collection", "Users"},
{"q", "*"},
{"filter_by", "user_id: != [user_b, user_d] && $Links(repo_id:!=[repo_c])"},
{"include_fields", "user_id, user_name, $Repos(repo_content, repo_stars, strategy:merge), "},
{"exclude_fields", "$Links(*), "},
{"sort_by", "$Repos(repo_location(13.12631, 80.20252): asc), user_name:desc"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(3, res_obj["found"].get<size_t>());
ASSERT_EQ(3, res_obj["hits"].size());
ASSERT_EQ(4, res_obj["hits"][0]["document"].size());
ASSERT_EQ("user_c", res_obj["hits"][0]["document"].at("user_id"));
ASSERT_EQ("Joe", res_obj["hits"][0]["document"].at("user_name"));
ASSERT_EQ("body1", res_obj["hits"][0]["document"].at("repo_content"));
ASSERT_EQ(1, res_obj["hits"][0].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][0]["geo_distance_meters"].count("repo_location"));
ASSERT_EQ(15310, res_obj["hits"][0]["geo_distance_meters"]["repo_location"]);
ASSERT_EQ(4, res_obj["hits"][1]["document"].size());
ASSERT_EQ("user_a", res_obj["hits"][1]["document"].at("user_id"));
ASSERT_EQ("Roshan", res_obj["hits"][1]["document"].at("user_name"));
ASSERT_EQ("body2", res_obj["hits"][1]["document"].at("repo_content"));
ASSERT_EQ(1, res_obj["hits"][1].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][1]["geo_distance_meters"].count("repo_location"));
ASSERT_EQ(15492, res_obj["hits"][1]["geo_distance_meters"]["repo_location"]);
ASSERT_EQ(4, res_obj["hits"][2]["document"].size());
ASSERT_EQ("user_e", res_obj["hits"][2]["document"].at("user_id"));
ASSERT_EQ("Andy", res_obj["hits"][2]["document"].at("user_name"));
ASSERT_EQ("body4", res_obj["hits"][2]["document"].at("repo_content"));
ASSERT_EQ(1, res_obj["hits"][2].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][2]["geo_distance_meters"].count("repo_location"));
ASSERT_EQ(32605, res_obj["hits"][2]["geo_distance_meters"]["repo_location"]);
req_params = {
{"collection", "Candidates"},
{"q", "*"},
{"filter_by", "$Ads(id:*) || $Structures(id:*)"},
{"sort_by", "$Ads($Structures(name: asc))"},
{"include_fields", "$Ads($Structures(*))"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ("1", res_obj["hits"][0]["document"].at("id"));
ASSERT_EQ(0, res_obj["hits"][0]["document"].count("Structures"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("Ads"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Ads"].count("Structures"));
ASSERT_EQ("bar", res_obj["hits"][0]["document"]["Ads"]["Structures"]["name"]);
ASSERT_EQ("3", res_obj["hits"][1]["document"].at("id"));
ASSERT_EQ(0, res_obj["hits"][1]["document"].count("Structures"));
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("Ads"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["Ads"].count("Structures"));
ASSERT_EQ("foo", res_obj["hits"][1]["document"]["Ads"]["Structures"]["name"]);
ASSERT_EQ("2", res_obj["hits"][2]["document"].at("id"));
ASSERT_EQ("foo", res_obj["hits"][2]["document"]["Structures"].at("name"));
ASSERT_EQ(0, res_obj["hits"][2]["document"].count("Ads"));
ASSERT_EQ("0", res_obj["hits"][3]["document"].at("id"));
ASSERT_EQ("bar", res_obj["hits"][3]["document"]["Structures"].at("name"));
ASSERT_EQ(0, res_obj["hits"][3]["document"].count("Ads"));
{
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_c",
"product_name": "comb",
"product_description": "Experience the natural elegance and gentle care of our handcrafted wooden combs – because your hair deserves the best."
})"_json,
R"({
"product_id": "product_d",
"product_name": "hair oil",
"product_description": "Revitalize your hair with our nourishing hair oil – nature's secret to lustrous, healthy locks."
})"_json
};
auto const &products_coll = collectionManager.get_collection_unsafe("Products");
for (auto const &json: documents) {
auto add_op = products_coll->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
auto schema_json =
R"({
"name": "Orders",
"fields": [
{"name": "product", "type": "string", "reference": "Products.product_id" },
{"name": "amount", "type": "int32" }
]
})"_json;
documents = {
R"({
"product": "product_a",
"amount": 1000
})"_json,
R"({
"product": "product_b",
"amount": 100
})"_json,
R"({
"product": "product_d",
"amount": -100
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto orders_coll = collection_create_op.get();
for (auto const &json: documents) {
auto add_op = orders_coll->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
}
// Left join and sort by
req_params = {
{"collection", "Products"},
{"q", "*"},
{"filter_by", "id:* || $Orders(amount:>0)"},
{"sort_by", "$Orders(amount:desc)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(4, res_obj["found"].get<size_t>());
ASSERT_EQ(4, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("Orders"));
ASSERT_EQ(1000, res_obj["hits"][0]["document"]["Orders"]["amount"]);
ASSERT_EQ("product_b", res_obj["hits"][1]["document"]["product_id"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("Orders"));
ASSERT_EQ(100, res_obj["hits"][1]["document"]["Orders"]["amount"]);
ASSERT_EQ("product_d", res_obj["hits"][2]["document"]["product_id"]);
ASSERT_EQ(0, res_obj["hits"][2]["document"].count("Orders"));
ASSERT_EQ("product_c", res_obj["hits"][3]["document"]["product_id"]);
ASSERT_EQ(0, res_obj["hits"][3]["document"].count("Orders"));
// Sort by nested reference geopoint field
req_params = {
{"collection", "Orders"},
{"q", "*"},
{"filter_by", "$Products($Customers(customer_id: customer_b))"},
{"sort_by", "$Products($Customers(product_location(48.87709, 2.33495, precision: 1km):asc))"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ("product_a", res_obj["hits"][0]["document"].at("product"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("Products"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Products"].count("Customers"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Products"]["Customers"].count("product_price"));
ASSERT_EQ(75, res_obj["hits"][0]["document"]["Products"]["Customers"]["product_price"]);
ASSERT_EQ(1, res_obj["hits"][0].count("geo_distance_meters"));
ASSERT_EQ(1, res_obj["hits"][0]["geo_distance_meters"].count("product_location"));
ASSERT_EQ(538, res_obj["hits"][0]["geo_distance_meters"]["product_location"]);
ASSERT_EQ("product_b", res_obj["hits"][1]["document"].at("product"));
ASSERT_EQ(140, res_obj["hits"][1]["document"]["Products"]["Customers"]["product_price"]);
ASSERT_EQ(1356, res_obj["hits"][1]["geo_distance_meters"]["product_location"]);
req_params = {
{"collection", "prices"},
{"q", "*"},
{"filter_by", "$skus($collections(collection_id:=abc-abc))"},
{"sort_by", "$skus($collections(sort_in_collection:asc))"},
{"include_fields", "$skus($collections(*, strategy:merge), strategy:merge)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(5, res_obj["found"].get<size_t>());
ASSERT_EQ(5, res_obj["hits"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["sort_in_collection"]);
ASSERT_EQ("x_y_1_1", res_obj["hits"][0]["document"]["reference"]);
ASSERT_EQ(1.2, res_obj["hits"][0]["document"]["price"]);
ASSERT_EQ("p2", res_obj["hits"][0]["document"]["payment"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"]["sort_in_collection"]);
ASSERT_EQ("x_y_1_1", res_obj["hits"][1]["document"]["reference"]);
ASSERT_EQ(1.1, res_obj["hits"][1]["document"]["price"]);
ASSERT_EQ("p1", res_obj["hits"][1]["document"]["payment"]);
ASSERT_EQ(1, res_obj["hits"][2]["document"]["sort_in_collection"]);
ASSERT_EQ("x_y_1_1", res_obj["hits"][2]["document"]["reference"]);
ASSERT_EQ(1, res_obj["hits"][2]["document"]["price"]);
ASSERT_EQ("null", res_obj["hits"][2]["document"]["payment"]);
ASSERT_EQ(2, res_obj["hits"][3]["document"]["sort_in_collection"]);
ASSERT_EQ("x_y_1_2", res_obj["hits"][3]["document"]["reference"]);
ASSERT_EQ(2, res_obj["hits"][3]["document"]["price"]);
ASSERT_EQ("null", res_obj["hits"][3]["document"]["payment"]);
ASSERT_EQ(3, res_obj["hits"][4]["document"]["sort_in_collection"]);
ASSERT_EQ("x_y_1_3", res_obj["hits"][4]["document"]["reference"]);
ASSERT_EQ(3, res_obj["hits"][4]["document"]["price"]);
ASSERT_EQ("null", res_obj["hits"][4]["document"]["payment"]);
}
TEST_F(CollectionJoinTest, FilterByReferenceAlias) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string", "sort": true},
{"name": "product_description", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_description"], "model_config": {"model_name": "ts/e5-small"}}},
{"name": "rating", "type": "int32"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair.",
"rating": "2"
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients.",
"rating": "4"
})"_json
};
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "product_id", "type": "string", "reference": "Products.product_id"}
]
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 73.5,
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 75,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_id": "product_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
auto symlink_op = collectionManager.upsert_symlink("Products_alias", "Products");
ASSERT_TRUE(symlink_op.ok());
symlink_op = collectionManager.upsert_symlink("Customers_alias", "Customers");
ASSERT_TRUE(symlink_op.ok());
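// Search through the Products_alias symlink; the join on Customers should behave as if Products were queried directly.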
std::map<std::string, std::string> req_params = {
{"collection", "Products_alias"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers(customer_id:=customer_a && product_price:<100)"},
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
nlohmann::json res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// No fields are mentioned in `include_fields`, should include all fields of Products and Customers by default.
ASSERT_EQ(7, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("embedding"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("rating"));
// Default strategy of reference includes is nest. No alias was provided, collection name becomes the field name.
ASSERT_EQ(5, res_obj["hits"][0]["document"]["Customers"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("customer_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("product_price"));
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers_alias(customer_id:=customer_a && product_price:<100)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// No fields are mentioned in `include_fields`, should include all fields of Products and Customers by default.
ASSERT_EQ(7, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("embedding"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("rating"));
// Default strategy of reference includes is nest. No `as` alias was specified, so the name used to join the collection (here the symlink name) becomes the nested field name.
ASSERT_EQ(5, res_obj["hits"][0]["document"]["Customers_alias"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("customer_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("product_price"));
req_params = {
{"collection", "Products_alias"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers_alias(customer_id:=customer_a && product_price:<100)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// No fields are mentioned in `include_fields`, should include all fields of Products and Customers by default.
ASSERT_EQ(7, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("embedding"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("rating"));
// Default strategy of reference includes is nest. No `as` alias was specified, so the name used to join the collection (here the symlink name) becomes the nested field name.
ASSERT_EQ(5, res_obj["hits"][0]["document"]["Customers_alias"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("customer_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("product_price"));
req_params = {
{"collection", "Products_alias"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers_alias(customer_id:=customer_a && product_price:<100)"},
{"include_fields", "product_name, $Customers_alias(product_id, product_price)"},
{"exclude_fields", "$Customers_alias(product_id)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
// Default strategy of reference includes is nest. No `as` alias was specified, so the name used to join the collection (here the symlink name) becomes the nested field name.
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("product_price"));
req_params = {
{"collection", "Products_alias"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers_alias(customer_id:=customer_a)"},
{"include_fields", "product_name, $Customers_alias(product_id, product_price)"},
{"exclude_fields", "$Customers_alias(product_id)"},
{"sort_by", "$Customers_alias(product_price: desc)"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ("shampoo", res_obj["hits"][0]["document"]["product_name"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("Customers_alias"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("product_price"));
ASSERT_EQ(143, res_obj["hits"][0]["document"]["Customers_alias"]["product_price"]);
ASSERT_EQ(2, res_obj["hits"][1]["document"].size());
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("product_name"));
ASSERT_EQ("soap", res_obj["hits"][1]["document"]["product_name"]);
ASSERT_EQ(1, res_obj["hits"][1]["document"].count("Customers_alias"));
ASSERT_EQ(1, res_obj["hits"][1]["document"]["Customers_alias"].count("product_price"));
ASSERT_EQ(73.5, res_obj["hits"][1]["document"]["Customers_alias"]["product_price"]);
req_params = {
{"collection", "Customers"},
{"q", "*"},
{"filter_by", "customer_name:= [Joe, Dan] && product_price:<100"},
{"include_fields", "$Products_alias(product_name, strategy:merge), product_price"},
{"sort_by", "$Products_alias(product_name:desc)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(2, res_obj["found"].get<size_t>());
ASSERT_EQ(2, res_obj["hits"].size());
ASSERT_EQ(2, res_obj["hits"][0]["document"].size());
ASSERT_EQ("soap", res_obj["hits"][0]["document"].at("product_name"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"].at("product_price"));
ASSERT_EQ("shampoo", res_obj["hits"][1]["document"].at("product_name"));
ASSERT_EQ(75, res_obj["hits"][1]["document"].at("product_price"));
collectionManager.drop_collection("Customers");
// Alias in reference.
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "product_id", "type": "string", "reference": "Products_alias.product_id"}
]
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 73.5,
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 75,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_id": "product_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers_alias(customer_id:=customer_a && product_price:<100)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// No fields are mentioned in `include_fields`, should include all fields of Products and Customers by default.
ASSERT_EQ(7, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("embedding"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("rating"));
// Default strategy of reference includes is nest. No `as` alias was specified, so the name used to join the collection (here the symlink name) becomes the nested field name.
ASSERT_EQ(5, res_obj["hits"][0]["document"]["Customers_alias"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("customer_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("product_price"));
// recreate collection manager to ensure that it initializes `referenced_in` correctly.
collectionManager.dispose();
delete store;
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
auto load_op = collectionManager.load(8, 1000);
if(!load_op.ok()) {
LOG(ERROR) << load_op.error();
}
ASSERT_TRUE(load_op.ok());
// Reference field of Customers collection is referencing `Products_alias.product_id`. Alias resolution should happen
// in `CollectionManager::load`.
ASSERT_TRUE(collectionManager.get_collection("Products")->is_referenced_in("Customers"));
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "product_name"},
{"filter_by", "$Customers_alias(customer_id:=customer_a && product_price:<100)"},
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// No fields are mentioned in `include_fields`, should include all fields of Products and Customers by default.
ASSERT_EQ(7, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("embedding"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("rating"));
// Default strategy of reference includes is nest. No `as` alias was specified, so the name used to join the collection (here the symlink name) becomes the nested field name.
ASSERT_EQ(5, res_obj["hits"][0]["document"]["Customers_alias"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("customer_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers_alias"].count("product_price"));
}
TEST_F(CollectionJoinTest, EmbeddedParamsJoin) {
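// Join::merge_join_conditions should move join clauses out of the embedded filter and AND them into the matching join clause of the query filter.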
std::string embedded_filter = "$Customers(customer_id:customer_a)",
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_TRUE(embedded_filter.empty());
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
{
embedded_filter = "($Customers(customer_id:customer_a) )";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_TRUE(embedded_filter.empty());
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
embedded_filter = " ( $Customers(customer_id:customer_a) ) ";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_TRUE(embedded_filter.empty());
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
embedded_filter = " ( $Customers((x:2 || y:4) && z: 10) ) ";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_TRUE(embedded_filter.empty());
ASSERT_EQ("$Customers(((x:2 || y:4) && z: 10) && product_price:<100)", query_filter);
}
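// When the embedded filter mixes a join with plain field clauses, only the join part is moved;
// the remaining field clauses stay in the embedded filter.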
{
embedded_filter = "$Customers(customer_id:customer_a) && field:foo";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_EQ("field:foo", embedded_filter);
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
embedded_filter = "( $Customers(customer_id:customer_a) ) && field:foo";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_EQ("field:foo", embedded_filter);
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
embedded_filter = "($Customers(customer_id:customer_a))&&field:foo";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_EQ("field:foo", embedded_filter);
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
embedded_filter = "($Customers(customer_id:customer_a)&&field:foo)";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_EQ("(field:foo)", embedded_filter);
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
}
{
embedded_filter = "field:foo && $Customers(customer_id:customer_a) ";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_EQ("field:foo", embedded_filter);
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
embedded_filter = "field:foo && ( $Customers(customer_id:customer_a) )";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_EQ("field:foo", embedded_filter);
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
embedded_filter = "field:foo&&($Customers(customer_id:customer_a) )";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_EQ("field:foo", embedded_filter);
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
}
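// Multiple joins are merged collection by collection: $Customers merges with $Customers, $foo with $foo.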
{
embedded_filter = " ( $Customers(customer_id:customer_a) && $foo(field:value))";
query_filter = "$Customers(product_price:<100) && $foo(bar:baz)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_TRUE(embedded_filter.empty());
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100) && $foo((field:value) && bar:baz)", query_filter);
embedded_filter = "$Customers(customer_id:customer_a) && $foo(field:value)";
query_filter = "$Customers(product_price:<100) && $foo(bar:baz)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_TRUE(embedded_filter.empty());
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100) && $foo((field:value) && bar:baz)", query_filter);
embedded_filter = "$Customers(customer_id:customer_a)&&$foo( field:value )";
query_filter = "$Customers(product_price:<100) && $foo(bar:baz)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_TRUE(embedded_filter.empty());
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100) && $foo(( field:value ) && bar:baz)", query_filter);
}
{
embedded_filter = "field:value && ( $Customers(customer_id:customer_a) ) && foo:bar";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_EQ("field:value && foo:bar", embedded_filter);
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
embedded_filter = "field:value&&$Customers(customer_id:customer_a)&&foo:bar";
query_filter = "$Customers(product_price:<100)";
ASSERT_TRUE(Join::merge_join_conditions(embedded_filter, query_filter));
ASSERT_EQ("field:value&&foo:bar", embedded_filter);
ASSERT_EQ("$Customers((customer_id:customer_a) && product_price:<100)", query_filter);
}
// Malformed inputs
{
embedded_filter = " (( $Customers(customer_id:customer_a) )) ";
query_filter = "$Customers(product_price:<100)";
ASSERT_FALSE(Join::merge_join_conditions(embedded_filter, query_filter));
embedded_filter = "$Customers(customer_id:customer_a)&&";
query_filter = "$Customers(product_price:<100)";
ASSERT_FALSE(Join::merge_join_conditions(embedded_filter, query_filter));
embedded_filter = "$Customers(customer_id)&&";
query_filter = "$Customers(product_price:<100)";
ASSERT_FALSE(Join::merge_join_conditions(embedded_filter, query_filter));
embedded_filter = "$Customers(custo";
query_filter = "$Customers(product_price:<100)";
ASSERT_FALSE(Join::merge_join_conditions(embedded_filter, query_filter));
embedded_filter = "field:value && $Customers(customer_id:customer_a) || foo:bar";
query_filter = "$Customers(product_price:<100)";
ASSERT_FALSE(Join::merge_join_conditions(embedded_filter, query_filter));
embedded_filter = "field:value && $Customers(customer_id:customer_a) || $Customers(foo:bar)";
query_filter = "$Customers(product_price:<100)";
ASSERT_FALSE(Join::merge_join_conditions(embedded_filter, query_filter));
embedded_filter = "field:value && $Customers(customer_id:customer_a) || foo:bar";
query_filter = "$Customers(product_price:<100) || $Customers(foo:bar)";
ASSERT_FALSE(Join::merge_join_conditions(embedded_filter, query_filter));
}
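// End-to-end check: during a search, the scoped API key's embedded `filter_by` join is merged into the
// request's `filter_by` join before filtering.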
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string"},
{"name": "product_description", "type": "string"},
{"name": "rating", "type": "int32"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair.",
"rating": "2"
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients.",
"rating": "4"
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string"},
{"name": "product_price", "type": "float"},
{"name": "product_id", "type": "string", "reference": "Products.product_id"}
]
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 73.5,
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 75,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_id": "product_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
if (!add_op.ok()) {
LOG(INFO) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
std::map<std::string, std::string> req_params = {
{"collection", "Products"},
{"q", "*"},
{"filter_by", "$Customers(product_price:<100)"},
};
nlohmann::json embedded_params = R"({
"filter_by": "$Customers(customer_id:customer_a) "
})"_json;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
nlohmann::json res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
// No fields are mentioned in `include_fields`, so all fields of Products and Customers should be included by default.
ASSERT_EQ(6, res_obj["hits"][0]["document"].size());
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("product_description"));
ASSERT_EQ(1, res_obj["hits"][0]["document"].count("rating"));
// Default strategy of reference includes is nest. No alias was provided, so the collection name becomes the field name.
ASSERT_EQ(5, res_obj["hits"][0]["document"]["Customers"].size());
ASSERT_EQ("customer_a", res_obj["hits"][0]["document"]["Customers"]["customer_id"]);
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("customer_name"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("id"));
ASSERT_EQ(1, res_obj["hits"][0]["document"]["Customers"].count("product_id"));
ASSERT_EQ(73.5, res_obj["hits"][0]["document"]["Customers"]["product_price"]);
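// Two separate joins on the same collection inside the embedded filter cannot be merged and are rejected.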
req_params = {
{"collection", "Products"},
{"q", "*"},
{"filter_by", "$Customers(product_price:<100)"},
};
embedded_params = R"({
"filter_by": "$Customers(customer_id:customer_a) || $Customers(customer_id:customer_a) "
})"_json;
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Error applying search parameters inside Scoped Search API key", search_op.error());
req_params = {
{"collection", "Products"},
{"q", "*"},
{"filter_by", "$Customers(customer_id:customer_a) && $Customers(product_price:<100)"},
};
embedded_params.clear();
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("More than one joins found for collection `Customers` in the `filter_by`. Instead of providing separate "
"join conditions like `$customer_product_prices(customer_id:=customer_a) && "
"$customer_product_prices(custom_price:<100)`, the join condition should be provided as a single filter "
"expression like `$customer_product_prices(customer_id:=customer_a && custom_price:<100)`", search_op.error());
}
TEST_F(CollectionJoinTest, QueryByReference) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string"},
{"name": "product_description", "type": "string"}
]
})"_json;
std::vector<nlohmann::json> documents = {
R"({
"product_id": "product_a",
"product_name": "shampoo",
"product_description": "Our new moisturizing shampoo is perfect for those with dry or damaged hair."
})"_json,
R"({
"product_id": "product_b",
"product_name": "soap",
"product_description": "Introducing our all-natural, organic soap bar made with essential oils and botanical ingredients."
})"_json
};
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
schema_json =
R"({
"name": "Customers",
"fields": [
{"name": "customer_id", "type": "string"},
{"name": "customer_name", "type": "string", "sort": true},
{"name": "product_price", "type": "float"},
{"name": "product_id", "type": "string", "reference": "Products.product_id", "sort": true}
]
})"_json;
documents = {
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 143,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_a",
"customer_name": "Joe",
"product_price": 73.5,
"product_id": "product_b"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 75,
"product_id": "product_a"
})"_json,
R"({
"customer_id": "customer_b",
"customer_name": "Dan",
"product_price": 140,
"product_id": "product_b"
})"_json
};
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
for (auto const &json: documents) {
auto add_op = collection_create_op.get()->add(json.dump());
ASSERT_TRUE(add_op.ok());
}
std::map<std::string, std::string> req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "$Customers(customer_name)"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Query by reference is not yet supported.", search_op.error());
req_params = {
{"collection", "Products"},
{"q", "*"},
{"query_by", "$Customers(customer_name"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Could not find `$Customers(customer_name` field in the schema.", search_op.error());
}
TEST_F(CollectionJoinTest, GetReferenceCollectionNames) {
std::string filter_query = "";
ref_include_collection_names_t* ref_includes = nullptr;
Join::get_reference_collection_names(filter_query, ref_includes);
ASSERT_TRUE(ref_includes->collection_names.empty());
ASSERT_EQ(nullptr, ref_includes->nested_include);
delete ref_includes;
ref_includes = nullptr;
filter_query = "foo";
Join::get_reference_collection_names(filter_query, ref_includes);
ASSERT_TRUE(ref_includes->collection_names.empty());
ASSERT_EQ(nullptr, ref_includes->nested_include);
delete ref_includes;
ref_includes = nullptr;
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
auto create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(create_op.ok());
std::map<std::string, std::string> req_params = {
{"collection", "coll1"},
{"q", "*"},
{"filter_by", "title"},
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op_bool = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op_bool.ok());
ASSERT_EQ(search_op_bool.error(), "Could not parse the filter query.");
filter_query = "foo:bar";
Join::get_reference_collection_names(filter_query, ref_includes);
ASSERT_TRUE(ref_includes->collection_names.empty());
ASSERT_EQ(nullptr, ref_includes->nested_include);
delete ref_includes;
ref_includes = nullptr;
filter_query = "$foo(bar:baz) & age: <5";
Join::get_reference_collection_names(filter_query, ref_includes);
ASSERT_TRUE(ref_includes->collection_names.empty());
ASSERT_EQ(nullptr, ref_includes->nested_include);
delete ref_includes;
ref_includes = nullptr;
filter_query = "$foo(bar:baz)";
Join::get_reference_collection_names(filter_query, ref_includes);
ASSERT_EQ(1, ref_includes->collection_names.size());
ASSERT_EQ(1, ref_includes->collection_names.count("foo"));
ASSERT_EQ(nullptr, ref_includes->nested_include);
delete ref_includes;
ref_includes = nullptr;
filter_query = "((age: <5 || age: >10) && category:= [shoes]) &&"
" $Customers(customer_id:=customer_a && (product_price:>100 && product_price:<200))";
Join::get_reference_collection_names(filter_query, ref_includes);
ASSERT_EQ(1, ref_includes->collection_names.size());
ASSERT_EQ(1, ref_includes->collection_names.count("Customers"));
ASSERT_EQ(nullptr, ref_includes->nested_include);
delete ref_includes;
ref_includes = nullptr;
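// Nested joins produce a chain of nested_include nodes, one per nesting level.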
filter_query = "$product_variants( $inventory($retailers(location:(33.865,-118.375,100 km))))";
Join::get_reference_collection_names(filter_query, ref_includes);
ASSERT_EQ(1, ref_includes->collection_names.size());
ASSERT_EQ(1, ref_includes->collection_names.count("product_variants"));
ASSERT_EQ(1, ref_includes->nested_include->collection_names.size());
ASSERT_EQ(1, ref_includes->nested_include->collection_names.count("inventory"));
ASSERT_EQ(1, ref_includes->nested_include->nested_include->collection_names.size());
ASSERT_EQ(1, ref_includes->nested_include->nested_include->collection_names.count("retailers"));
ASSERT_EQ(nullptr, ref_includes->nested_include->nested_include->nested_include);
delete ref_includes;
ref_includes = nullptr;
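// Sibling joins at the same nesting level share a single nested_include node.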
filter_query = "$product_variants( $inventory(id:*) && $retailers(location:(33.865,-118.375,100 km)))";
Join::get_reference_collection_names(filter_query, ref_includes);
ASSERT_EQ(1, ref_includes->collection_names.size());
ASSERT_EQ(1, ref_includes->collection_names.count("product_variants"));
ASSERT_EQ(2, ref_includes->nested_include->collection_names.size());
ASSERT_EQ(1, ref_includes->nested_include->collection_names.count("inventory"));
ASSERT_EQ(1, ref_includes->nested_include->collection_names.count("retailers"));
ASSERT_EQ(nullptr, ref_includes->nested_include->nested_include);
delete ref_includes;
ref_includes = nullptr;
}
TEST_F(CollectionJoinTest, InitializeRefIncludeExcludeFields) {
std::string filter_query = "";
std::vector<std::string> include_fields_vec, exclude_fields_vec;
std::vector<ref_include_exclude_fields> ref_include_exclude_fields_vec;
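// Reference include/exclude specs follow the form `$Collection(field1, field2, strategy: nest|merge|nest_array) as alias`;
// nested `$Collection(...)` specs recurse into nested_join_includes. As the assertions below show, a merge
// alias is stored with a trailing `.` (presumably so it can be prefixed onto the merged field names).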
auto initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_TRUE(ref_include_exclude_fields_vec.empty());
filter_query = "$foo(bar:baz)";
exclude_fields_vec = {"$foo(bar)"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_EQ(1, ref_include_exclude_fields_vec.size());
ASSERT_EQ("foo", ref_include_exclude_fields_vec[0].collection_name);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].include_fields.empty());
ASSERT_EQ("bar", ref_include_exclude_fields_vec[0].exclude_fields);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].alias.empty());
ASSERT_EQ(ref_include::nest, ref_include_exclude_fields_vec[0].strategy);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].nested_join_includes.empty());
ref_include_exclude_fields_vec.clear();
exclude_fields_vec.clear();
filter_query = "";
include_fields_vec = {"$Customers(product_price, strategy: foo) as customers"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_FALSE(initialize_op.ok());
ASSERT_EQ("Error parsing `$Customers(product_price, strategy: foo) as customers`: Unknown include strategy `foo`. "
"Valid options are `merge`, `nest`, `nest_array`.", initialize_op.error());
include_fields_vec = {"$Customers(product_price, foo: bar) as customers"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_FALSE(initialize_op.ok());
ASSERT_EQ("Unknown reference `include_fields` parameter: `foo`.", initialize_op.error());
filter_query = "$Customers(customer_id:=customer_a && (product_price:>100 && product_price:<200))";
include_fields_vec = {"$Customers(product_price, strategy: merge) as customers"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_EQ(1, ref_include_exclude_fields_vec.size());
ASSERT_EQ("Customers", ref_include_exclude_fields_vec[0].collection_name);
ASSERT_EQ("product_price", ref_include_exclude_fields_vec[0].include_fields);
ASSERT_EQ("customers.", ref_include_exclude_fields_vec[0].alias);
ASSERT_EQ(ref_include::merge, ref_include_exclude_fields_vec[0].strategy);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].nested_join_includes.empty());
ref_include_exclude_fields_vec.clear();
filter_query = "$Customers(customer_id:=customer_a && (product_price:>100 && product_price:<200))";
include_fields_vec = {"$Customers(product_price, strategy: nest_array) as customers"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_EQ(1, ref_include_exclude_fields_vec.size());
ASSERT_EQ("Customers", ref_include_exclude_fields_vec[0].collection_name);
ASSERT_EQ("product_price", ref_include_exclude_fields_vec[0].include_fields);
ASSERT_EQ("customers", ref_include_exclude_fields_vec[0].alias);
ASSERT_EQ(ref_include::nest_array, ref_include_exclude_fields_vec[0].strategy);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].nested_join_includes.empty());
ref_include_exclude_fields_vec.clear();
filter_query = "$product_variants( $inventory($retailers(location:(33.865,-118.375,100 km))))";
include_fields_vec = {"$product_variants(id,$inventory(qty,sku,$retailers(id,title)))"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_EQ(1, ref_include_exclude_fields_vec.size());
ASSERT_EQ("product_variants", ref_include_exclude_fields_vec[0].collection_name);
ASSERT_EQ("id,", ref_include_exclude_fields_vec[0].include_fields);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].alias.empty());
ASSERT_EQ(ref_include::nest, ref_include_exclude_fields_vec[0].strategy);
auto nested_include_excludes = ref_include_exclude_fields_vec[0].nested_join_includes;
ASSERT_EQ("inventory", nested_include_excludes[0].collection_name);
ASSERT_EQ("qty,sku,", nested_include_excludes[0].include_fields);
ASSERT_TRUE(nested_include_excludes[0].alias.empty());
ASSERT_EQ(ref_include::nest, nested_include_excludes[0].strategy);
nested_include_excludes = ref_include_exclude_fields_vec[0].nested_join_includes[0].nested_join_includes;
ASSERT_EQ("retailers", nested_include_excludes[0].collection_name);
ASSERT_EQ("id,title", nested_include_excludes[0].include_fields);
ASSERT_TRUE(nested_include_excludes[0].alias.empty());
ASSERT_EQ(ref_include::nest, nested_include_excludes[0].strategy);
ref_include_exclude_fields_vec.clear();
filter_query = "$product_variants( $inventory($retailers(location:(33.865,-118.375,100 km))))";
include_fields_vec = {"$product_variants(title, $inventory(qty, strategy:merge) as inventory, strategy: nest) as variants"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_EQ(1, ref_include_exclude_fields_vec.size());
ASSERT_EQ("product_variants", ref_include_exclude_fields_vec[0].collection_name);
ASSERT_EQ("title", ref_include_exclude_fields_vec[0].include_fields);
ASSERT_EQ("variants", ref_include_exclude_fields_vec[0].alias);
ASSERT_EQ(ref_include::nest, ref_include_exclude_fields_vec[0].strategy);
nested_include_excludes = ref_include_exclude_fields_vec[0].nested_join_includes;
ASSERT_EQ("inventory", nested_include_excludes[0].collection_name);
ASSERT_EQ("qty", nested_include_excludes[0].include_fields);
ASSERT_EQ("inventory.", nested_include_excludes[0].alias);
ASSERT_EQ(ref_include::merge, nested_include_excludes[0].strategy);
nested_include_excludes = ref_include_exclude_fields_vec[0].nested_join_includes[0].nested_join_includes;
ASSERT_EQ("retailers", nested_include_excludes[0].collection_name);
ASSERT_TRUE(nested_include_excludes[0].include_fields.empty());
ASSERT_TRUE(nested_include_excludes[0].alias.empty());
ASSERT_EQ(ref_include::nest, nested_include_excludes[0].strategy);
ref_include_exclude_fields_vec.clear();
filter_query = "$product_variants( $inventory(id:*) && $retailers(location:(33.865,-118.375,100 km)))";
include_fields_vec = {"$product_variants(title, $inventory(qty, strategy:merge) as inventory,"
" $retailers(title), strategy: merge) as variants"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_EQ(1, ref_include_exclude_fields_vec.size());
ASSERT_EQ("product_variants", ref_include_exclude_fields_vec[0].collection_name);
ASSERT_EQ("title", ref_include_exclude_fields_vec[0].include_fields);
ASSERT_EQ("variants.", ref_include_exclude_fields_vec[0].alias);
ASSERT_EQ(ref_include::merge, ref_include_exclude_fields_vec[0].strategy);
nested_include_excludes = ref_include_exclude_fields_vec[0].nested_join_includes;
ASSERT_EQ("inventory", nested_include_excludes[0].collection_name);
ASSERT_EQ("qty", nested_include_excludes[0].include_fields);
ASSERT_EQ("inventory.", nested_include_excludes[0].alias);
ASSERT_EQ(ref_include::merge, nested_include_excludes[0].strategy);
ASSERT_EQ("retailers", nested_include_excludes[1].collection_name);
ASSERT_EQ("title", nested_include_excludes[1].include_fields);
ASSERT_TRUE(nested_include_excludes[1].alias.empty());
ASSERT_EQ(ref_include::nest, nested_include_excludes[1].strategy);
ref_include_exclude_fields_vec.clear();
filter_query = "$product_variants( $inventory(id:*) && $retailers(location:(33.865,-118.375,100 km)))";
include_fields_vec = {"$product_variants(title, $inventory(qty, strategy:merge) as inventory, description,"
" $retailers(title), foo, strategy: merge) as variants"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_EQ(1, ref_include_exclude_fields_vec.size());
ASSERT_EQ("product_variants", ref_include_exclude_fields_vec[0].collection_name);
ASSERT_EQ("title, description, foo", ref_include_exclude_fields_vec[0].include_fields);
ASSERT_EQ("variants.", ref_include_exclude_fields_vec[0].alias);
ASSERT_EQ(ref_include::merge, ref_include_exclude_fields_vec[0].strategy);
nested_include_excludes = ref_include_exclude_fields_vec[0].nested_join_includes;
ASSERT_EQ("inventory", nested_include_excludes[0].collection_name);
ASSERT_EQ("qty", nested_include_excludes[0].include_fields);
ASSERT_EQ("inventory.", nested_include_excludes[0].alias);
ASSERT_EQ(ref_include::merge, nested_include_excludes[0].strategy);
ASSERT_EQ("retailers", nested_include_excludes[1].collection_name);
ASSERT_EQ("title", nested_include_excludes[1].include_fields);
ASSERT_TRUE(nested_include_excludes[1].alias.empty());
ASSERT_EQ(ref_include::nest, nested_include_excludes[1].strategy);
ref_include_exclude_fields_vec.clear();
filter_query = "$Customers(customer_id:=customer_a && (product_price:>100 && product_price:<200))";
include_fields_vec.clear();
exclude_fields_vec = {"$Customers(product_price)"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_EQ(1, ref_include_exclude_fields_vec.size());
ASSERT_EQ("Customers", ref_include_exclude_fields_vec[0].collection_name);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].include_fields.empty());
ASSERT_EQ("product_price", ref_include_exclude_fields_vec[0].exclude_fields);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].alias.empty());
ASSERT_EQ(ref_include::nest, ref_include_exclude_fields_vec[0].strategy);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].nested_join_includes.empty());
ref_include_exclude_fields_vec.clear();
filter_query = "$product_variants( $inventory(id:*) && $retailers(location:(33.865,-118.375,100 km)))";
include_fields_vec.clear();
exclude_fields_vec = {"$product_variants(title, $inventory(qty), description, $retailers(title), foo)"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_EQ(1, ref_include_exclude_fields_vec.size());
ASSERT_EQ("product_variants", ref_include_exclude_fields_vec[0].collection_name);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].include_fields.empty());
ASSERT_EQ("title, description, foo", ref_include_exclude_fields_vec[0].exclude_fields);
ASSERT_TRUE(ref_include_exclude_fields_vec[0].alias.empty());
ASSERT_EQ(ref_include::nest, ref_include_exclude_fields_vec[0].strategy);
nested_include_excludes = ref_include_exclude_fields_vec[0].nested_join_includes;
ASSERT_EQ("inventory", nested_include_excludes[0].collection_name);
ASSERT_TRUE(nested_include_excludes[0].include_fields.empty());
ASSERT_EQ("qty", nested_include_excludes[0].exclude_fields);
ASSERT_TRUE(nested_include_excludes[0].alias.empty());
ASSERT_EQ(ref_include::nest, nested_include_excludes[0].strategy);
ASSERT_EQ("retailers", nested_include_excludes[1].collection_name);
ASSERT_TRUE(nested_include_excludes[1].include_fields.empty());
ASSERT_EQ("title", nested_include_excludes[1].exclude_fields);
ASSERT_TRUE(nested_include_excludes[1].alias.empty());
ASSERT_EQ(ref_include::nest, nested_include_excludes[1].strategy);
ref_include_exclude_fields_vec.clear();
filter_query = "$product_variants( $inventory($retailers(location:(33.865,-118.375,100 km))))";
include_fields_vec = {"$product_variants(title, $inventory(qty, strategy:merge) as inventory, strategy: nest) as variants"};
exclude_fields_vec = {"$product_variants(title, $inventory(qty, $retailers(title)))"};
initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec,
exclude_fields_vec,
ref_include_exclude_fields_vec);
ASSERT_TRUE(initialize_op.ok());
ASSERT_EQ(1, ref_include_exclude_fields_vec.size());
ASSERT_EQ("product_variants", ref_include_exclude_fields_vec[0].collection_name);
ASSERT_EQ("title", ref_include_exclude_fields_vec[0].include_fields);
ASSERT_EQ("title,", ref_include_exclude_fields_vec[0].exclude_fields);
ASSERT_EQ("variants", ref_include_exclude_fields_vec[0].alias);
ASSERT_EQ(ref_include::nest, ref_include_exclude_fields_vec[0].strategy);
nested_include_excludes = ref_include_exclude_fields_vec[0].nested_join_includes;
ASSERT_EQ("inventory", nested_include_excludes[0].collection_name);
ASSERT_EQ("qty", nested_include_excludes[0].include_fields);
ASSERT_EQ("qty,", nested_include_excludes[0].exclude_fields);
ASSERT_EQ("inventory.", nested_include_excludes[0].alias);
ASSERT_EQ(ref_include::merge, nested_include_excludes[0].strategy);
nested_include_excludes = ref_include_exclude_fields_vec[0].nested_join_includes[0].nested_join_includes;
ASSERT_EQ("retailers", nested_include_excludes[0].collection_name);
ASSERT_TRUE(nested_include_excludes[0].include_fields.empty());
ASSERT_EQ("title", nested_include_excludes[0].exclude_fields);
ASSERT_TRUE(nested_include_excludes[0].alias.empty());
ASSERT_EQ(ref_include::nest, nested_include_excludes[0].strategy);
ref_include_exclude_fields_vec.clear();
}
| size: 381,145 | language: C++ | extension: .cpp | total_lines: 7,864 | avg_line_length: 38.23881 | max_line_length: 170 | alphanum_fraction: 0.541063 | repo: typesense/typesense | stars: 20,571 | forks: 633 | open_issues: 548 | license: GPL-3.0 | extracted: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags: false, false, false, false, false, false, false, false |
| id: 3,741 | file_name: geo_filtering_old_test.cpp | file_path: typesense_typesense/test/geo_filtering_old_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <algorithm>
#include <collection_manager.h>
#include "collection.h"
class GeoFilteringOldTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_filtering";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
TEST_F(GeoFilteringOldTest, GeoPointFiltering) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Palais Garnier", "48.872576479306765, 2.332291112241466"},
{"Sacre Coeur", "48.888286721920934, 2.342340862419206"},
{"Arc de Triomphe", "48.87538726829884, 2.296113163780903"},
{"Place de la Concorde", "48.86536119187326, 2.321850747347093"},
{"Louvre Musuem", "48.86065813197502, 2.3381285349616725"},
{"Les Invalides", "48.856648379569904, 2.3118555692631357"},
{"Eiffel Tower", "48.85821022164442, 2.294239067890161"},
{"Notre-Dame de Paris", "48.852455825574495, 2.35071182406452"},
{"Musee Grevin", "48.872370541246816, 2.3431536410008906"},
{"Pantheon", "48.84620987789056, 2.345152755563131"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// pick a location close to only the Sacre Coeur
auto results = coll1->search("*",
{}, "loc: (48.90615915923891, 2.3435897727061175, 3 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
results = coll1->search("*", {}, "loc: (48.90615, 2.34358, 1 km) || "
"loc: (48.8462, 2.34515, 1 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(2, results["found"].get<size_t>());
// pick location close to none of the spots
results = coll1->search("*",
{}, "loc: (48.910544830985785, 2.337218333651177, 2 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// pick a large radius covering all points
results = coll1->search("*",
{}, "loc: (48.910544830985785, 2.337218333651177, 20 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(10, results["found"].get<size_t>());
// 1 mile radius
results = coll1->search("*",
{}, "loc: (48.85825332869331, 2.303816427653377, 1 mi)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_STREQ("6", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("5", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("3", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// when geo query had NaN
auto gop = coll1->search("*", {}, "loc: (NaN, nan, 1 mi)",
{}, {}, {0}, 10, 1, FREQUENCY);
ASSERT_FALSE(gop.ok());
ASSERT_EQ("Value of filter field `loc`: must be in the `(-44.50, 170.29, 0.75 km)` or "
"(56.33, -65.97, 23.82, -127.82) format.", gop.error());
// when geo field is formatted as string, show meaningful error
nlohmann::json bad_doc;
bad_doc["id"] = "1000";
bad_doc["title"] = "Test record";
bad_doc["loc"] = {"48.91", "2.33"};
bad_doc["points"] = 1000;
auto add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a geopoint.", add_op.error());
bad_doc["loc"] = "foobar";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a 2 element array: [lat, lng].", add_op.error());
bad_doc["loc"] = "loc: (48.910544830985785, 2.337218333651177, 2k)";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a 2 element array: [lat, lng].", add_op.error());
bad_doc["loc"] = "loc: (48.910544830985785, 2.337218333651177, 2)";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a 2 element array: [lat, lng].", add_op.error());
bad_doc["loc"] = {"foo", "bar"};
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a geopoint.", add_op.error());
bad_doc["loc"] = {"2.33", "bar"};
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a geopoint.", add_op.error());
bad_doc["loc"] = {"foo", "2.33"};
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be a geopoint.", add_op.error());
// under coercion mode, it should work
bad_doc["loc"] = {"48.91", "2.33"};
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_TRUE(add_op.ok());
collectionManager.drop_collection("coll1");
}
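// For geopoint[] (array) fields, a document matches when any of its points falls inside the filter radius.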
TEST_F(GeoFilteringOldTest, GeoPointArrayFiltering) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT_ARRAY, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::vector<std::string>>> records = {
{ {"Alpha Inc", "Ennore", "13.22112, 80.30511"},
{"Alpha Inc", "Velachery", "12.98973, 80.23095"}
},
{
{"Veera Inc", "Thiruvallur", "13.12752, 79.90136"},
},
{
{"B1 Inc", "Bengaluru", "12.98246, 77.5847"},
{"B1 Inc", "Hosur", "12.74147, 77.82915"},
{"B1 Inc", "Vellore", "12.91866, 79.13075"},
},
{
{"M Inc", "Nashik", "20.11282, 73.79458"},
{"M Inc", "Pune", "18.56309, 73.855"},
}
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = records[i][0][0];
doc["points"] = i;
std::vector<std::vector<double>> lat_lngs;
for(size_t k = 0; k < records[i].size(); k++) {
std::vector<std::string> lat_lng_str;
StringUtils::split(records[i][k][2], lat_lng_str, ", ");
std::vector<double> lat_lng = {
std::stod(lat_lng_str[0]),
std::stod(lat_lng_str[1])
};
lat_lngs.push_back(lat_lng);
}
doc["loc"] = lat_lngs;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
// pick a location close to Chennai
auto results = coll1->search("*",
{}, "loc: (13.12631, 80.20252, 100km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
// pick location close to none of the spots
results = coll1->search("*",
{}, "loc: (13.62601, 79.39559, 10 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(0, results["found"].get<size_t>());
// pick a large radius covering all points
results = coll1->search("*",
{}, "loc: (21.20714729927276, 78.99153966917213, 1000 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(4, results["found"].get<size_t>());
// 1 mile radius
results = coll1->search("*",
{}, "loc: (12.98941, 80.23073, 1mi)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// when geo field is formatted badly, show meaningful error
nlohmann::json bad_doc;
bad_doc["id"] = "1000";
bad_doc["title"] = "Test record";
bad_doc["loc"] = {"48.91", "2.33"};
bad_doc["points"] = 1000;
auto add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must contain 2 element arrays: [ [lat, lng],... ].", add_op.error());
bad_doc["loc"] = "foobar";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be an array.", add_op.error());
bad_doc["loc"] = nlohmann::json::array();
nlohmann::json points = nlohmann::json::array();
points.push_back("foo");
points.push_back("bar");
bad_doc["loc"].push_back(points);
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be an array of geopoint.", add_op.error());
bad_doc["loc"][0][0] = "2.33";
bad_doc["loc"][0][1] = "bar";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be an array of geopoint.", add_op.error());
bad_doc["loc"][0][0] = "foo";
bad_doc["loc"][0][1] = "2.33";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `loc` must be an array of geopoint.", add_op.error());
// under coercion mode, it should work
bad_doc["loc"][0][0] = "48.91";
bad_doc["loc"][0][1] = "2.33";
add_op = coll1->add(bad_doc.dump(), CREATE, "", DIRTY_VALUES::COERCE_OR_REJECT);
ASSERT_TRUE(add_op.ok());
collectionManager.drop_collection("coll1");
}
TEST_F(GeoFilteringOldTest, GeoPointRemoval) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc1", field_types::GEOPOINT, false),
field("loc2", field_types::GEOPOINT_ARRAY, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Palais Garnier";
doc["loc1"] = {48.872576479306765, 2.332291112241466};
doc["loc2"] = nlohmann::json::array();
doc["loc2"][0] = {48.84620987789056, 2.345152755563131};
doc["points"] = 100;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results = coll1->search("*",
{}, "loc1: (48.87491151802846, 2.343945883701618, 1 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("*",
{}, "loc2: (48.87491151802846, 2.343945883701618, 10 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
// remove the document, index another document and try querying again
coll1->remove("0");
doc["id"] = "1";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
results = coll1->search("*",
{}, "loc1: (48.87491151802846, 2.343945883701618, 1 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("*",
{}, "loc2: (48.87491151802846, 2.343945883701618, 10 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
}
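// Polygon filters take a flat list of lat,lng vertex pairs; vertex order (clockwise or counter-clockwise)
// should not matter, and degenerate polygons (e.g. duplicate vertices) are rejected as invalid.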
TEST_F(GeoFilteringOldTest, GeoPolygonFiltering) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"Palais Garnier", "48.872576479306765, 2.332291112241466"},
{"Sacre Coeur", "48.888286721920934, 2.342340862419206"},
{"Arc de Triomphe", "48.87538726829884, 2.296113163780903"},
{"Place de la Concorde", "48.86536119187326, 2.321850747347093"},
{"Louvre Musuem", "48.86065813197502, 2.3381285349616725"},
{"Les Invalides", "48.856648379569904, 2.3118555692631357"},
{"Eiffel Tower", "48.85821022164442, 2.294239067890161"},
{"Notre-Dame de Paris", "48.852455825574495, 2.35071182406452"},
{"Musee Grevin", "48.872370541246816, 2.3431536410008906"},
{"Pantheon", "48.84620987789056, 2.345152755563131"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// pick a location close to only the Sacre Coeur
auto results = coll1->search("*",
{}, "loc: (48.875223042424125,2.323509661928681, "
"48.85745408145392, 2.3267084486160856, "
"48.859636574404355,2.351469427048221, "
"48.87756059389807, 2.3443610121873206)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("8", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("4", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][2]["document"]["id"].get<std::string>().c_str());
// should work even if points of polygon are clockwise
results = coll1->search("*",
{}, "loc: (48.87756059389807, 2.3443610121873206, "
"48.859636574404355,2.351469427048221, "
"48.85745408145392, 2.3267084486160856, "
"48.875223042424125,2.323509661928681)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
auto search_op = coll1->search("*", {}, "loc: (10, 20, 11, 12, 14, 16, 10, 20, 11, 40)", {}, {}, {0}, 10, 1,
FREQUENCY);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Polygon is invalid: Edge 2 has duplicate vertex with edge 4", search_op.error());
search_op = coll1->search("*", {}, "loc: (10, 20, 11, 12, 14, 16, 10, 20)", {}, {}, {0}, 10, 1,
FREQUENCY);
ASSERT_TRUE(search_op.ok());
ASSERT_EQ(0, search_op.get()["found"].get<size_t>());
collectionManager.drop_collection("coll1");
}
TEST_F(GeoFilteringOldTest, GeoPolygonFilteringSouthAmerica) {
Collection *coll1;
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
}
std::vector<std::vector<std::string>> records = {
{"North of Equator", "4.48615, -71.38049"},
{"South of Equator", "-8.48587, -71.02892"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// pick a polygon that covers both points
auto results = coll1->search("*",
{}, "loc: (13.3163, -82.3585, "
"-29.134, -82.3585, "
"-29.134, -59.8528, "
"13.3163, -59.8528)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
collectionManager.drop_collection("coll1");
}
TEST_F(GeoFilteringOldTest, GeoPointFilteringWithNonSortableLocationField) {
std::vector<field> fields = {field("title", field_types::STRING, false),
field("loc", field_types::GEOPOINT, false),
field("points", field_types::INT32, false),};
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string", "sort": false},
{"name": "loc", "type": "geopoint", "sort": true},
{"name": "points", "type": "int32", "sort": false}
]
})"_json;
auto coll_op = collectionManager.create_collection(schema);
ASSERT_TRUE(coll_op.ok());
Collection* coll1 = coll_op.get();
std::vector<std::vector<std::string>> records = {
{"Palais Garnier", "48.872576479306765, 2.332291112241466"},
{"Sacre Coeur", "48.888286721920934, 2.342340862419206"},
{"Arc de Triomphe", "48.87538726829884, 2.296113163780903"},
};
for(size_t i=0; i<records.size(); i++) {
nlohmann::json doc;
std::vector<std::string> lat_lng;
StringUtils::split(records[i][1], lat_lng, ", ");
double lat = std::stod(lat_lng[0]);
double lng = std::stod(lat_lng[1]);
doc["id"] = std::to_string(i);
doc["title"] = records[i][0];
doc["loc"] = {lat, lng};
doc["points"] = i;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
// pick a location close to only the Sacre Coeur
auto results = coll1->search("*",
{}, "loc: (48.90615915923891, 2.3435897727061175, 3 km)",
{}, {}, {0}, 10, 1, FREQUENCY).get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
}
| size: 21,398 | language: C++ | extension: .cpp | total_lines: 422 | avg_line_length: 40 | max_line_length: 112 | alphanum_fraction: 0.540465 | repo: typesense/typesense | stars: 20,571 | forks: 633 | open_issues: 548 | license: GPL-3.0 | extracted: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags: false, false, false, false, false, false, false, false |
| id: 3,742 | file_name: conversation_test.cpp | file_path: typesense_typesense/test/conversation_test.cpp |
#include <gtest/gtest.h>
#include "conversation_manager.h"
class ConversationTest : public ::testing::Test {
protected:
CollectionManager & collectionManager = CollectionManager::get_instance();
Store* store;
std::atomic<bool> quit = false;
nlohmann::json model = R"({
"id": "0",
"history_collection": "conversation_store",
"ttl": 86400
})"_json;
void SetUp() override {
std::string state_dir_path = "/tmp/typesense_test/conversation_test";
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
nlohmann::json schema_json = R"({
"name": "conversation_store",
"fields": [
{
"name": "conversation_id",
"type": "string"
},
{
"name": "role",
"type": "string",
"index": false
},
{
"name": "message",
"type": "string",
"index": false
},
{
"name": "timestamp",
"type": "int32",
"sort": true
},
{
"name": "model_id",
"type": "string"
}
]
})"_json;
collectionManager.create_collection(schema_json);
ConversationModelManager::insert_model_for_testing("0", model);
}
void TearDown() override {
collectionManager.dispose();
delete store;
}
};
TEST_F(ConversationTest, CreateConversation) {
nlohmann::json conversation = nlohmann::json::array();
auto create_res = ConversationManager::get_instance().add_conversation(conversation, model);
ASSERT_TRUE(create_res.ok());
}
TEST_F(ConversationTest, CreateConversationInvalidType) {
nlohmann::json conversation = nlohmann::json::object();
auto create_res = ConversationManager::get_instance().add_conversation(conversation, "conversation_store");
ASSERT_FALSE(create_res.ok());
ASSERT_EQ(create_res.code(), 400);
ASSERT_EQ(create_res.error(), "Conversation is not an array");
}
TEST_F(ConversationTest, GetInvalidConversation) {
auto get_res = ConversationManager::get_instance().get_conversation("qwerty");
ASSERT_FALSE(get_res.ok());
ASSERT_EQ(get_res.code(), 404);
ASSERT_EQ(get_res.error(), "Conversation not found");
}
TEST_F(ConversationTest, AppendConversation) {
nlohmann::json conversation = nlohmann::json::array();
nlohmann::json message = nlohmann::json::object();
message["user"] = "Hello";
conversation.push_back(message);
auto create_res = ConversationManager::get_instance().add_conversation(conversation, model);
ASSERT_TRUE(create_res.ok());
std::string conversation_id = create_res.get();
LOG(INFO) << conversation_id;
auto append_res = ConversationManager::get_instance().add_conversation(conversation, model, conversation_id);
ASSERT_TRUE(append_res.ok());
ASSERT_EQ(append_res.get(), conversation_id);
auto get_res = ConversationManager::get_instance().get_conversation(conversation_id);
ASSERT_TRUE(get_res.ok());
ASSERT_TRUE(get_res.get()["conversation"].is_array());
ASSERT_EQ(get_res.get()["id"], conversation_id);
ASSERT_EQ(get_res.get()["conversation"].size(), 2);
ASSERT_EQ(get_res.get()["conversation"][0]["user"], "Hello");
ASSERT_EQ(get_res.get()["conversation"][1]["user"], "Hello");
}
TEST_F(ConversationTest, AppendInvalidConversation) {
nlohmann::json conversation = nlohmann::json::array();
nlohmann::json message = nlohmann::json::object();
message["user"] = "Hello";
auto create_res = ConversationManager::get_instance().add_conversation(conversation, model);
ASSERT_TRUE(create_res.ok());
std::string conversation_id = create_res.get();
message = "invalid";
auto append_res = ConversationManager::get_instance().add_conversation(message, model, conversation_id);
ASSERT_FALSE(append_res.ok());
ASSERT_EQ(append_res.code(), 400);
ASSERT_EQ(append_res.error(), "Conversation is not an array");
}
TEST_F(ConversationTest, DeleteConversation) {
nlohmann::json conversation = nlohmann::json::array();
nlohmann::json message = nlohmann::json::object();
message["user"] = "Hello";
conversation.push_back(message);
auto create_res = ConversationManager::get_instance().add_conversation(conversation, model);
ASSERT_TRUE(create_res.ok());
std::string conversation_id = create_res.get();
LOG(INFO) << conversation_id;
auto delete_res = ConversationManager::get_instance().delete_conversation(conversation_id);
LOG(INFO) << delete_res.error();
ASSERT_TRUE(delete_res.ok());
auto delete_res_json = delete_res.get();
ASSERT_EQ(delete_res_json["id"], conversation_id);
auto get_res = ConversationManager::get_instance().get_conversation(conversation_id);
ASSERT_FALSE(get_res.ok());
ASSERT_EQ(get_res.code(), 404);
ASSERT_EQ(get_res.error(), "Conversation not found");
}
TEST_F(ConversationTest, DeleteInvalidConversation) {
auto delete_res = ConversationManager::get_instance().delete_conversation("qwerty");
ASSERT_FALSE(delete_res.ok());
ASSERT_EQ(delete_res.code(), 404);
ASSERT_EQ(delete_res.error(), "Conversation not found");
}
TEST_F(ConversationTest, TruncateConversation) {
nlohmann::json conversation = nlohmann::json::array();
nlohmann::json message = nlohmann::json::object();
message["user"] = "Hello";
for (int i = 0; i < 1000; i++) {
conversation.push_back(message);
}
auto truncated = ConversationManager::get_instance().truncate_conversation(conversation, 100);
ASSERT_TRUE(truncated.ok());
ASSERT_TRUE(truncated.get().size() < conversation.size());
ASSERT_TRUE(truncated.get().dump(0).size() < 100);
}
TEST_F(ConversationTest, TruncateConversationEmpty) {
nlohmann::json conversation = nlohmann::json::array();
auto truncated = ConversationManager::get_instance().truncate_conversation(conversation, 100);
ASSERT_TRUE(truncated.ok());
ASSERT_TRUE(truncated.get().size() == 0);
}
TEST_F(ConversationTest, TruncateConversationInvalidType) {
nlohmann::json conversation = nlohmann::json::object();
auto truncated = ConversationManager::get_instance().truncate_conversation(conversation, 100);
ASSERT_FALSE(truncated.ok());
ASSERT_EQ(truncated.code(), 400);
ASSERT_EQ(truncated.error(), "Conversation history is not an array");
}
TEST_F(ConversationTest, TruncateConversationInvalidLimit) {
nlohmann::json conversation = nlohmann::json::array();
auto truncated = ConversationManager::get_instance().truncate_conversation(conversation, 0);
ASSERT_FALSE(truncated.ok());
ASSERT_EQ(truncated.code(), 400);
ASSERT_EQ(truncated.error(), "Limit must be positive integer");
}
TEST_F(ConversationTest, TestConversationExpire) {
nlohmann::json conversation = nlohmann::json::array();
nlohmann::json message = nlohmann::json::object();
message["user"] = "Hello";
conversation.push_back(message);
auto create_res = ConversationManager::get_instance().add_conversation(conversation, model);
ASSERT_TRUE(create_res.ok());
std::string conversation_id = create_res.get();
ConversationManager::get_instance().clear_expired_conversations();
auto get_res = ConversationManager::get_instance().get_conversation(conversation_id);
ASSERT_TRUE(get_res.ok());
ASSERT_TRUE(get_res.get()["conversation"].is_array());
ASSERT_EQ(get_res.get()["id"], conversation_id);
ASSERT_EQ(get_res.get()["conversation"].size(), 1);
ConversationManager::get_instance()._set_ttl_offset(24 * 60 * 60 * 2);
LOG(INFO) << "Clearing expired conversations";
ConversationManager::get_instance().clear_expired_conversations();
LOG(INFO) << "Cleared expired conversations";
get_res = ConversationManager::get_instance().get_conversation(conversation_id);
ASSERT_FALSE(get_res.ok());
ASSERT_EQ(get_res.code(), 404);
ASSERT_EQ(get_res.error(), "Conversation not found");
ConversationManager::get_instance()._set_ttl_offset(0);
}
TEST_F(ConversationTest, TestInvalidConversationCollection) {
nlohmann::json schema_json = R"({
"name": "conversation_store2",
"fields": [
{
"name": "lorem",
"type": "string"
}
]
})"_json;
auto coll = collectionManager.create_collection(schema_json).get();
auto res = ConversationManager::get_instance().validate_conversation_store_schema(coll);
ASSERT_FALSE(res.ok());
ASSERT_EQ(res.code(), 400);
ASSERT_EQ(res.error(), "Schema is missing `conversation_id` field");
}
| 9,260 | C++ | .cpp | 202 | 37.925743 | 113 | 0.643579 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,743 | ratelimit_test.cpp | typesense_typesense/test/ratelimit_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <thread>
#include "ratelimit_manager.h"
#include "logger.h"
#include "core_api.h"
// Google Test fixture for RateLimitManager
class RateLimitManagerTest : public ::testing::Test
{
protected:
RateLimitManager *manager = RateLimitManager::getInstance();
Store *store;
void changeBaseTimestamp(const uint64_t new_base_timestamp) {
manager->_set_base_timestamp(new_base_timestamp);
}
RateLimitManagerTest() {
}
virtual ~RateLimitManagerTest() {
// You can do clean-up work that doesn't throw exceptions here.
manager->clear_all();
}
// If the constructor and destructor are not enough for setting up
// and cleaning up each test, you can define the following methods:
virtual void SetUp() {
std::string state_dir_path = "/tmp/typesense_test/rate_limit_manager_test_db";
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
manager->init(store);
}
virtual void TearDown() {
delete store;
}
    // Objects declared here can be used by all tests in the RateLimitManagerTest test case.
};
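// Each test runs against a fresh on-disk store created in SetUp(); the fixture destructor
// clears all rules so that individual tests stay independent of each other.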
TEST_F(RateLimitManagerTest, TestAddRateLimitApiKey) {
auto res = manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", 10},
{"max_requests_1h", 100},
{"auto_ban_1m_threshold", 10},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_EQ(manager->get_all_rules().size(), 1);
}
TEST_F(RateLimitManagerTest, TestAddRateLimitIp) {
auto res = manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})},
{"max_requests_1m", 10},
{"max_requests_1h", 100},
{"auto_ban_1m_threshold", 10},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_EQ(manager->get_all_rules().size(), 1);
}
TEST_F(RateLimitManagerTest, TestGetBannedIps) {
manager->add_rule({
{"action", "block"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})}
});
EXPECT_EQ(manager->get_banned_entities(RateLimitedEntityType::ip).size(), 1);
}
TEST_F(RateLimitManagerTest, TestGetTrackedIps) {
manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})},
{"max_requests_1m", 10},
{"max_requests_1h", 100},
{"auto_ban_1m_threshold", 10},
{"auto_ban_1m_duration_hours", 1}
});
auto rules = manager->get_all_rules();
bool found = rules[0].action == RateLimitAction::throttle && rules[0].max_requests.minute_threshold == 10 && rules[0].max_requests.hour_threshold == 100;
found = found && rules[0].entities[0].entity_type == RateLimitedEntityType::ip && rules[0].entities[0].entity_id == "0.0.0.1";
EXPECT_TRUE(found);
}
TEST_F(RateLimitManagerTest, TestGetTrackedApiKeys) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", 10},
{"max_requests_1h", 100},
{"auto_ban_1m_threshold", 10},
{"auto_ban_1m_duration_hours", 1}
});
auto rules = manager->get_all_rules();
bool found = rules[0].action == RateLimitAction::throttle && rules[0].max_requests.minute_threshold == 10 && rules[0].max_requests.hour_threshold == 100;
found = found && rules[0].entities[0].entity_type == RateLimitedEntityType::api_key && rules[0].entities[0].entity_id == "test";
EXPECT_TRUE(found);
}
TEST_F(RateLimitManagerTest, TestBanIpPermanently) {
manager->add_rule({
{"action", "block"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})}
});
auto rules = manager->get_all_rules();
bool found = rules[0].action == RateLimitAction::block && rules[0].entities[0].entity_type == RateLimitedEntityType::ip && rules[0].entities[0].entity_id == "0.0.0.1";
EXPECT_TRUE(found);
}
TEST_F(RateLimitManagerTest, TestIsBannedIp) {
manager->add_rule({
{"action", "block"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})}
});
EXPECT_TRUE(manager->get_banned_entities(RateLimitedEntityType::ip).size() == 1);
auto banned_entities = manager->get_banned_entities(RateLimitedEntityType::ip);
bool found = banned_entities[0].entity.entity_type == RateLimitedEntityType::ip && banned_entities[0].entity.entity_id == "0.0.0.1";
EXPECT_EQ(banned_entities[0].and_entity.ok(), false);
EXPECT_TRUE(found);
}
TEST_F(RateLimitManagerTest, TestIsBannedIpTemp) {
manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})},
{"max_requests_1m", 1},
{"max_requests_1h", 1}
});
EXPECT_TRUE(manager->get_all_rules().size() == 1);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"},{RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"},{RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestIsBannedAPIKeyPermanently) {
manager->add_rule({
{"action", "block"},
{"api_keys", nlohmann::json::array({"test"})}
});
EXPECT_TRUE(manager->get_banned_entities(RateLimitedEntityType::api_key).size() == 1);
auto banned_entities = manager->get_banned_entities(RateLimitedEntityType::api_key);
bool found = banned_entities[0].entity.entity_type == RateLimitedEntityType::api_key && banned_entities[0].entity.entity_id == "test";
EXPECT_TRUE(found);
}
TEST_F(RateLimitManagerTest, TestIsBannedAPIKeyTemp) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", 1},
{"max_requests_1h", 1}
});
EXPECT_TRUE(manager->get_all_rules().size() == 1);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"},{RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"},{RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestAllowAPIKey) {
manager->add_rule({
{"action", "allow"},
{"api_keys", nlohmann::json::array({"test"})}
});
EXPECT_TRUE(manager->get_all_rules().size() == 1);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test_"},{RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestAllowIp) {
manager->add_rule({
{"action", "allow"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})}
});
EXPECT_TRUE(manager->get_all_rules().size() == 1);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"},{RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestThrottleAPIKey) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", 1},
{"max_requests_1h", 1}
});
EXPECT_TRUE(manager->get_all_rules().size() == 1);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestDeleteRuleByID) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", 1},
{"max_requests_1h", 1}
});
EXPECT_TRUE(manager->get_all_rules().size() == 1);
auto rules = manager->get_all_rules();
manager->delete_rule_by_id(rules[0].id);
EXPECT_EQ(manager->get_all_rules().size(), 0);
}
TEST_F(RateLimitManagerTest, TestMinuteRateLimitAPIKey) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", 5},
{"max_requests_1h", -1}
});
EXPECT_TRUE(manager->get_all_rules().size() == 1);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestHourRateLimitAPIKey) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", -1},
{"max_requests_1h", 5}
});
EXPECT_TRUE(manager->get_all_rules().size() == 1);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestMinuteRateLimitIp) {
manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})},
{"max_requests_1m", 5},
{"max_requests_1h", -1}
});
EXPECT_TRUE(manager->get_all_rules().size() == 1);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestHourRateLimitIp) {
manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})},
{"max_requests_1m", -1},
{"max_requests_1h", 5}
});
EXPECT_TRUE(manager->get_all_rules().size() == 1);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestGetAllRules) {
manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})},
{"max_requests_1m", -1},
{"max_requests_1h", 5}
});
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", 5},
{"max_requests_1h", -1}
});
EXPECT_TRUE(manager->get_all_rules().size() == 2);
}
TEST_F(RateLimitManagerTest, TestGetAllRulesEmpty) {
auto rules = manager->get_all_rules();
EXPECT_EQ(rules.size(), 0);
}
TEST_F(RateLimitManagerTest, TestGetAllRulesJSON) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", 5},
{"max_requests_1h", -1}
});
nlohmann::json rules = manager->get_all_rules_json();
EXPECT_EQ(rules.is_array(), true);
EXPECT_EQ(rules.size(), 1);
EXPECT_EQ(rules.at(0).is_object(), true);
EXPECT_EQ(rules.at(0).at("id").is_number(), true);
EXPECT_EQ(rules.at(0).at("api_keys").is_array(), true);
EXPECT_EQ(rules.at(0).at("api_keys").size(), 1);
EXPECT_EQ(rules.at(0).at("api_keys").at(0).is_string(), true);
EXPECT_EQ(rules.at(0).count("ip_addresses"), 0);
}
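// Auto-ban: the expectations below imply that exceeding the per-minute limit in
// `auto_ban_1m_threshold` consecutive windows triggers a temporary ban lasting
// `auto_ban_1m_duration_hours`; the base timestamp is shifted to simulate elapsed time.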
TEST_F(RateLimitManagerTest, TestAutoBan) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", 5},
{"max_requests_1h", -1},
{"auto_ban_1m_threshold", 2},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
this->changeBaseTimestamp(120);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
this->changeBaseTimestamp(240);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
this->changeBaseTimestamp(60*59);
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
this->changeBaseTimestamp(60*60*2);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestWildcardAPIKeyWithFlag) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({".*"})},
{"max_requests_1m", 5},
{"max_requests_1h", -1},
{"apply_limit_per_entity", true}
});
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
}
TEST_F(RateLimitManagerTest, TestWildcardAPIKeyWithoutFlag) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({".*"})},
{"max_requests_1m", 5},
{"max_requests_1h", -1},
});
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
}
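// Priority: the expectations below imply that rules with a lower `priority` value take
// precedence when several rules match the same API key / IP combination.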
TEST_F(RateLimitManagerTest, TestPriority) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({".*"})},
{"max_requests_1m", 2},
{"max_requests_1h", -1},
{"priority", 3},
{"apply_limit_per_entity", true}
});
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"max_requests_1m", 5},
{"max_requests_1h", -1},
{"priority", 1}
});
manager->add_rule({
{"action", "block"},
{"api_keys", nlohmann::json::array({"test1"})},
{"priority", 4}
});
manager->add_rule({
{"action", "allow"},
{"api_keys", nlohmann::json::array({"test2"})},
{"priority", 0}
});
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test2"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test2"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test2"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
}
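// AND semantics: a rule listing both `api_keys` and `ip_addresses` should only throttle
// requests that match both; other key/IP combinations remain unthrottled.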
TEST_F(RateLimitManagerTest, TestAndRule) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({"test"})},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})},
{"max_requests_1m", 5},
{"max_requests_1h", -1},
{"priority", 3}
});
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
}
TEST_F(RateLimitManagerTest, TestExceedCounter) {
manager->add_rule({
{"action", "throttle"},
{"api_keys", nlohmann::json::array({".*"})},
{"priority", 3},
{"apply_limit_per_entity", true},
{"max_requests_1m", 3},
{"max_requests_1h", -1}
});
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
const auto exceeds = manager->get_exceeded_entities_json();
EXPECT_EQ(exceeds.size(), 2);
EXPECT_EQ(exceeds[0]["api_key"], ".*");
EXPECT_EQ(exceeds[0]["ip"], "0.0.0.2");
EXPECT_EQ(exceeds[0]["request_count"], 10);
EXPECT_EQ(exceeds[1]["api_key"], ".*");
EXPECT_EQ(exceeds[1]["ip"], "0.0.0.1");
EXPECT_EQ(exceeds[1]["request_count"], 9);
}
TEST_F(RateLimitManagerTest, TestActiveThrottles) {
manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})},
{"priority", 3},
{"max_requests_1m", 3},
{"max_requests_1h", -1},
{"auto_ban_1m_threshold", 1},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
manager->_set_base_timestamp(120);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
const auto throttles = manager->get_throttled_entities_json();
EXPECT_EQ(throttles.size(), 1);
EXPECT_EQ(throttles[0]["ip_address"], "0.0.0.1");
EXPECT_EQ(throttles[0].count("api_key"), 0);
EXPECT_EQ(throttles[0].count("throttling_from"), 1);
EXPECT_EQ(throttles[0].count("throttling_to"), 1);
}
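// Multi-search: each search in the request body counts towards the limit, so a 5-search
// request against a 3-requests-per-minute rule is rejected with 429, while a later
// 2-search request from a different IP succeeds.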
TEST_F(RateLimitManagerTest, TestMultiSearchRateLimiting) {
manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({".*"})},
{"priority", 3},
{"max_requests_1m", 3},
{"max_requests_1h", -1},
{"apply_limit_per_entity", true}
});
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
nlohmann::json body;
body["searches"] = nlohmann::json::array();
nlohmann::json search;
search["collection"] = "players";
search["filter_by"] = "score: > 100";
body["searches"].push_back(search);
body["searches"].push_back(search);
body["searches"].push_back(search);
body["searches"].push_back(search);
body["searches"].push_back(search);
req->embedded_params_vec.push_back(nlohmann::json::object());
req->embedded_params_vec.push_back(nlohmann::json::object());
req->embedded_params_vec.push_back(nlohmann::json::object());
req->embedded_params_vec.push_back(nlohmann::json::object());
req->embedded_params_vec.push_back(nlohmann::json::object());
req->body = body.dump();
req->metadata = "4:test0.0.0.1";
EXPECT_FALSE(post_multi_search(req, res));
EXPECT_EQ(res->status_code, 429);
EXPECT_EQ(res->body, "{\"message\": \"Rate limit exceeded or blocked\"}");
body.erase("searches");
body["searches"] = nlohmann::json::array();
body["searches"].push_back(search);
body["searches"].push_back(search);
req->embedded_params_vec.pop_back();
req->embedded_params_vec.pop_back();
req->embedded_params_vec.pop_back();
req->body = body.dump();
req->metadata = "4:test0.0.0.2";
EXPECT_TRUE(post_multi_search(req, res));
EXPECT_EQ(res->status_code, 200);
}
TEST_F(RateLimitManagerTest, TestDeleteBanByID) {
manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({".*"})},
{"priority", 3},
{"max_requests_1m", 3},
{"max_requests_1h", -1},
{"apply_limit_per_entity", true},
{"auto_ban_1m_threshold", 1},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
manager->_set_base_timestamp(120);
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
const auto throttles = manager->get_throttled_entities_json();
EXPECT_EQ(throttles.size(), 1);
EXPECT_EQ(throttles[0]["ip_address"], "0.0.0.1");
EXPECT_EQ(throttles[0].count("api_key"), 0);
EXPECT_EQ(throttles[0].count("throttling_from"), 1);
EXPECT_EQ(throttles[0].count("throttling_to"), 1);
EXPECT_TRUE(manager->delete_ban_by_id(throttles[0]["id"]));
EXPECT_EQ(manager->get_throttled_entities_json().size(), 0);
}
TEST_F(RateLimitManagerTest, TestInvalidRules) {
auto res = manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1", "0.0.0.2"})},
{"api_keys", nlohmann::json::array({"test1", "test2"})},
{"priority", 3},
{"max_requests_1m", 3},
{"max_requests_1h", -1},
{"apply_limit_per_entity", true},
{"auto_ban_1m_threshold", 1},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_FALSE(res.ok());
EXPECT_EQ(400, res.code());
EXPECT_EQ("Many to many rule is not supported.", res.error());
res = manager->add_rule({
{"action", "throttle"},
{"priority", 3},
{"max_requests_1m", 3},
{"max_requests_1h", -1},
{"apply_limit_per_entity", true},
{"auto_ban_1m_threshold", 1},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_FALSE(res.ok());
EXPECT_EQ(400, res.code());
EXPECT_EQ("Parameter `ip_addresses` or `api_keys` is required.", res.error());
res = manager->add_rule({
{"ip_addresses", nlohmann::json::array({".*"})},
{"priority", 3},
{"max_requests_1m", 3},
{"max_requests_1h", -1},
{"apply_limit_per_entity", true},
{"auto_ban_1m_threshold", 1},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_FALSE(res.ok());
EXPECT_EQ(400, res.code());
EXPECT_EQ("Parameter `action` is required.", res.error());
res = manager->add_rule({
{"action", "throttle"},
{"ip_addresses", ".*"},
{"priority", 3},
{"max_requests_1m", 3},
{"max_requests_1h", -1},
{"apply_limit_per_entity", true},
{"auto_ban_1m_threshold", 1},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_FALSE(res.ok());
EXPECT_EQ(400, res.code());
EXPECT_EQ("Parameter `ip_addresses` must be an array of strings.", res.error());
res = manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({".*"})},
{"priority", 3}
});
EXPECT_FALSE(res.ok());
EXPECT_EQ(400, res.code());
EXPECT_EQ("At least one of `max_requests_1m` or `max_requests_1h` is required.", res.error());
res = manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({".*"})},
{"priority", 3},
{"max_requests_1m", "aa"}
});
EXPECT_FALSE(res.ok());
EXPECT_EQ(400, res.code());
EXPECT_EQ("Parameter `max_requests_1m` must be an integer.", res.error());
res = manager->add_rule({
{"action", "invalid"},
{"ip_addresses", nlohmann::json::array({".*"})},
{"priority", 3},
{"max_requests_1m", 3},
{"max_requests_1h", 3},
{"apply_limit_per_entity", true},
{"auto_ban_1m_threshold", 1},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_FALSE(res.ok());
EXPECT_EQ(400, res.code());
EXPECT_EQ("Invalid action.", res.error());
}
TEST_F(RateLimitManagerTest, TestOneToManyRule) {
manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1", "0.0.0.2"})},
{"api_keys", nlohmann::json::array({"test"})},
{"priority", 3},
{"max_requests_1m", 2},
{"max_requests_1h", -1},
{"apply_limit_per_entity", true},
{"auto_ban_1m_threshold", 1},
{"auto_ban_1m_duration_hours", 1}
});
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.2"}));
}
TEST_F(RateLimitManagerTest, TestDeleteThrottleByID) {
manager->add_rule({
{"action", "throttle"},
{"ip_addresses", nlohmann::json::array({".*"})},
{"priority", 3},
{"max_requests_1m", 3},
{"max_requests_1h", -1},
{"apply_limit_per_entity", true}
});
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
auto exceeds = manager->get_exceeded_entities_json();
EXPECT_EQ(1, exceeds.size());
auto id = exceeds[0]["id"];
auto res = manager->delete_throttle_by_id(id);
EXPECT_TRUE(res);
exceeds = manager->get_exceeded_entities_json();
EXPECT_EQ(0, exceeds.size());
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
}
TEST_F(RateLimitManagerTest, TestOneToManyFillTest) {
manager->add_rule({
{"action", "block"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})},
{"api_keys", nlohmann::json::array({"test", "test1", "test2"})},
{"priority", 3},
});
EXPECT_TRUE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
auto rules = manager->get_all_rules_json();
EXPECT_EQ(1, rules.size());
manager->delete_rule_by_id(rules[0]["id"]);
EXPECT_EQ(0, manager->get_all_rules_json().size());
manager->add_rule({
{"action", "block"},
{"ip_addresses", nlohmann::json::array({"0.0.0.1"})},
{"api_keys", nlohmann::json::array({"test", "test2"})},
{"priority", 3},
});
LOG(INFO) << manager->get_all_rules_json();
EXPECT_FALSE(manager->is_rate_limited({RateLimitedEntityType::api_key, "test1"}, {RateLimitedEntityType::ip, "0.0.0.1"}));
}
| 41,406 | C++ | .cpp | 713 | 52.112202 | 171 | 0.653205 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,744 | numeric_range_trie_test.cpp | typesense_typesense/test/numeric_range_trie_test.cpp |
#include <gtest/gtest.h>
#include <collection_manager.h>
#include "collection.h"
#include "numeric_range_trie.h"
class NumericRangeTrieTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_filtering";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
delete store;
}
};
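// Frees the id buffer allocated by the trie search methods so it can be reused between queries.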
void reset(uint32_t*& ids, uint32_t& ids_length) {
delete [] ids;
ids = nullptr;
ids_length = 0;
}
TEST_F(NumericRangeTrieTest, SearchRange) {
auto trie = new NumericTrie();
std::unique_ptr<NumericTrie> trie_guard(trie);
std::vector<std::pair<int32_t, uint32_t>> pairs = {
{-0x03010101, 1},
{-0x01010101, 5},
{-32768, 43},
{-24576, 35},
{-16384, 32},
{-8192, 8},
{8192, 49},
{16384, 56},
{24576, 58},
{32768, 91},
{0x01010101, 53},
{0x03010101, 12},
};
for (auto const& pair: pairs) {
trie->insert(pair.first, pair.second);
}
uint32_t* ids = nullptr;
uint32_t ids_length = 0;
trie->search_range(32768, true, -32768, true, ids, ids_length);
ASSERT_EQ(0, ids_length);
reset(ids, ids_length);
trie->search_range(-32768, true, 32768, true, ids, ids_length);
std::vector<uint32_t> expected = {8, 32, 35, 43, 49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-32768, true, 32768, false, ids, ids_length);
expected = {8, 32, 35, 43, 49, 56, 58};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-32768, true, 0x01000000, true, ids, ids_length);
expected = {8, 32, 35, 43, 49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-32768, true, 0x0101010101, true, ids, ids_length);
expected = {8, 12, 32, 35, 43, 49, 53, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-32768, true, 0, true, ids, ids_length);
expected = {8, 32, 35, 43};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-32768, true, 0, false, ids, ids_length);
expected = {8, 32, 35, 43};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-32768, false, 32768, true, ids, ids_length);
expected = {8, 32, 35, 49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-0x01000000, true, 32768, true, ids, ids_length);
expected = {8, 32, 35, 43, 49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-0x0101010101, true, 32768, true, ids, ids_length);
expected = {1, 5, 8, 32, 35, 43, 49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-0x01000000, true, 0x01000000, true, ids, ids_length);
expected = {8, 32, 35, 43, 49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-1, true, 32768, true, ids, ids_length);
expected = {49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-1, false, 32768, true, ids, ids_length);
expected = {49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-1, true, 0, true, ids, ids_length);
ASSERT_EQ(0, ids_length);
reset(ids, ids_length);
trie->search_range(-1, false, 0, false, ids, ids_length);
ASSERT_EQ(0, ids_length);
reset(ids, ids_length);
trie->search_range(8192, true, 32768, true, ids, ids_length);
expected = {49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(8192, true, 0x01000000, true, ids, ids_length);
expected = {49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(16384, true, 16384, true, ids, ids_length);
ASSERT_EQ(1, ids_length);
ASSERT_EQ(56, ids[0]);
reset(ids, ids_length);
trie->search_range(16384, true, 16384, false, ids, ids_length);
ASSERT_EQ(0, ids_length);
reset(ids, ids_length);
trie->search_range(16384, false, 16384, true, ids, ids_length);
ASSERT_EQ(0, ids_length);
reset(ids, ids_length);
trie->search_range(16383, true, 16383, true, ids, ids_length);
ASSERT_EQ(0, ids_length);
reset(ids, ids_length);
trie->search_range(8193, true, 16383, true, ids, ids_length);
ASSERT_EQ(0, ids_length);
reset(ids, ids_length);
trie->search_range(-32768, true, -8192, true, ids, ids_length);
expected = {8, 32, 35, 43};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-0x0101010101, true, -8192, true, ids, ids_length);
expected = {1, 5, 8, 32, 35, 43};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(32768, true, 0x0101010101, true, ids, ids_length);
expected = {12, 53, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
}
TEST_F(NumericRangeTrieTest, SearchGreaterThan) {
auto trie = new NumericTrie();
std::unique_ptr<NumericTrie> trie_guard(trie);
std::vector<std::pair<int32_t, uint32_t>> pairs = {
{-32768, 43},
{-24576, 35},
{-16384, 32},
{-8192, 8},
{8192, 49},
{16384, 56},
{24576, 58},
{32768, 91},
};
for (auto const& pair: pairs) {
trie->insert(pair.first, pair.second);
}
uint32_t* ids = nullptr;
uint32_t ids_length = 0;
trie->search_greater_than(0, true, ids, ids_length);
std::vector<uint32_t> expected = {49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(-1, false, ids, ids_length);
expected = {49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(-1, true, ids, ids_length);
expected = {49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(-24576, true, ids, ids_length);
expected = {8, 32, 35, 49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(-32768, false, ids, ids_length);
expected = {8, 32, 35, 49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(8192, true, ids, ids_length);
expected = {49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(8192, false, ids, ids_length);
expected = {56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(1000000, false, ids, ids_length);
ASSERT_EQ(0, ids_length);
reset(ids, ids_length);
trie->search_greater_than(-0x01000000, false, ids, ids_length);
expected = {8, 32, 35, 43, 49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
pairs = {
{0x01010101, 53},
{0x03010101, 12},
};
for (auto const& pair: pairs) {
trie->insert(pair.first, pair.second);
}
trie->search_greater_than(0x01010101, true, ids, ids_length);
expected = {12, 53};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(0x0101010101, true, ids, ids_length);
ASSERT_EQ(0, ids_length);
}
TEST_F(NumericRangeTrieTest, SearchLessThan) {
auto trie = new NumericTrie();
std::unique_ptr<NumericTrie> trie_guard(trie);
std::vector<std::pair<int32_t, uint32_t>> pairs = {
{-32768, 8},
{-24576, 32},
{-16384, 35},
{-8192, 43},
{8192, 49},
{16384, 56},
{24576, 58},
{32768, 91},
};
for (auto const& pair: pairs) {
trie->insert(pair.first, pair.second);
}
uint32_t* ids = nullptr;
uint32_t ids_length = 0;
trie->search_less_than(0, true, ids, ids_length);
std::vector<uint32_t> expected = {8, 32, 35, 43};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_less_than(0, false, ids, ids_length);
expected = {8, 32, 35, 43};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_less_than(-1, true, ids, ids_length);
expected = {8, 32, 35, 43};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_less_than(-16384, true, ids, ids_length);
expected = {8, 32, 35};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_less_than(-16384, false, ids, ids_length);
expected = {8, 32};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_less_than(8192, true, ids, ids_length);
expected = {8, 32, 35, 43, 49};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_less_than(8192, false, ids, ids_length);
expected = {8, 32, 35, 43};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_less_than(-0x01000000, false, ids, ids_length);
ASSERT_EQ(0, ids_length);
reset(ids, ids_length);
trie->search_less_than(0x01000000, true, ids, ids_length);
expected = {8, 32, 35, 43, 49, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
pairs = {
{0x01010101, 53},
{0x03010101, 12},
};
for (auto const& pair: pairs) {
trie->insert(pair.first, pair.second);
}
trie->search_less_than(0x01010101010, true, ids, ids_length);
expected = {8, 12, 32, 35, 43, 49, 53, 56, 58, 91};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
pairs = {
{-0x03010101, 1},
{-0x01010101, 5},
};
for (auto const& pair: pairs) {
trie->insert(pair.first, pair.second);
}
trie->search_less_than(-0x01010101, true, ids, ids_length);
expected = {1, 5};
ASSERT_EQ(expected.size(), ids_length);
for (uint32_t i = 0; i < expected.size(); i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_less_than(-0x0101010101, true, ids, ids_length);
ASSERT_EQ(0, ids_length);
}
TEST_F(NumericRangeTrieTest, SearchEqualTo) {
auto trie = new NumericTrie();
std::unique_ptr<NumericTrie> trie_guard(trie);
std::vector<std::pair<int64_t, uint32_t>> pairs = {
{-8192, 8},
{-16384, 32},
{-24576, 35},
{-32769, 41},
{-32768, 43},
{-32767, 45},
{8192, 49},
{16384, 56},
{24576, 58},
{32768, 91},
{0x01010101, 68},
{0x0100000000, 68}
};
for (auto const& pair: pairs) {
trie->insert(pair.first, pair.second);
}
uint32_t* ids = nullptr;
uint32_t ids_length = 0;
trie->search_equal_to(0, ids, ids_length);
ASSERT_EQ(0, ids_length);
reset(ids, ids_length);
trie->search_equal_to(-32768, ids, ids_length);
ASSERT_EQ(1, ids_length);
ASSERT_EQ(43, ids[0]);
reset(ids, ids_length);
trie->search_equal_to(24576, ids, ids_length);
ASSERT_EQ(1, ids_length);
ASSERT_EQ(58, ids[0]);
reset(ids, ids_length);
trie->search_equal_to(0x01010101, ids, ids_length);
ASSERT_EQ(1, ids_length);
ASSERT_EQ(68, ids[0]);
reset(ids, ids_length);
trie->search_equal_to(0x0101010101, ids, ids_length);
ASSERT_EQ(0, ids_length);
}
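// Iterator variant of search_equal_to: exposes matching seq_ids in ascending order via
// is_valid/seq_id/next(), with reset() and skip_to() helpers.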
TEST_F(NumericRangeTrieTest, IterateSearchEqualTo) {
auto trie = new NumericTrie();
std::unique_ptr<NumericTrie> trie_guard(trie);
std::vector<std::pair<int32_t, uint32_t>> pairs = {
{-8192, 8},
{-16384, 32},
{-24576, 35},
{-32769, 41},
{-32768, 43},
{-32767, 45},
{8192, 49},
{16384, 56},
{24576, 58},
{24576, 60},
{32768, 91}
};
for (auto const& pair: pairs) {
trie->insert(pair.first, pair.second);
}
uint32_t* ids = nullptr;
uint32_t ids_length = 0;
auto iterator = trie->search_equal_to(0);
ASSERT_EQ(false, iterator.is_valid);
iterator = trie->search_equal_to(0x202020);
ASSERT_EQ(false, iterator.is_valid);
iterator = trie->search_equal_to(-32768);
ASSERT_EQ(true, iterator.is_valid);
ASSERT_EQ(43, iterator.seq_id);
iterator.next();
ASSERT_EQ(false, iterator.is_valid);
iterator = trie->search_equal_to(24576);
ASSERT_EQ(true, iterator.is_valid);
ASSERT_EQ(58, iterator.seq_id);
iterator.next();
ASSERT_EQ(true, iterator.is_valid);
ASSERT_EQ(60, iterator.seq_id);
iterator.next();
ASSERT_EQ(false, iterator.is_valid);
iterator.reset();
ASSERT_EQ(true, iterator.is_valid);
ASSERT_EQ(58, iterator.seq_id);
iterator.skip_to(4);
ASSERT_EQ(true, iterator.is_valid);
ASSERT_EQ(58, iterator.seq_id);
iterator.skip_to(59);
ASSERT_EQ(true, iterator.is_valid);
ASSERT_EQ(60, iterator.seq_id);
iterator.skip_to(66);
ASSERT_EQ(false, iterator.is_valid);
}
TEST_F(NumericRangeTrieTest, MultivalueData) {
auto trie = new NumericTrie();
std::unique_ptr<NumericTrie> trie_guard(trie);
std::vector<std::pair<int32_t, uint32_t>> pairs = {
{-0x202020, 32},
{-32768, 5},
{-32768, 8},
{-24576, 32},
{-16384, 35},
{-8192, 43},
{0, 43},
{0, 49},
{1, 8},
{256, 91},
{8192, 49},
{16384, 56},
{24576, 58},
{32768, 91},
{0x202020, 35},
};
for (auto const& pair: pairs) {
trie->insert(pair.first, pair.second);
}
uint32_t* ids = nullptr;
uint32_t ids_length = 0;
trie->search_less_than(0, false, ids, ids_length);
std::vector<uint32_t> expected = {5, 8, 32, 35, 43};
ASSERT_EQ(5, ids_length);
for (uint32_t i = 0; i < ids_length; i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_less_than(-16380, false, ids, ids_length);
ASSERT_EQ(4, ids_length);
expected = {5, 8, 32, 35};
for (uint32_t i = 0; i < ids_length; i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_less_than(16384, false, ids, ids_length);
ASSERT_EQ(7, ids_length);
expected = {5, 8, 32, 35, 43, 49, 91};
for (uint32_t i = 0; i < ids_length; i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(0, true, ids, ids_length);
ASSERT_EQ(7, ids_length);
expected = {8, 35, 43, 49, 56, 58, 91};
for (uint32_t i = 0; i < ids_length; i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(256, true, ids, ids_length);
ASSERT_EQ(5, ids_length);
expected = {35, 49, 56, 58, 91};
for (uint32_t i = 0; i < ids_length; i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_greater_than(-32768, true, ids, ids_length);
ASSERT_EQ(9, ids_length);
expected = {5, 8, 32, 35, 43, 49, 56, 58, 91};
for (uint32_t i = 0; i < ids_length; i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_range(-32768, true, 0, true, ids, ids_length);
ASSERT_EQ(6, ids_length);
expected = {5, 8, 32, 35, 43, 49};
for (uint32_t i = 0; i < ids_length; i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
}
TEST_F(NumericRangeTrieTest, Remove) {
auto trie = new NumericTrie();
std::unique_ptr<NumericTrie> trie_guard(trie);
std::vector<std::pair<int32_t, uint32_t>> pairs = {
{-0x202020, 32},
{-32768, 5},
{-32768, 8},
{-24576, 32},
{-16384, 35},
{-8192, 43},
{0, 2},
{0, 49},
{1, 8},
{256, 91},
{8192, 49},
{16384, 56},
{24576, 58},
{32768, 91},
{0x202020, 35},
{0x01010101, 68},
};
for (auto const& pair: pairs) {
trie->insert(pair.first, pair.second);
}
uint32_t* ids = nullptr;
uint32_t ids_length = 0;
trie->search_less_than(0, false, ids, ids_length);
std::vector<uint32_t> expected = {5, 8, 32, 35, 43};
ASSERT_EQ(5, ids_length);
for (uint32_t i = 0; i < ids_length; i++) {
ASSERT_EQ(expected[i], ids[i]);
}
trie->remove(-24576, 32);
trie->remove(-0x202020, 32);
reset(ids, ids_length);
trie->search_less_than(0, false, ids, ids_length);
expected = {5, 8, 35, 43};
ASSERT_EQ(4, ids_length);
for (uint32_t i = 0; i < ids_length; i++) {
ASSERT_EQ(expected[i], ids[i]);
}
reset(ids, ids_length);
trie->search_equal_to(0, ids, ids_length);
expected = {2, 49};
ASSERT_EQ(2, ids_length);
for (uint32_t i = 0; i < ids_length; i++) {
ASSERT_EQ(expected[i], ids[i]);
}
trie->remove(0, 2);
reset(ids, ids_length);
trie->search_equal_to(0, ids, ids_length);
ASSERT_EQ(1, ids_length);
ASSERT_EQ(49, ids[0]);
reset(ids, ids_length);
trie->remove(0x0101010101, 68);
trie->search_equal_to(16843009, ids, ids_length);
ASSERT_EQ(1, ids_length);
ASSERT_EQ(68, ids[0]);
reset(ids, ids_length);
}
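// Every search on an empty trie should return zero results, and removals should be harmless no-ops.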
TEST_F(NumericRangeTrieTest, EmptyTrieOperations) {
auto trie = new NumericTrie();
std::unique_ptr<NumericTrie> trie_guard(trie);
uint32_t* ids = nullptr;
uint32_t ids_length = 0;
trie->search_range(-32768, true, 32768, true, ids, ids_length);
std::unique_ptr<uint32_t[]> ids_guard(ids);
ASSERT_EQ(0, ids_length);
trie->search_range(-32768, true, -1, true, ids, ids_length);
ids_guard.reset(ids);
ASSERT_EQ(0, ids_length);
trie->search_range(1, true, 32768, true, ids, ids_length);
ids_guard.reset(ids);
ASSERT_EQ(0, ids_length);
trie->search_greater_than(0, true, ids, ids_length);
ids_guard.reset(ids);
ASSERT_EQ(0, ids_length);
trie->search_greater_than(15, true, ids, ids_length);
ids_guard.reset(ids);
ASSERT_EQ(0, ids_length);
trie->search_greater_than(-15, true, ids, ids_length);
ids_guard.reset(ids);
ASSERT_EQ(0, ids_length);
trie->search_less_than(0, false, ids, ids_length);
ids_guard.reset(ids);
ASSERT_EQ(0, ids_length);
trie->search_less_than(-15, true, ids, ids_length);
ids_guard.reset(ids);
ASSERT_EQ(0, ids_length);
trie->search_less_than(15, true, ids, ids_length);
ids_guard.reset(ids);
ASSERT_EQ(0, ids_length);
trie->search_equal_to(15, ids, ids_length);
ids_guard.reset(ids);
ASSERT_EQ(0, ids_length);
trie->remove(15, 0);
trie->remove(-15, 0);
}
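// End-to-end check through the Collection API: numeric fields with range_index enabled should satisfy filter queries and report range_index in the summary.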
TEST_F(NumericRangeTrieTest, Integration) {
Collection *coll_array_fields;
std::ifstream infile(std::string(ROOT_DIR)+"test/numeric_array_documents.jsonl");
std::vector<field> fields = {
field("name", field_types::STRING, false),
field("rating", field_types::FLOAT, false),
field("age", field_types::INT32, false, false, true, "", -1, -1, false, 0, 0, cosine, "", nlohmann::json(),
true), // Setting range index true.
field("years", field_types::INT32_ARRAY, false),
field("timestamps", field_types::INT64_ARRAY, false, false, true, "", -1, -1, false, 0, 0, cosine, "",
nlohmann::json(), true),
field("tags", field_types::STRING_ARRAY, true)
};
std::vector<sort_by> sort_fields = { sort_by("age", "DESC") };
coll_array_fields = collectionManager.get_collection("coll_array_fields").get();
if(coll_array_fields == nullptr) {
// ensure that default_sorting_field is a non-array numerical field
auto coll_op = collectionManager.create_collection("coll_array_fields", 4, fields, "years");
ASSERT_EQ(false, coll_op.ok());
ASSERT_STREQ("Default sorting field `years` is not a sortable type.", coll_op.error().c_str());
// let's try again properly
coll_op = collectionManager.create_collection("coll_array_fields", 4, fields, "age");
coll_array_fields = coll_op.get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
auto add_op = coll_array_fields->add(json_line);
ASSERT_TRUE(add_op.ok());
}
infile.close();
query_fields = {"name"};
std::vector<std::string> facets;
// Searching on an int32 field
nlohmann::json results = coll_array_fields->search("Jeremy", query_fields, "age:>24", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
std::vector<std::string> ids = {"3", "1", "4"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
// searching on an int64 array field - also ensure that padded space causes no issues
results = coll_array_fields->search("Jeremy", query_fields, "timestamps : > 475205222", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
ids = {"1", "4", "0", "2"};
for(size_t i = 0; i < results["hits"].size(); i++) {
nlohmann::json result = results["hits"].at(i);
std::string result_id = result["document"]["id"];
std::string id = ids.at(i);
ASSERT_STREQ(id.c_str(), result_id.c_str());
}
results = coll_array_fields->search("Jeremy", query_fields, "rating: [7.812 .. 9.999, 1.05 .. 1.09]", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(3, results["hits"].size());
auto coll_json = coll_array_fields->get_summary_json();
ASSERT_TRUE(coll_json["fields"][2]["range_index"]);
ASSERT_TRUE(coll_json["fields"][4]["range_index"]);
}
| 26,827 | C++ | .cpp | 722 | 30.450139 | 165 | 0.579678 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,745 | collection_manager_test.cpp | typesense_typesense/test/collection_manager_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <fstream>
#include <collection_manager.h>
#include <analytics_manager.h>
#include "string_utils.h"
#include "collection.h"
class CollectionManagerTest : public ::testing::Test {
protected:
Store *store;
Store* analytic_store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
Collection *collection1;
std::vector<sort_by> sort_fields;
nlohmann::json schema;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/coll_manager_test_db";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
analytic_store = new Store(state_dir_path + "/analytics");
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
AnalyticsManager::get_instance().init(store, analytic_store, 5);
schema = R"({
"name": "collection1",
"enable_nested_fields": true,
"fields": [
{"name": "title", "type": "string", "locale": "en"},
{"name": "starring", "type": "string", "infix": true},
{"name": "cast", "type": "string[]", "facet": true, "optional": true},
{"name": ".*_year", "type": "int32", "facet": true, "optional": true},
{"name": "location", "type": "geopoint", "optional": true},
{"name": "not_stored", "type": "string", "optional": true, "index": false},
{"name": "points", "type": "int32"},
{"name": "person", "type": "object", "optional": true},
{"name": "vec", "type": "float[]", "num_dim": 128, "optional": true},
{"name": "product_id", "type": "string", "reference": "Products.product_id", "optional": true, "async_reference": true}
],
"default_sorting_field": "points",
"symbols_to_index":["+"],
"token_separators":["-"]
})"_json;
sort_fields = { sort_by("points", "DESC") };
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
collection1 = op.get();
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
if(store != nullptr) {
collectionManager.drop_collection("collection1");
collectionManager.dispose();
delete store;
}
delete analytic_store;
}
};
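// Creating a collection should persist its meta JSON, next sequence id and next collection id in the store, and expose schema/sort/reference fields in memory.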
TEST_F(CollectionManagerTest, CollectionCreation) {
CollectionManager & collectionManager2 = CollectionManager::get_instance();
collection1 = collectionManager2.get_collection("collection1").get();
ASSERT_NE(nullptr, collection1);
tsl::htrie_map<char, field> schema = collection1->get_schema();
std::vector<std::string> facet_fields_expected = {"cast"};
ASSERT_EQ(0, collection1->get_collection_id());
ASSERT_EQ(0, collection1->get_next_seq_id());
ASSERT_EQ(facet_fields_expected, collection1->get_facet_fields());
// product_id_sequence_id is also included
ASSERT_EQ(3, collection1->get_sort_fields().size());
ASSERT_EQ("location", collection1->get_sort_fields()[0].name);
ASSERT_EQ("product_id_sequence_id", collection1->get_sort_fields()[1].name);
ASSERT_EQ("points", collection1->get_sort_fields()[2].name);
ASSERT_EQ(schema.size(), collection1->get_schema().size());
ASSERT_EQ("points", collection1->get_default_sorting_field());
ASSERT_EQ(false, schema.at("not_stored").index);
ASSERT_EQ(1, collection1->get_reference_fields().size());
ASSERT_EQ("Products", collection1->get_reference_fields().at("product_id").collection);
ASSERT_EQ("product_id", collection1->get_reference_fields().at("product_id").field);
// check storage as well
rocksdb::Iterator* it = store->get_iterator();
size_t num_keys = 0;
for (it->SeekToFirst(); it->Valid(); it->Next()) {
num_keys += 1;
}
delete it;
std::string collection_meta_json;
nlohmann::json collection_meta;
std::string next_seq_id;
std::string next_collection_id;
store->get(Collection::get_meta_key("collection1"), collection_meta_json);
store->get(Collection::get_next_seq_id_key("collection1"), next_seq_id);
store->get(CollectionManager::NEXT_COLLECTION_ID_KEY, next_collection_id);
ASSERT_EQ(3, num_keys);
// `collection1->get_next_seq_id()` was already called above; it has the side effect of incrementing the stored next seq id, hence 1
ASSERT_EQ(1, StringUtils::deserialize_uint32_t(next_seq_id));
nlohmann::json expected_meta_json = R"(
{
"created_at":1663234047,
"default_sorting_field":"points",
"enable_nested_fields":true,
"fallback_field_type":"",
"fields":[
{
"facet":false,
"index":true,
"infix":false,
"locale":"en",
"name":"title",
"nested":false,
"optional":false,
"sort":false,
"store":true,
"type":"string",
"range_index":false,
"stem":false
},
{
"facet":false,
"index":true,
"infix":true,
"locale":"",
"name":"starring",
"nested":false,
"optional":false,
"sort":false,
"store":true,
"type":"string",
"range_index":false,
"stem":false
},
{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"cast",
"nested":false,
"optional":true,
"sort":false,
"store":true,
"type":"string[]",
"range_index":false,
"stem":false
},
{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":".*_year",
"nested":false,
"optional":true,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
},
{
"facet":false,
"index":true,
"infix":false,
"locale":"",
"name":"location",
"nested":false,
"optional":true,
"sort":true,
"store":true,
"type":"geopoint",
"range_index":false,
"stem":false
},
{
"facet":false,
"index":false,
"infix":false,
"locale":"",
"name":"not_stored",
"nested":false,
"optional":true,
"sort":false,
"store":true,
"type":"string",
"range_index":false,
"stem":false
},
{
"facet":false,
"index":true,
"infix":false,
"locale":"",
"name":"points",
"nested":false,
"optional":false,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
},
{
"facet":false,
"index":true,
"infix":false,
"locale":"",
"name":"person",
"nested":true,
"nested_array":2,
"optional":true,
"sort":false,
"store":true,
"type":"object",
"range_index":false,
"stem":false
},
{
"facet":false,
"index":true,
"infix":false,
"locale":"",
"name":"vec",
"nested":false,
"num_dim":128,
"optional":true,
"sort":false,
"store":true,
"type":"float[]",
"vec_dist":"cosine",
"range_index":false,
"stem":false
},
{
"async_reference":true,
"facet":false,
"index":true,
"infix":false,
"locale":"",
"name":"product_id",
"nested":false,
"optional":true,
"sort":false,
"store":true,
"type":"string",
"reference":"Products.product_id",
"range_index":false,
"stem":false
},
{
"facet":false,
"index":true,
"infix":false,
"locale":"",
"name":"product_id_sequence_id",
"nested":false,
"optional":true,
"sort":true,
"store":true,
"type":"int64",
"range_index":false,
"stem":false
}
],
"id":0,
"name":"collection1",
"num_memory_shards":4,
"symbols_to_index":[
"+"
],
"token_separators":[
"-"
]
}
)"_json;
auto actual_json = nlohmann::json::parse(collection_meta_json);
expected_meta_json["created_at"] = actual_json["created_at"];
ASSERT_EQ(expected_meta_json.dump(), actual_json.dump());
ASSERT_EQ("1", next_collection_id);
}
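// Concurrent create_collection() calls should each receive a distinct collection id.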
TEST_F(CollectionManagerTest, ParallelCollectionCreation) {
std::vector<std::thread> threads;
for(size_t i = 0; i < 10; i++) {
threads.emplace_back([i, &collectionManager = collectionManager]() {
nlohmann::json coll_json = R"({
"name": "parcoll",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
coll_json["name"] = coll_json["name"].get<std::string>() + std::to_string(i+1);
auto coll_op = collectionManager.create_collection(coll_json);
ASSERT_TRUE(coll_op.ok());
});
}
for(auto& thread : threads){
thread.join();
}
int64_t prev_id = INT32_MAX;
for(auto coll: collectionManager.get_collections().get()) {
// collections are sorted by ID, in descending order
ASSERT_TRUE(coll->get_collection_id() < prev_id);
prev_id = coll->get_collection_id();
}
}
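// init_collection() should fall back to defaults for absent keys and honor explicit values (created_at, infix, sort, locale, symbols, separators).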
TEST_F(CollectionManagerTest, ShouldInitCollection) {
nlohmann::json collection_meta1 =
nlohmann::json::parse("{\"name\": \"foobar\", \"id\": 100, \"fields\": [{\"name\": \"org\", \"type\": "
"\"string\", \"facet\": false}], \"default_sorting_field\": \"foo\"}");
spp::sparse_hash_map<std::string, std::string> referenced_in;
spp::sparse_hash_map<std::string, std::set<reference_pair_t>> async_referenced_ins;
Collection *collection = collectionManager.init_collection(collection_meta1, 100, store, 1.0f, referenced_in,
async_referenced_ins);
ASSERT_EQ("foobar", collection->get_name());
ASSERT_EQ(100, collection->get_collection_id());
ASSERT_EQ(1, collection->get_fields().size());
ASSERT_EQ("foo", collection->get_default_sorting_field());
ASSERT_EQ(0, collection->get_created_at());
ASSERT_FALSE(collection->get_fields().at(0).infix);
ASSERT_FALSE(collection->get_fields().at(0).sort);
ASSERT_EQ("", collection->get_fields().at(0).locale);
delete collection;
// with non-default values
nlohmann::json collection_meta2 =
nlohmann::json::parse("{\"name\": \"foobar\", \"id\": 100, \"fields\": [{\"name\": \"org\", \"type\": "
"\"string\", \"facet\": false, \"infix\": true, \"sort\": true, \"locale\": \"en\"}], \"created_at\": 12345,"
"\"default_sorting_field\": \"foo\","
"\"symbols_to_index\": [\"+\"], \"token_separators\": [\"-\"]}");
collection = collectionManager.init_collection(collection_meta2, 100, store, 1.0f, referenced_in,
async_referenced_ins);
ASSERT_EQ(12345, collection->get_created_at());
std::vector<char> expected_symbols = {'+'};
std::vector<char> expected_separators = {'-'};
ASSERT_EQ(1, collection->get_token_separators().size());
ASSERT_EQ('-', collection->get_token_separators()[0]);
ASSERT_EQ(1, collection->get_symbols_to_index().size());
ASSERT_EQ('+', collection->get_symbols_to_index()[0]);
ASSERT_TRUE(collection->get_fields().at(0).infix);
ASSERT_TRUE(collection->get_fields().at(0).sort);
ASSERT_EQ("en", collection->get_fields().at(0).locale);
delete collection;
}
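// get_collections() should list every collection, most recently created first.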
TEST_F(CollectionManagerTest, GetAllCollections) {
std::vector<Collection*> collection_vec = collectionManager.get_collections().get();
ASSERT_EQ(1, collection_vec.size());
ASSERT_STREQ("collection1", collection_vec[0]->get_name().c_str());
// try creating one more collection
auto new_schema = R"({
"name": "collection2",
"fields": [
{"name": "title", "type": "string", "locale": "en"},
{"name": "points", "type": "int32"}
]
})"_json;
collectionManager.create_collection(new_schema);
collection_vec = collectionManager.get_collections().get();
ASSERT_EQ(2, collection_vec.size());
// most recently created collection first
ASSERT_STREQ("collection2", collection_vec[0]->get_name().c_str());
ASSERT_STREQ("collection1", collection_vec[1]->get_name().c_str());
collectionManager.drop_collection("collection2");
}
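// Documents, overrides, synonyms, reference fields and tokenization settings must all survive re-initialising the manager from the disk-backed store.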
TEST_F(CollectionManagerTest, RestoreRecordsOnRestart) {
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::string json_line;
while (std::getline(infile, json_line)) {
auto op = collection1->add(json_line);
if (!op.ok()) {
LOG(INFO) << op.error();
}
ASSERT_TRUE(op.ok());
}
infile.close();
// add some overrides
nlohmann::json override_json_include = {
{"id", "include-rule"},
{
"rule", {
{"query", "in"},
{"match", override_t::MATCH_EXACT}
}
}
};
override_json_include["includes"] = nlohmann::json::array();
override_json_include["includes"][0] = nlohmann::json::object();
override_json_include["includes"][0]["id"] = "0";
override_json_include["includes"][0]["position"] = 1;
override_json_include["includes"][1] = nlohmann::json::object();
override_json_include["includes"][1]["id"] = "3";
override_json_include["includes"][1]["position"] = 2;
override_t override_include;
override_t::parse(override_json_include, "", override_include);
nlohmann::json override_json = {
{"id", "exclude-rule"},
{
"rule", {
{"query", "of"},
{"match", override_t::MATCH_EXACT}
}
}
};
override_json["excludes"] = nlohmann::json::array();
override_json["excludes"][0] = nlohmann::json::object();
override_json["excludes"][0]["id"] = "4";
override_json["excludes"][1] = nlohmann::json::object();
override_json["excludes"][1]["id"] = "11";
override_t override_exclude;
override_t::parse(override_json, "", override_exclude);
nlohmann::json override_json_deleted = {
{"id", "deleted-rule"},
{
"rule", {
{"query", "of"},
{"match", override_t::MATCH_EXACT}
}
}
};
override_json_deleted["excludes"] = nlohmann::json::array();
override_json_deleted["excludes"][0] = nlohmann::json::object();
override_json_deleted["excludes"][0]["id"] = "11";
override_t override_deleted;
override_t::parse(override_json_deleted, "", override_deleted);
collection1->add_override(override_include);
collection1->add_override(override_exclude);
collection1->add_override(override_deleted);
collection1->remove_override("deleted-rule");
// perform some synonym operations: add three synonyms, then remove one
ASSERT_TRUE(collection1->add_synonym(R"({"id": "id1", "root": "smart phone", "synonyms": ["iphone"]})"_json).ok());
ASSERT_TRUE(collection1->add_synonym(R"({"id": "id2", "root": "mobile phone", "synonyms": ["samsung phone"]})"_json).ok());
ASSERT_TRUE(collection1->add_synonym(R"({"id": "id3", "synonyms": ["football", "foot ball"]})"_json).ok());
collection1->remove_synonym("id2");
std::vector<std::string> search_fields = {"starring", "title"};
std::vector<std::string> facets;
nlohmann::json results = collection1->search("thomas", search_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
tsl::htrie_map<char, field> schema = collection1->get_schema();
ASSERT_EQ(schema.count("product_id_sequence_id"), 1);
auto products_schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "product_name", "type": "string"},
{"name": "product_description", "type": "string"}
]
})"_json;
auto const& collection_create_op = collectionManager.create_collection(products_schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto async_ref_fields = collection_create_op.get()->get_async_referenced_ins();
ASSERT_EQ(1, async_ref_fields.size());
ASSERT_EQ(1, async_ref_fields.count("product_id"));
ASSERT_EQ(1, async_ref_fields["product_id"].size());
ASSERT_EQ("collection1", async_ref_fields["product_id"].begin()->collection);
ASSERT_EQ("product_id", async_ref_fields["product_id"].begin()->field);
// recreate the collection manager to ensure that it restores the records from the disk-backed store
collectionManager.dispose();
delete store;
store = new Store("/tmp/typesense_test/coll_manager_test_db");
collectionManager.init(store, 1.0, "auth_key", quit);
auto load_op = collectionManager.load(8, 1000);
if(!load_op.ok()) {
LOG(ERROR) << load_op.error();
}
ASSERT_TRUE(load_op.ok());
collection1 = collectionManager.get_collection("collection1").get();
ASSERT_NE(nullptr, collection1);
std::vector<std::string> facet_fields_expected = {"cast"};
ASSERT_EQ(0, collection1->get_collection_id());
ASSERT_EQ(18, collection1->get_next_seq_id());
ASSERT_EQ(facet_fields_expected, collection1->get_facet_fields());
// product_id_sequence_id is also included
ASSERT_EQ(3, collection1->get_sort_fields().size());
ASSERT_EQ("location", collection1->get_sort_fields()[0].name);
ASSERT_EQ("product_id_sequence_id", collection1->get_sort_fields()[1].name);
ASSERT_EQ("points", collection1->get_sort_fields()[2].name);
ASSERT_EQ(schema.size(), collection1->get_schema().size());
ASSERT_EQ("points", collection1->get_default_sorting_field());
ASSERT_EQ(1, collection1->get_reference_fields().size());
ASSERT_EQ("Products", collection1->get_reference_fields().at("product_id").collection);
ASSERT_EQ("product_id", collection1->get_reference_fields().at("product_id").field);
auto restored_schema = collection1->get_schema();
ASSERT_EQ(true, restored_schema.at("cast").optional);
ASSERT_EQ(true, restored_schema.at("cast").facet);
ASSERT_EQ(false, restored_schema.at("title").facet);
ASSERT_EQ(false, restored_schema.at("title").optional);
ASSERT_EQ(false, restored_schema.at("not_stored").index);
ASSERT_TRUE(restored_schema.at("person").nested);
ASSERT_EQ(2, restored_schema.at("person").nested_array);
ASSERT_EQ(128, restored_schema.at("vec").num_dim);
ASSERT_EQ(restored_schema.count("product_id_sequence_id"), 1);
ASSERT_TRUE(collection1->get_enable_nested_fields());
ASSERT_EQ(2, collection1->get_overrides().get().size());
ASSERT_STREQ("exclude-rule", collection1->get_overrides().get()["exclude-rule"]->id.c_str());
ASSERT_STREQ("include-rule", collection1->get_overrides().get()["include-rule"]->id.c_str());
const auto& synonyms = collection1->get_synonyms().get();
ASSERT_EQ(2, synonyms.size());
ASSERT_STREQ("id1", synonyms.at(0)->id.c_str());
ASSERT_EQ(2, synonyms.at(0)->root.size());
ASSERT_EQ(1, synonyms.at(0)->synonyms.size());
ASSERT_STREQ("id3", synonyms.at(1)->id.c_str());
ASSERT_EQ(0, synonyms.at(1)->root.size());
ASSERT_EQ(2, synonyms.at(1)->synonyms.size());
std::vector<char> expected_symbols = {'+'};
std::vector<char> expected_separators = {'-'};
ASSERT_EQ(1, collection1->get_token_separators().size());
ASSERT_EQ('-', collection1->get_token_separators()[0]);
ASSERT_EQ(1, collection1->get_symbols_to_index().size());
ASSERT_EQ('+', collection1->get_symbols_to_index()[0]);
results = collection1->search("thomas", search_fields, "", facets, sort_fields, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(4, results["hits"].size());
async_ref_fields = collectionManager.get_collection("Products").get()->get_async_referenced_ins();
ASSERT_EQ(1, async_ref_fields.size());
ASSERT_EQ(1, async_ref_fields.count("product_id"));
ASSERT_EQ(1, async_ref_fields["product_id"].size());
ASSERT_EQ("collection1", async_ref_fields["product_id"].begin()->collection);
ASSERT_EQ("product_id", async_ref_fields["product_id"].begin()->field);
}
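// Parameters embedded in a scoped API key must be applied to the search, and an existing filter_by must be augmented rather than replaced.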
TEST_F(CollectionManagerTest, VerifyEmbeddedParametersOfScopedAPIKey) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("year", field_types::INT32, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Tom Sawyer";
doc1["year"] = 1876;
doc1["points"] = 100;
nlohmann::json doc2;
doc2["id"] = "1";
doc2["title"] = "Tom Sawyer";
doc2["year"] = 1922;
doc2["points"] = 200;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
ASSERT_TRUE(coll1->add(doc2.dump()).ok());
auto results = coll1->search("*", {"title"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true}, 5).get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
std::map<std::string, std::string> req_params;
req_params["collection"] = "coll1";
req_params["q"] = "*";
nlohmann::json embedded_params;
embedded_params["filter_by"] = "points: 200";
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
nlohmann::json res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// existing filter should be augmented
req_params.clear();
req_params["collection"] = "coll1";
req_params["filter_by"] = "year: 1922";
req_params["q"] = "*";
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
res_obj = nlohmann::json::parse(json_res);
ASSERT_EQ(1, res_obj["found"].get<size_t>());
ASSERT_EQ(1, res_obj["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_EQ("(year: 1922) && (points: 200)", req_params["filter_by"]);
collectionManager.drop_collection("coll1");
}
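// Queries recorded by the popular_queries analytics rule should have surrounding whitespace trimmed.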
TEST_F(CollectionManagerTest, QuerySuggestionsShouldBeTrimmed) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("year", field_types::INT32, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Tom Sawyer";
doc1["year"] = 1876;
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
Config::get_instance().set_enable_search_analytics(true);
nlohmann::json analytics_rule = R"({
"name": "top_search_queries",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["coll1"],
"events": [{"type": "search", "name": "coll_search"}]
},
"destination": {
"collection": "top_queries"
}
}
})"_json;
auto create_op = AnalyticsManager::get_instance().create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
nlohmann::json embedded_params;
std::map<std::string, std::string> req_params;
req_params["collection"] = "coll1";
req_params["q"] = " tom ";
req_params["query_by"] = "title";
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
json_res.clear();
req_params["q"] = " ";
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
// check that suggestions have been trimmed
auto popular_queries = AnalyticsManager::get_instance().get_popular_queries();
ASSERT_EQ(2, popular_queries["top_queries"]->get_user_prefix_queries()[""].size());
ASSERT_EQ("tom", popular_queries["top_queries"]->get_user_prefix_queries()[""][0].query);
ASSERT_EQ("", popular_queries["top_queries"]->get_user_prefix_queries()[""][1].query);
collectionManager.drop_collection("coll1");
}
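// Searches that return no hits should be aggregated under the nohits_queries analytics rule.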
TEST_F(CollectionManagerTest, NoHitsQueryAggregation) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("year", field_types::INT32, false),
field("points", field_types::INT32, false),};
Collection* coll1 = collectionManager.create_collection("coll1", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Tom Sawyer";
doc1["year"] = 1876;
doc1["points"] = 100;
ASSERT_TRUE(coll1->add(doc1.dump()).ok());
Config::get_instance().set_enable_search_analytics(true);
nlohmann::json analytics_rule = R"({
"name": "nohits_search_queries",
"type": "nohits_queries",
"params": {
"limit": 100,
"source": {
"collections": ["coll1"]
},
"destination": {
"collection": "nohits_queries"
}
}
})"_json;
auto create_op = AnalyticsManager::get_instance().create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
nlohmann::json embedded_params;
std::map<std::string, std::string> req_params;
req_params["collection"] = "coll1";
req_params["q"] = "foobarbaz";
req_params["query_by"] = "title";
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
// check that the no-hits queries have been recorded
auto nohits_queries = AnalyticsManager::get_instance().get_nohits_queries();
ASSERT_EQ(1, nohits_queries["nohits_queries"]->get_user_prefix_queries()[""].size());
ASSERT_EQ("foobarbaz", nohits_queries["nohits_queries"]->get_user_prefix_queries()[""][0].query);
collectionManager.drop_collection("coll1");
}
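// Auto-detected fields and coerced/dropped values of an AUTO-schema collection must be restored correctly on restart.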
TEST_F(CollectionManagerTest, RestoreAutoSchemaDocsOnRestart) {
Collection *coll1;
std::ifstream infile(std::string(ROOT_DIR)+"test/optional_fields.jsonl");
std::vector<field> fields = {
field("max", field_types::INT32, false)
};
coll1 = collectionManager.get_collection("coll1").get();
if(coll1 == nullptr) {
coll1 = collectionManager.create_collection("coll1", 1, fields, "max", 0, field_types::AUTO).get();
}
std::string json_line;
while (std::getline(infile, json_line)) {
nlohmann::json document = nlohmann::json::parse(json_line);
Option<nlohmann::json> add_op = coll1->add(document.dump());
ASSERT_TRUE(add_op.ok());
}
infile.close();
ASSERT_EQ(1, coll1->get_collection_id());
ASSERT_EQ(3, coll1->get_sort_fields().size());
// index a document with 2 bad field values using the COERCE_OR_DROP setting
// `title` is an integer and `average` is a string
auto doc_json = R"({"title": 12345, "max": 25, "scores": [22, "how", 44],
"average": "bad data", "is_valid": true})";
Option<nlohmann::json> add_op = coll1->add(doc_json, CREATE, "", DIRTY_VALUES::COERCE_OR_DROP);
ASSERT_TRUE(add_op.ok());
tsl::htrie_map<char, field> schema = collection1->get_schema();
// create a new collection manager to ensure that it restores the records from the disk-backed store
CollectionManager & collectionManager2 = CollectionManager::get_instance();
collectionManager2.init(store, 1.0, "auth_key", quit);
auto load_op = collectionManager2.load(8, 1000);
if(!load_op.ok()) {
LOG(ERROR) << load_op.error();
}
ASSERT_TRUE(load_op.ok());
auto restored_coll = collectionManager2.get_collection("coll1").get();
ASSERT_NE(nullptr, restored_coll);
std::vector<std::string> facet_fields_expected = {};
auto restored_schema = restored_coll->get_schema();
ASSERT_EQ(1, restored_coll->get_collection_id());
ASSERT_EQ(7, restored_coll->get_next_seq_id());
ASSERT_EQ(7, restored_coll->get_num_documents());
ASSERT_EQ(facet_fields_expected, restored_coll->get_facet_fields());
ASSERT_EQ(3, restored_coll->get_sort_fields().size());
ASSERT_EQ("average", restored_coll->get_sort_fields()[0].name);
ASSERT_EQ("is_valid", restored_coll->get_sort_fields()[1].name);
ASSERT_EQ("max", restored_coll->get_sort_fields()[2].name);
// ensures that the "id" field is not added to the schema
ASSERT_EQ(6, restored_schema.size());
ASSERT_EQ("max", restored_coll->get_default_sorting_field());
ASSERT_EQ(1, restored_schema.count("title"));
ASSERT_EQ(1, restored_schema.count("max"));
ASSERT_EQ(1, restored_schema.count("description"));
ASSERT_EQ(1, restored_schema.count("scores"));
ASSERT_EQ(1, restored_schema.count("average"));
ASSERT_EQ(1, restored_schema.count("is_valid"));
// all auto-detected fields are optional, while the explicitly defined field is not
for(const auto& a_field: restored_schema) {
if(a_field.name == "max") {
ASSERT_FALSE(a_field.optional);
} else {
ASSERT_TRUE(a_field.optional);
}
}
// try searching for the record with bad data
auto results = restored_coll->search("12345", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(1, results["hits"].size());
// int-to-string coercion should be applied to `title`, while the `average` field must be dropped
ASSERT_STREQ("12345", results["hits"][0]["document"]["title"].get<std::string>().c_str());
ASSERT_EQ(0, results["hits"][0]["document"].count("average"));
ASSERT_EQ(2, results["hits"][0]["document"]["scores"].size());
ASSERT_EQ(22, results["hits"][0]["document"]["scores"][0]);
ASSERT_EQ(44, results["hits"][0]["document"]["scores"][1]);
// try sorting on `average`, a field that not all records have
ASSERT_EQ(7, restored_coll->get_num_documents());
sort_fields = { sort_by("average", "DESC") };
results = restored_coll->search("*", {"title"}, "", {}, {sort_fields}, {0}, 10, 1, FREQUENCY, {false}).get();
ASSERT_EQ(7, results["hits"].size());
collectionManager.drop_collection("coll1");
collectionManager2.drop_collection("coll1");
}
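// Presets must be restored when the collection manager is re-initialised from the store.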
TEST_F(CollectionManagerTest, RestorePresetsOnRestart) {
auto preset_value = R"(
{"q":"*", "per_page": "12"}
)"_json;
collectionManager.upsert_preset("single_preset", preset_value);
// create a new collection manager to ensure that it restores the records from the disk-backed store
CollectionManager& collectionManager2 = CollectionManager::get_instance();
collectionManager2.init(store, 1.0, "auth_key", quit);
auto load_op = collectionManager2.load(8, 1000);
if(!load_op.ok()) {
LOG(ERROR) << load_op.error();
}
ASSERT_TRUE(load_op.ok());
nlohmann::json preset;
collectionManager2.get_preset("single_preset", preset);
ASSERT_EQ("*", preset["q"].get<std::string>());
collectionManager.drop_collection("coll1");
collectionManager2.drop_collection("coll1");
}
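// Nested object and object[] fields must remain searchable after a restart.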
TEST_F(CollectionManagerTest, RestoreNestedDocsOnRestart) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "details", "type": "object[]" },
{"name": "company.name", "type": "string" },
{"name": "person", "type": "object"}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"details": [{"tags": ["foobar"]}],
"company": {"name": "Foobar Corp"},
"person": {"first_name": "Foobar"}
})"_json;
ASSERT_TRUE(coll1->add(doc1.dump(), CREATE).ok());
auto res_op = coll1->search("foobar", {"details"}, "", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
res_op = coll1->search("foobar", {"company.name"}, "", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
res_op = coll1->search("foobar", {"person"}, "", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
// create a new collection manager to ensure that it restores the records from the disk-backed store
CollectionManager& collectionManager2 = CollectionManager::get_instance();
collectionManager2.init(store, 1.0, "auth_key", quit);
auto load_op = collectionManager2.load(8, 1000);
if(!load_op.ok()) {
LOG(ERROR) << load_op.error();
}
ASSERT_TRUE(load_op.ok());
auto restored_coll = collectionManager2.get_collection("coll1").get();
ASSERT_NE(nullptr, restored_coll);
res_op = restored_coll->search("foobar", {"details"}, "", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
res_op = restored_coll->search("foobar", {"company.name"}, "", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
res_op = restored_coll->search("foobar", {"person"}, "", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
collectionManager.drop_collection("coll1");
collectionManager2.drop_collection("coll1");
}
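// A float value coerced into an int64 nested field must still satisfy range filters after a restart.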
TEST_F(CollectionManagerTest, RestoreCoercedDocValuesOnRestart) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "product", "type": "object" },
{"name": "product.price", "type": "int64" }
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
auto doc1 = R"({
"product": {"price": 45.78}
})"_json;
auto create_op = coll1->add(doc1.dump(), CREATE);
ASSERT_TRUE(create_op.ok());
auto res_op = coll1->search("*", {}, "product.price:>0", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
// create a new collection manager to ensure that it restores the records from the disk-backed store
CollectionManager& collectionManager2 = CollectionManager::get_instance();
collectionManager2.init(store, 1.0, "auth_key", quit);
auto load_op = collectionManager2.load(8, 1000);
if(!load_op.ok()) {
LOG(ERROR) << load_op.error();
}
ASSERT_TRUE(load_op.ok());
auto restored_coll = collectionManager2.get_collection("coll1").get();
ASSERT_NE(nullptr, restored_coll);
res_op = restored_coll->search("*", {}, "product.price:>0", {}, {}, {0}, 10, 1,
token_ordering::FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
collectionManager.drop_collection("coll1");
collectionManager2.drop_collection("coll1");
}
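// Dropping a collection should remove all of its keys from the store, leaving only the next-collection-id marker ($CI).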
TEST_F(CollectionManagerTest, DropCollectionCleanly) {
std::ifstream infile(std::string(ROOT_DIR)+"test/multi_field_documents.jsonl");
std::string json_line;
while (std::getline(infile, json_line)) {
collection1->add(json_line);
}
infile.close();
ASSERT_FALSE(nullptr == collectionManager.get_collection_with_id(0).get());
ASSERT_FALSE(nullptr == collectionManager.get_collection("collection1").get());
collectionManager.drop_collection("collection1");
rocksdb::Iterator* it = store->get_iterator();
size_t num_keys = 0;
for (it->SeekToFirst(); it->Valid(); it->Next()) {
ASSERT_EQ(it->key().ToString(), "$CI");
num_keys += 1;
}
ASSERT_EQ(1, num_keys);
ASSERT_TRUE(it->status().ok());
ASSERT_EQ(nullptr, collectionManager.get_collection("collection1").get());
ASSERT_EQ(nullptr, collectionManager.get_collection_with_id(0).get());
ASSERT_EQ(1, collectionManager.get_next_collection_id());
delete it;
}
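// auth_key_matches() should accept valid per-collection keys (including the bootstrap key) and reject empty ones, even without a request-level key.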
TEST_F(CollectionManagerTest, AuthWithMultiSearchKeys) {
api_key_t key1("api_key", "some key", {"documents:create"}, {"foo"}, 64723363199);
collectionManager.getAuthManager().create_key(key1);
std::vector<collection_key_t> collection_keys = {
collection_key_t("foo", "api_key")
};
std::vector<nlohmann::json> embedded_params_vec = { nlohmann::json::object() };
std::map<std::string, std::string> params;
// empty request-level auth key (the one normally sent via header / GET param)
ASSERT_TRUE(collectionManager.auth_key_matches("", "documents:create", collection_keys, params,
embedded_params_vec));
// should work with bootstrap key
collection_keys = {
collection_key_t("foo", "auth_key")
};
ASSERT_TRUE(collectionManager.auth_key_matches("", "documents:create", collection_keys, params,
embedded_params_vec));
// bad key
collection_keys = {
collection_key_t("foo", "")
};
ASSERT_FALSE(collectionManager.auth_key_matches("", "documents:create", collection_keys, params,
embedded_params_vec));
}
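// Collection aliases (symlinks) can be created, updated, resolved, used for dropping, deleted, and restored on startup.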
TEST_F(CollectionManagerTest, Symlinking) {
CollectionManager & cmanager = CollectionManager::get_instance();
std::string state_dir_path = "/tmp/typesense_test/cmanager_test_db";
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
Store *new_store = new Store(state_dir_path);
cmanager.init(new_store, 1.0, "auth_key", quit);
cmanager.load(8, 1000);
// try resolving on a blank slate
Option<std::string> collection_option = cmanager.resolve_symlink("collection");
ASSERT_FALSE(collection_option.ok());
ASSERT_EQ(404, collection_option.code());
ASSERT_EQ(0, cmanager.get_symlinks().size());
// symlink name cannot be the same as an existing collection
Option<bool> inserted = cmanager.upsert_symlink("collection1", "collection_2018");
ASSERT_FALSE(inserted.ok());
ASSERT_STREQ("Name `collection1` conflicts with an existing collection name.", inserted.error().c_str());
// insert a symlink
inserted = cmanager.upsert_symlink("collection_link", "collection_2018");
ASSERT_TRUE(inserted.ok());
collection_option = cmanager.resolve_symlink("collection_link");
ASSERT_TRUE(collection_option.ok());
ASSERT_EQ("collection_2018", collection_option.get());
// let's try inserting another symlink
cmanager.upsert_symlink("company", "company_2018");
collection_option = cmanager.resolve_symlink("company");
ASSERT_TRUE(collection_option.ok());
ASSERT_EQ("company_2018", collection_option.get());
ASSERT_EQ(2, cmanager.get_symlinks().size());
// update existing symlink
inserted = cmanager.upsert_symlink("company", "company_2019");
ASSERT_TRUE(inserted.ok());
collection_option = cmanager.resolve_symlink("company");
ASSERT_TRUE(collection_option.ok());
ASSERT_EQ("company_2019", collection_option.get());
// add and update a symlink against an existing collection
inserted = cmanager.upsert_symlink("collection1_link", "collection1");
ASSERT_TRUE(inserted.ok());
collection_option = cmanager.resolve_symlink("collection1_link");
ASSERT_TRUE(collection_option.ok());
ASSERT_EQ("collection1", collection_option.get());
// try to drop a collection using the alias `collection1_link`
auto drop_op = cmanager.drop_collection("collection1_link");
ASSERT_TRUE(drop_op.ok());
// try to list collections now
nlohmann::json summaries = cmanager.get_collection_summaries().get();
ASSERT_EQ(0, summaries.size());
// remap alias to another non-existing collection
inserted = cmanager.upsert_symlink("collection1_link", "collection2");
ASSERT_TRUE(inserted.ok());
collection_option = cmanager.resolve_symlink("collection1_link");
ASSERT_TRUE(collection_option.ok());
ASSERT_EQ("collection2", collection_option.get());
// remove link
Option<bool> deleted = cmanager.delete_symlink("collection");
ASSERT_TRUE(deleted.ok());
collection_option = cmanager.resolve_symlink("collection");
ASSERT_FALSE(collection_option.ok());
ASSERT_EQ(404, collection_option.code());
// try adding a few more symlinks
cmanager.upsert_symlink("company_1", "company_2018");
cmanager.upsert_symlink("company_2", "company_2019");
cmanager.upsert_symlink("company_3", "company_2020");
// should be able to restore state on init
CollectionManager & cmanager2 = CollectionManager::get_instance();
cmanager2.init(store, 1.0, "auth_key", quit);
cmanager2.load(8, 1000);
collection_option = cmanager2.resolve_symlink("company");
ASSERT_TRUE(collection_option.ok());
ASSERT_EQ("company_2019", collection_option.get());
collection_option = cmanager2.resolve_symlink("company_1");
ASSERT_TRUE(collection_option.ok());
ASSERT_EQ("company_2018", collection_option.get());
collection_option = cmanager2.resolve_symlink("company_3");
ASSERT_TRUE(collection_option.ok());
ASSERT_EQ("company_2020", collection_option.get());
delete new_store;
}
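// All 100 collections should be restored after the store is closed and reopened.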
TEST_F(CollectionManagerTest, LoadMultipleCollections) {
// dispose the manager and null the store so the fixture TearDown skips its cleanup, since we are tampering with the CollectionManager singleton
collectionManager.dispose();
delete store;
store = nullptr;
CollectionManager & cmanager = CollectionManager::get_instance();
std::string state_dir_path = "/tmp/typesense_test/cmanager_test_db";
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
Store *new_store = new Store(state_dir_path);
cmanager.init(new_store, 1.0, "auth_key", quit);
cmanager.load(8, 1000);
for(size_t i = 0; i < 100; i++) {
auto schema = {
field("title", field_types::STRING, false),
field("starring", field_types::STRING, false),
field("cast", field_types::STRING_ARRAY, true, true),
field(".*_year", field_types::INT32, true, true),
field("location", field_types::GEOPOINT, false, true, true),
field("points", field_types::INT32, false)
};
cmanager.create_collection("collection" + std::to_string(i), 4, schema, "points").get();
}
ASSERT_EQ(100, cmanager.get_collections().get().size());
cmanager.dispose();
delete new_store;
new_store = new Store(state_dir_path);
cmanager.init(new_store, 1.0, "auth_key", quit);
cmanager.load(8, 1000);
ASSERT_EQ(100, cmanager.get_collections().get().size());
for(size_t i = 0; i < 100; i++) {
collectionManager.drop_collection("collection" + std::to_string(i));
}
collectionManager.dispose();
delete new_store;
}
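// parse_sort_by_str() must handle plain fields, geo sorts, _text_match buckets, _eval expressions and nested join sorts, and reject malformed clauses.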
TEST_F(CollectionManagerTest, ParseSortByClause) {
std::vector<sort_by> sort_fields;
bool sort_by_parsed = CollectionManager::parse_sort_by_str("points:desc,loc(24.56,10.45):ASC", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_STREQ("points", sort_fields[0].name.c_str());
ASSERT_STREQ("DESC", sort_fields[0].order.c_str());
ASSERT_STREQ("loc(24.56,10.45)", sort_fields[1].name.c_str());
ASSERT_STREQ("ASC", sort_fields[1].order.c_str());
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str(" points:desc , loc(24.56,10.45):ASC", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_STREQ("points", sort_fields[0].name.c_str());
ASSERT_STREQ("DESC", sort_fields[0].order.c_str());
ASSERT_STREQ("loc(24.56,10.45)", sort_fields[1].name.c_str());
ASSERT_STREQ("ASC", sort_fields[1].order.c_str());
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str(" loc(24.56,10.45):ASC, points: desc ", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_STREQ("loc(24.56,10.45)", sort_fields[0].name.c_str());
ASSERT_STREQ("ASC", sort_fields[0].order.c_str());
ASSERT_STREQ("points", sort_fields[1].name.c_str());
ASSERT_STREQ("DESC", sort_fields[1].order.c_str());
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str(" location(48.853, 2.344, exclude_radius: 2mi):asc,popularity:desc", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ("location(48.853, 2.344, exclude_radius: 2mi)", sort_fields[0].name);
ASSERT_STREQ("ASC", sort_fields[0].order.c_str());
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str(" location(48.853, 2.344, precision: 2mi):asc,popularity:desc", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ("location(48.853, 2.344, precision: 2mi)", sort_fields[0].name);
ASSERT_STREQ("ASC", sort_fields[0].order.c_str());
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str(" _text_match(buckets: 10):ASC, points:desc ", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ("_text_match(buckets: 10)", sort_fields[0].name);
ASSERT_EQ("ASC", sort_fields[0].order);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("_eval(brand:nike && foo:bar):DESC,points:desc ", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ("_eval", sort_fields[0].name);
ASSERT_FALSE(sort_fields[0].eval_expressions.empty());
ASSERT_EQ("brand:nike && foo:bar", sort_fields[0].eval_expressions[0]);
ASSERT_EQ(1, sort_fields[0].eval.scores.size());
ASSERT_EQ(1, sort_fields[0].eval.scores[0]);
ASSERT_EQ("DESC", sort_fields[0].order);
ASSERT_EQ("points", sort_fields[1].name);
ASSERT_EQ("DESC", sort_fields[1].order);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("_eval([(brand:nike || brand:air):3, (brand:adidas):2]):DESC", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ("_eval", sort_fields[0].name);
ASSERT_EQ(2, sort_fields[0].eval_expressions.size());
ASSERT_EQ("brand:nike || brand:air", sort_fields[0].eval_expressions[0]);
ASSERT_EQ("brand:adidas", sort_fields[0].eval_expressions[1]);
ASSERT_EQ(2, sort_fields[0].eval.scores.size());
ASSERT_EQ(3, sort_fields[0].eval.scores[0]);
ASSERT_EQ(2, sort_fields[0].eval.scores[1]);
ASSERT_EQ("DESC", sort_fields[0].order);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("points:desc, loc(24.56,10.45):ASC, "
"$Customers(product_price:DESC)", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ(3, sort_fields.size());
ASSERT_EQ("points", sort_fields[0].name);
ASSERT_EQ("DESC", sort_fields[0].order);
ASSERT_EQ("loc(24.56,10.45)", sort_fields[1].name);
ASSERT_EQ("ASC", sort_fields[1].order);
ASSERT_EQ("$Customers(product_price:DESC)", sort_fields[2].name);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("_eval(brand:nike && foo:bar):DESC, "
"$Customers(product_price:DESC)", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ(2, sort_fields.size());
ASSERT_EQ("_eval", sort_fields[0].name);
ASSERT_FALSE(sort_fields[0].eval_expressions.empty());
ASSERT_EQ("brand:nike && foo:bar", sort_fields[0].eval_expressions[0]);
ASSERT_EQ(1, sort_fields[0].eval.scores.size());
ASSERT_EQ(1, sort_fields[0].eval.scores[0]);
ASSERT_EQ("DESC", sort_fields[0].order);
ASSERT_EQ("$Customers(product_price:DESC)", sort_fields[1].name);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("$foo(bar:ASC), "
"$Customers(product_price:DESC)", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ(2, sort_fields.size());
ASSERT_EQ("$foo(bar:ASC)", sort_fields[0].name);
ASSERT_EQ("$Customers(product_price:DESC)", sort_fields[1].name);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("$foo( _eval(brand:nike && foo:bar):DESC,points:desc) ",
sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ("$foo( _eval(brand:nike && foo:bar):DESC,points:desc)", sort_fields[0].name);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("$Customers(product_price:DESC, $foo(bar:asc))", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ(2, sort_fields.size());
ASSERT_EQ("$Customers(product_price:DESC, )", sort_fields[0].name);
ASSERT_EQ("$foo(bar:asc)", sort_fields[1].name);
ASSERT_TRUE(sort_fields[1].is_nested_join_sort_by());
ASSERT_EQ(2, sort_fields[1].nested_join_collection_names.size());
ASSERT_EQ("Customers", sort_fields[1].nested_join_collection_names[0]);
ASSERT_EQ("foo", sort_fields[1].nested_join_collection_names[1]);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("$foo($bar($baz(field:asc)))", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ(1, sort_fields.size());
ASSERT_EQ("$baz(field:asc)", sort_fields[0].name);
ASSERT_TRUE(sort_fields[0].is_nested_join_sort_by());
ASSERT_EQ(3, sort_fields[0].nested_join_collection_names.size());
ASSERT_EQ("foo", sort_fields[0].nested_join_collection_names[0]);
ASSERT_EQ("bar", sort_fields[0].nested_join_collection_names[1]);
ASSERT_EQ("baz", sort_fields[0].nested_join_collection_names[2]);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("$Customers(product_price:DESC, $foo($bar( _eval(brand:nike && foo:bar):DESC), baz:asc))", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ(3, sort_fields.size());
ASSERT_EQ("$Customers(product_price:DESC, )", sort_fields[0].name);
ASSERT_EQ("$bar( _eval(brand:nike && foo:bar):DESC)", sort_fields[1].name);
ASSERT_TRUE(sort_fields[1].is_nested_join_sort_by());
ASSERT_EQ(3, sort_fields[1].nested_join_collection_names.size());
ASSERT_EQ("Customers", sort_fields[1].nested_join_collection_names[0]);
ASSERT_EQ("foo", sort_fields[1].nested_join_collection_names[1]);
ASSERT_EQ("bar", sort_fields[1].nested_join_collection_names[2]);
ASSERT_EQ("$foo(baz:asc)", sort_fields[2].name);
ASSERT_TRUE(sort_fields[2].is_nested_join_sort_by());
ASSERT_EQ(2, sort_fields[2].nested_join_collection_names.size());
ASSERT_EQ("Customers", sort_fields[2].nested_join_collection_names[0]);
ASSERT_EQ("foo", sort_fields[2].nested_join_collection_names[1]);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("", sort_fields);
ASSERT_TRUE(sort_by_parsed);
ASSERT_EQ(0, sort_fields.size());
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("foobar:", sort_fields);
ASSERT_FALSE(sort_by_parsed);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str("foobar:,bar:desc", sort_fields);
ASSERT_FALSE(sort_by_parsed);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str(",", sort_fields);
ASSERT_FALSE(sort_by_parsed);
sort_fields.clear();
sort_by_parsed = CollectionManager::parse_sort_by_str(",,", sort_fields);
ASSERT_FALSE(sort_by_parsed);
}
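// Presets can be upserted, fetched individually, deleted, and restored from the store on startup.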
TEST_F(CollectionManagerTest, Presets) {
// try getting on a blank slate
auto presets = collectionManager.get_presets();
ASSERT_TRUE(presets.empty());
// insert some presets
nlohmann::json preset_obj;
preset_obj["query_by"] = "foo";
collectionManager.upsert_preset("preset1", preset_obj);
preset_obj["query_by"] = "bar";
collectionManager.upsert_preset("preset2", preset_obj);
ASSERT_EQ(2, collectionManager.get_presets().size());
// try fetching individual presets
nlohmann::json preset;
auto preset_op = collectionManager.get_preset("preset1", preset);
ASSERT_TRUE(preset_op.ok());
ASSERT_EQ(1, preset.size());
ASSERT_EQ("foo", preset["query_by"]);
preset.clear();
preset_op = collectionManager.get_preset("preset2", preset);
ASSERT_TRUE(preset_op.ok());
ASSERT_EQ(1, preset.size());
ASSERT_EQ("bar", preset["query_by"]);
// delete a preset
auto del_op = collectionManager.delete_preset("preset2");
ASSERT_TRUE(del_op.ok());
std::string val;
auto status = store->get(CollectionManager::get_preset_key("preset2"), val);
ASSERT_EQ(StoreStatus::NOT_FOUND, status);
ASSERT_EQ(1, collectionManager.get_presets().size());
preset.clear();
preset_op = collectionManager.get_preset("preset2", preset);
ASSERT_FALSE(preset_op.ok());
ASSERT_EQ(404, preset_op.code());
// should be able to restore state on init
collectionManager.dispose();
delete store;
store = new Store("/tmp/typesense_test/coll_manager_test_db");
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
ASSERT_EQ(1, collectionManager.get_presets().size());
preset.clear();
preset_op = collectionManager.get_preset("preset1", preset);
ASSERT_TRUE(preset_op.ok());
}
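// Cloning a collection should copy its schema, synonyms, overrides and tokenization settings.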
TEST_F(CollectionManagerTest, CloneCollection) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"}
],
"symbols_to_index":["+"],
"token_separators":["-", "?"]
})"_json;
auto create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(create_op.ok());
auto coll1 = create_op.get();
nlohmann::json synonym1 = R"({
"id": "ipod-synonyms",
"synonyms": ["ipod", "i pod", "pod"]
})"_json;
ASSERT_TRUE(coll1->add_synonym(synonym1).ok());
nlohmann::json override_json = {
{"id", "dynamic-cat-filter"},
{
"rule", {
{"query", "{categories}"},
{"match", override_t::MATCH_EXACT}
}
},
{"remove_matched_tokens", true},
{"filter_by", "category: {categories}"}
};
override_t override;
auto op = override_t::parse(override_json, "dynamic-cat-filter", override);
ASSERT_TRUE(op.ok());
coll1->add_override(override);
nlohmann::json req = R"({"name": "coll2"})"_json;
collectionManager.clone_collection("coll1", req);
auto coll2 = collectionManager.get_collection_unsafe("coll2");
ASSERT_FALSE(coll2 == nullptr);
ASSERT_EQ("coll2", coll2->get_name());
ASSERT_EQ(1, coll2->get_fields().size());
ASSERT_EQ(1, coll2->get_synonyms().get().size());
ASSERT_EQ(1, coll2->get_overrides().get().size());
ASSERT_EQ("", coll2->get_fallback_field_type());
ASSERT_EQ(1, coll2->get_symbols_to_index().size());
ASSERT_EQ(2, coll2->get_token_separators().size());
ASSERT_EQ('+', coll2->get_symbols_to_index().at(0));
ASSERT_EQ('-', coll2->get_token_separators().at(0));
ASSERT_EQ('?', coll2->get_token_separators().at(1));
}
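// References to a collection that does not exist yet are kept in a backlog and resolved once the target collection is created.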
TEST_F(CollectionManagerTest, ReferencedInBacklog) {
auto referenced_ins_backlog = collectionManager._get_referenced_in_backlog();
ASSERT_EQ(1, referenced_ins_backlog.count("Products"));
auto const& references = referenced_ins_backlog.at("Products");
ASSERT_EQ(1, references.size());
ASSERT_EQ("collection1", references.cbegin()->collection);
ASSERT_EQ("product_id", references.cbegin()->field);
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "name", "type": "string"}
]
})"_json;
auto create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(create_op.ok());
referenced_ins_backlog = collectionManager._get_referenced_in_backlog();
ASSERT_EQ(0, referenced_ins_backlog.count("Products"));
auto get_reference_field_op = create_op.get()->get_referenced_in_field_with_lock("collection1");
ASSERT_TRUE(get_reference_field_op.ok());
ASSERT_EQ("product_id", get_reference_field_op.get());
get_reference_field_op = create_op.get()->get_referenced_in_field_with_lock("foo");
ASSERT_FALSE(get_reference_field_op.ok());
ASSERT_EQ("Could not find any field in `Products` referencing the collection `foo`.", get_reference_field_op.error());
}
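// The `fields` key can be excluded from collection summaries via the exclude list.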
TEST_F(CollectionManagerTest, ExcludeFieldsInCollectionListing) {
auto schema_json =
R"({
"name": "products",
"fields": [
{"name": "product_id", "type": "string"},
{"name": "name", "type": "string"},
{"name": "points", "type": "int32"}
],
"default_sorting_field": "points"
})"_json;
auto create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(create_op.ok());
nlohmann::json coll_json_summaries = collectionManager.get_collection_summaries(10, 0, {"fields"}).get();
ASSERT_EQ(2, coll_json_summaries.size());
for(auto coll_json: coll_json_summaries) {
ASSERT_FALSE(coll_json.contains("fields"));
}
coll_json_summaries = collectionManager.get_collection_summaries(10, 0, {}).get();
ASSERT_EQ(2, coll_json_summaries.size());
for(auto coll_json: coll_json_summaries) {
ASSERT_TRUE(coll_json.contains("fields"));
}
}
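// The optional `metadata` object must be validated, persisted with the collection meta, and surfaced in the collection summary.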
TEST_F(CollectionManagerTest, CollectionCreationWithMetadata) {
CollectionManager & collectionManager3 = CollectionManager::get_instance();
nlohmann::json schema1 = R"({
"name": "collection_meta",
"enable_nested_fields": true,
"fields": [
{"name": "value.color", "type": "string", "optional": false, "facet": true },
{"name": "value.r", "type": "int32", "optional": false, "facet": true },
{"name": "value.g", "type": "int32", "optional": false, "facet": true },
{"name": "value.b", "type": "int32", "optional": false, "facet": true }
],
"metadata": "abc"
})"_json;
auto op = collectionManager.create_collection(schema1);
ASSERT_FALSE(op.ok());
ASSERT_EQ("The `metadata` value should be an object.", op.error());
nlohmann::json schema2 = R"({
"name": "collection_meta",
"enable_nested_fields": true,
"fields": [
{"name": "value.color", "type": "string", "optional": false, "facet": true },
{"name": "value.r", "type": "int32", "optional": false, "facet": true },
{"name": "value.g", "type": "int32", "optional": false, "facet": true },
{"name": "value.b", "type": "int32", "optional": false, "facet": true }
],
"metadata": {
"batch_job":"",
"indexed_from":"2023-04-20T00:00:00.000Z",
"total_docs": 0
}
})"_json;
op = collectionManager.create_collection(schema2);
ASSERT_TRUE(op.ok());
Collection* coll1 = op.get();
std::string collection_meta_json;
nlohmann::json collection_meta;
std::string next_seq_id;
std::string next_collection_id;
store->get(Collection::get_meta_key("collection_meta"), collection_meta_json);
store->get(Collection::get_next_seq_id_key("collection_meta"), next_seq_id);
//LOG(INFO) << collection_meta_json;
nlohmann::json expected_meta_json = R"(
{
"created_at":1705482381,
"default_sorting_field":"",
"enable_nested_fields":true,
"fallback_field_type":"",
"fields":[
{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.color",
"nested":true,
"nested_array":2,
"optional":false,
"sort":false,
"store":true,
"type":"string",
"range_index":false,
"stem":false
},
{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.r",
"nested":true,
"nested_array":2,
"optional":false,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
},{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.g",
"nested":true,
"nested_array":2,
"optional":false,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
},{
"facet":true,
"index":true,
"infix":false,
"locale":"",
"name":"value.b",
"nested":true,
"nested_array":2,
"optional":false,
"sort":true,
"store":true,
"type":"int32",
"range_index":false,
"stem":false
}
],
"id":1,
"metadata":{
"batch_job":"",
"indexed_from":"2023-04-20T00:00:00.000Z",
"total_docs":0
},
"name":"collection_meta",
"num_memory_shards":4,
"symbols_to_index":[],
"token_separators":[]
})"_json;
auto actual_json = nlohmann::json::parse(collection_meta_json);
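// `created_at` is assigned at creation time, so copy it from the actual meta before comparing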
expected_meta_json["created_at"] = actual_json["created_at"];
ASSERT_EQ(expected_meta_json.dump(), actual_json.dump());
// metadata should exist as part of collection summary
auto coll_summary = coll1->get_summary_json();
ASSERT_EQ(expected_meta_json["metadata"].dump(), coll_summary["metadata"].dump());
// if no metadata is given, the key should not be present in response
schema2 = R"({
"name": "coll2",
"enable_nested_fields": true,
"fields": [
{"name": "value.color", "type": "string"}
]
})"_json;
op = collectionManager.create_collection(schema2);
ASSERT_TRUE(op.ok());
Collection* coll2 = op.get();
ASSERT_EQ(0, coll2->get_summary_json().count("metadata"));
}
TEST_F(CollectionManagerTest, PopulateReferencedIns) {
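// _populate_referenced_ins should map each referenced collection to the collections/fields that
// reference it, tracking async references separately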
std::vector<std::string> collection_meta_jsons = {
R"({
"name": "A",
"fields": [
{"name": "a_id", "type": "string"}
]
})"_json.dump(),
R"({
"name": "B",
"fields": [
{"name": "b_id", "type": "string"},
{"name": "a_ref", "type": "string", "reference": "A.a_id"},
{"name": "c_ref", "type": "string", "reference": "C.c_id", "async_reference": true}
]
})"_json.dump(),
R"({
"name": "C",
"fields": [
{"name": "c_id", "type": "string"}
]
})"_json.dump(),
};
std::map<std::string, spp::sparse_hash_map<std::string, std::string>> referenced_ins;
std::map<std::string, spp::sparse_hash_map<std::string, std::set<reference_pair_t>>> async_referenced_ins;
for (const auto &collection_meta_json: collection_meta_jsons) {
CollectionManager::_populate_referenced_ins(collection_meta_json, referenced_ins, async_referenced_ins);
}
ASSERT_EQ(2, referenced_ins.size());
ASSERT_EQ(1, referenced_ins.count("A"));
ASSERT_EQ(1, referenced_ins["A"].size());
ASSERT_EQ(1, referenced_ins["A"].count("B"));
ASSERT_EQ("a_ref", referenced_ins["A"]["B"]);
ASSERT_EQ(1, referenced_ins.count("C"));
ASSERT_EQ(1, referenced_ins["C"].size());
ASSERT_EQ(1, referenced_ins["C"].count("B"));
ASSERT_EQ("c_ref", referenced_ins["C"]["B"]);
ASSERT_EQ(1, async_referenced_ins.count("C"));
ASSERT_EQ(1, async_referenced_ins["C"].size());
ASSERT_EQ(1, async_referenced_ins["C"].count("c_id"));
ASSERT_EQ(1, async_referenced_ins["C"]["c_id"].size());
ASSERT_EQ("B", async_referenced_ins["C"]["c_id"].begin()->collection);
ASSERT_EQ("c_ref", async_referenced_ins["C"]["c_id"].begin()->field);
}
TEST_F(CollectionManagerTest, CollectionPagination) {
//remove all collections first
auto collections = collectionManager.get_collections().get();
for(auto collection : collections) {
collectionManager.drop_collection(collection->get_name());
}
// create a few collections
for(size_t i = 0; i < 5; i++) {
nlohmann::json coll_json = R"({
"name": "cp",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
coll_json["name"] = coll_json["name"].get<std::string>() + std::to_string(i + 1);
auto coll_op = collectionManager.create_collection(coll_json);
ASSERT_TRUE(coll_op.ok());
}
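// note: collections are listed in the manager's internal order (cp2, cp5, cp3, cp1, cp4), not in creation order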
uint32_t limit = 0, offset = 0;
// limit collections to 2
limit=2;
auto collection_op = collectionManager.get_collections(limit);
auto collections_vec = collection_op.get();
ASSERT_EQ(2, collections_vec.size());
ASSERT_EQ("cp2", collections_vec[0]->get_name());
ASSERT_EQ("cp5", collections_vec[1]->get_name());
// get 2 collections from offset 3
offset=3;
collection_op = collectionManager.get_collections(limit, offset);
collections_vec = collection_op.get();
ASSERT_EQ(2, collections_vec.size());
ASSERT_EQ("cp1", collections_vec[0]->get_name());
ASSERT_EQ("cp4", collections_vec[1]->get_name());
// get all collections except the first
offset=1; limit=0;
collection_op = collectionManager.get_collections(limit, offset);
collections_vec = collection_op.get();
ASSERT_EQ(4, collections_vec.size());
ASSERT_EQ("cp5", collections_vec[0]->get_name());
ASSERT_EQ("cp3", collections_vec[1]->get_name());
ASSERT_EQ("cp1", collections_vec[2]->get_name());
ASSERT_EQ("cp4", collections_vec[3]->get_name());
//get last collection
offset=4, limit=1;
collection_op = collectionManager.get_collections(limit, offset);
collections_vec = collection_op.get();
ASSERT_EQ(1, collections_vec.size());
ASSERT_EQ("cp4", collections_vec[0]->get_name());
// if limit is greater than the number of collections, return all from the offset
offset=0; limit=8;
collection_op = collectionManager.get_collections(limit, offset);
collections_vec = collection_op.get();
ASSERT_EQ(5, collections_vec.size());
ASSERT_EQ("cp2", collections_vec[0]->get_name());
ASSERT_EQ("cp5", collections_vec[1]->get_name());
ASSERT_EQ("cp3", collections_vec[2]->get_name());
ASSERT_EQ("cp1", collections_vec[3]->get_name());
ASSERT_EQ("cp4", collections_vec[4]->get_name());
offset=3; limit=4;
collection_op = collectionManager.get_collections(limit, offset);
collections_vec = collection_op.get();
ASSERT_EQ(2, collections_vec.size());
ASSERT_EQ("cp1", collections_vec[0]->get_name());
ASSERT_EQ("cp4", collections_vec[1]->get_name());
//invalid offset
offset=6; limit=0;
collection_op = collectionManager.get_collections(limit, offset);
ASSERT_FALSE(collection_op.ok());
ASSERT_EQ("Invalid offset param.", collection_op.error());
}
TEST_F(CollectionManagerTest, HideQueryFromAnalytics) {
std::vector<field> fields = {field("title", field_types::STRING, false, false, true, "", -1, 1),
field("year", field_types::INT32, false),
field("points", field_types::INT32, false),};
Collection* coll3 = collectionManager.create_collection("coll3", 1, fields, "points").get();
nlohmann::json doc1;
doc1["id"] = "0";
doc1["title"] = "Tom Sawyer";
doc1["year"] = 1876;
doc1["points"] = 100;
ASSERT_TRUE(coll3->add(doc1.dump()).ok());
Config::get_instance().set_enable_search_analytics(true);
nlohmann::json analytics_rule = R"({
"name": "hide_search_queries",
"type": "popular_queries",
"params": {
"limit": 100,
"source": {
"collections": ["coll3"],
"events": [{"type": "search", "name": "coll_search3"}]
},
"destination": {
"collection": "top_queries2"
}
}
})"_json;
auto create_op = AnalyticsManager::get_instance().create_rule(analytics_rule, false, true);
ASSERT_TRUE(create_op.ok());
nlohmann::json embedded_params;
std::string json_res;
std::map<std::string, std::string> req_params;
req_params["collection"] = "coll3";
req_params["q"] = "tom";
req_params["query_by"] = "title";
req_params["enable_analytics"] = "false";
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
auto popular_queries = AnalyticsManager::get_instance().get_popular_queries();
ASSERT_EQ(0, popular_queries["top_queries2"]->get_user_prefix_queries().size());
req_params["enable_analytics"] = "true";
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_TRUE(search_op.ok());
popular_queries = AnalyticsManager::get_instance().get_popular_queries();
ASSERT_EQ(1, popular_queries["top_queries2"]->get_user_prefix_queries().size());
collectionManager.drop_collection("coll3");
}
// ---- typesense_typesense/test/collection_vector_search_test.cpp ----
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <collection_manager.h>
#include "collection.h"
#include <cstdlib>
#include <ctime>
#include "conversation_manager.h"
#include "conversation_model_manager.h"
#include "index.h"
#include "core_api.h"
#include "vq_model_manager.h"
#include "conversation_model.h"
class CollectionVectorTest : public ::testing::Test {
protected:
Store *store;
CollectionManager & collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/collection_vector_search";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
ConversationModelManager::init(store);
nlohmann::json schema_json = R"({
"name": "conversation_store",
"fields": [
{
"name": "conversation_id",
"type": "string"
},
{
"name": "role",
"type": "string",
"index": false
},
{
"name": "message",
"type": "string",
"index": false
},
{
"name": "timestamp",
"type": "int32",
"sort": true
},
{
"name": "model_id",
"type": "string"
}
]
})"_json;
collectionManager.create_collection(schema_json);
}
virtual void SetUp() {
setupCollection();
}
virtual void TearDown() {
collectionManager.dispose();
EmbedderManager::get_instance().delete_all_text_embedders();
delete store;
}
};
TEST_F(CollectionVectorTest, BasicVectorQuerying) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32", "facet": true},
{"name": "vec", "type": "float[]", "num_dim": 4}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
auto coll_summary = coll1->get_summary_json();
ASSERT_EQ("cosine", coll_summary["fields"][2]["vec_dist"].get<std::string>());
std::vector<std::vector<float>> values = {
{0.851758, 0.909671, 0.823431, 0.372063},
{0.97826, 0.933157, 0.39557, 0.306488},
{0.230606, 0.634397, 0.514009, 0.399594}
};
for (size_t i = 0; i < values.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = std::to_string(i) + " title";
doc["points"] = i;
doc["vec"] = values[i];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488])").get();
ASSERT_EQ(3, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][2]["document"]["id"].get<std::string>().c_str());
ASSERT_FLOAT_EQ(3.409385681152344e-05, results["hits"][0]["vector_distance"].get<float>());
ASSERT_FLOAT_EQ(0.04329806566238403, results["hits"][1]["vector_distance"].get<float>());
ASSERT_FLOAT_EQ(0.15141665935516357, results["hits"][2]["vector_distance"].get<float>());
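// hits are ordered by ascending cosine distance from the query vector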
// with filtering
results = coll1->search("*", {}, "points:[0,1]", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], flat_search_cutoff: 0)").get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
// with filtering + flat search
results = coll1->search("*", {}, "points:[0,1]", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], flat_search_cutoff: 1000)").get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("1", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("0", results["hits"][1]["document"]["id"].get<std::string>().c_str());
// must trim space after field name
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec :([0.96826, 0.94, 0.39557, 0.306488])").get();
ASSERT_EQ(3, results["found"].get<size_t>());
// validate wrong dimensions in query
auto res_op = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557])");
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Query field `vec` must have 4 dimensions.", res_op.error());
// validate bad vector query field name
res_op = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "zec:([0.96826, 0.94, 0.39557, 0.4542])");
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Field `zec` does not have a vector query index.", res_op.error());
// pass `id` of existing doc instead of vector, query doc should be omitted from results
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([], id: 1)").get();
ASSERT_EQ(2, results["found"].get<size_t>());
ASSERT_EQ(2, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
ASSERT_STREQ("2", results["hits"][1]["document"]["id"].get<std::string>().c_str());
// when id does not match filter, don't return k+1 hits
results = coll1->search("*", {}, "id:!=1", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([], id: 1, k:1)").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
// `k` value should override per_page
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], k: 1)").get();
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("*", {}, "", {"points"}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], k: 1)",
true, 0, max_score, 100,
0, 0, "top_values").get();
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, results["facet_counts"].size());
ASSERT_EQ(1, results["facet_counts"][0]["counts"].size());
ASSERT_EQ("1", results["facet_counts"][0]["counts"][0]["value"]);
// when k is not set, should use per_page
results = coll1->search("*", {}, "", {}, {}, {0}, 2, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488])").get();
ASSERT_EQ(2, results["hits"].size());
// when `id` does not exist, return appropriate error
res_op = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([], id: 100)");
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Document id referenced in vector query is not found.", res_op.error());
// `num_dim` is only allowed on float array fields
schema = R"({
"name": "coll2",
"fields": [
{"name": "title", "type": "string"},
{"name": "vec", "type": "float", "num_dim": 4}
]
})"_json;
auto coll_op = collectionManager.create_collection(schema);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("Property `num_dim` is only allowed on a float array field.", coll_op.error());
// bad value for num_dim
schema = R"({
"name": "coll2",
"fields": [
{"name": "title", "type": "string"},
{"name": "vec", "type": "float", "num_dim": -4}
]
})"_json;
coll_op = collectionManager.create_collection(schema);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("Property `num_dim` must be a positive integer.", coll_op.error());
collectionManager.drop_collection("coll1");
}
TEST_F(CollectionVectorTest, VectorDistanceConfig) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 4, "vec_dist": "ip"}
]
})"_json;
Collection *coll1 = collectionManager.create_collection(schema).get();
auto coll_summary = coll1->get_summary_json();
ASSERT_EQ("ip", coll_summary["fields"][2]["vec_dist"].get<std::string>());
}
TEST_F(CollectionVectorTest, VectorQueryByIDWithZeroValuedFloat) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 3}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
auto coll_summary = coll1->get_summary_json();
ASSERT_EQ("cosine", coll_summary["fields"][2]["vec_dist"].get<std::string>());
nlohmann::json doc = R"(
{
"title": "Title 1",
"points": 100,
"vec": [0, 0, 0]
}
)"_json;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
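// querying by the doc's own id should succeed even though its stored vector is all zeros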
auto res_op = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([], id: 0)");
ASSERT_TRUE(res_op.ok());
}
TEST_F(CollectionVectorTest, VectorUnchangedUpsert) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 3}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::vector<float> vec = {0.12, 0.45, 0.64};
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Title";
doc["points"] = 100;
doc["vec"] = vec;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.12, 0.44, 0.55])").get();
ASSERT_EQ(1, results["found"].get<size_t>());
// upsert unchanged doc
add_op = coll1->add(doc.dump(), index_operation_t::UPSERT);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.12, 0.44, 0.55])").get();
ASSERT_EQ(1, results["found"].get<size_t>());
// emplace unchanged doc
add_op = coll1->add(doc.dump(), index_operation_t::EMPLACE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.12, 0.44, 0.55])").get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionVectorTest, VectorChangedUpsert) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 2}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Title";
doc["points"] = 100;
doc["vec"] = {0.15, 0.25};
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.44, 0.44])").get();
ASSERT_FLOAT_EQ(0.029857516288757324, results["hits"][0]["vector_distance"].get<float>());
// upsert changed doc
doc["id"] = "0";
doc["title"] = "Title";
doc["points"] = 100;
doc["vec"] = {0.75, 0.95};
add_op = coll1->add(doc.dump(), index_operation_t::UPSERT);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.44, 0.44])").get();
ASSERT_FLOAT_EQ(0.006849408149719238, results["hits"][0]["vector_distance"].get<float>());
// put old doc back using update
doc["id"] = "0";
doc["title"] = "Title";
doc["points"] = 100;
doc["vec"] = {0.15, 0.25};
add_op = coll1->add(doc.dump(), index_operation_t::UPDATE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.44, 0.44])").get();
ASSERT_FLOAT_EQ(0.029857516288757324, results["hits"][0]["vector_distance"].get<float>());
// revert using emplace
doc["id"] = "0";
doc["title"] = "Title";
doc["points"] = 100;
doc["vec"] = {0.75, 0.95};
add_op = coll1->add(doc.dump(), index_operation_t::EMPLACE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.44, 0.44])").get();
ASSERT_FLOAT_EQ(0.006849408149719238, results["hits"][0]["vector_distance"].get<float>());
}
TEST_F(CollectionVectorTest, VectorManyUpserts) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 3}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
size_t d = 3;
size_t n = 50;
std::mt19937 rng;
rng.seed(47);
std::uniform_real_distribution<> distrib;
std::vector<std::string> import_records;
// first insert n docs
for (size_t i = 0; i < n; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = std::to_string(i) + " title";
doc["points"] = i;
std::vector<float> values;
for (size_t j = 0; j < d; j++) {
values.push_back(distrib(rng));
}
doc["vec"] = values;
import_records.push_back(doc.dump());
}
nlohmann::json document;
nlohmann::json import_response = coll1->add_many(import_records, document);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(n, import_response["num_imported"].get<int>());
import_records.clear();
size_t num_new_records = 0;
// upsert a mix of old + new docs
for (size_t i = 0; i < n; i++) {
nlohmann::json doc;
auto id = i;
if(i % 2 != 0) {
id = (i + 1000);
num_new_records++;
}
doc["id"] = std::to_string(id);
doc["title"] = std::to_string(id) + " title";
doc["points"] = id;
std::vector<float> values;
for (size_t j = 0; j < d; j++) {
values.push_back(distrib(rng) + 0.01);
}
doc["vec"] = values;
import_records.push_back(doc.dump());
}
import_response = coll1->add_many(import_records, document, UPSERT);
ASSERT_TRUE(import_response["success"].get<bool>());
ASSERT_EQ(n, import_response["num_imported"].get<int>());
import_records.clear();
/*for(size_t i = 0; i < 100; i++) {
auto results = coll1->search("*", {}, "", {}, {}, {0}, 200, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.12, 0.44, 0.55])").get();
if(results["found"].get<size_t>() != n+num_new_records) {
LOG(INFO) << results["found"].get<size_t>();
}
}*/
//LOG(INFO) << "Expected: " << n + num_new_records;
//ASSERT_EQ(n + num_new_records, results["found"].get<size_t>());
//ASSERT_EQ(n + num_new_records, results["hits"].size());
}
TEST_F(CollectionVectorTest, VectorPartialUpdate) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 3}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::vector<float> vec = {0.12, 0.45, 0.64};
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Title";
doc["points"] = 100;
doc["vec"] = vec;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.12, 0.44, 0.55])").get();
ASSERT_EQ(1, results["found"].get<size_t>());
// emplace partial doc
doc.erase("vec");
doc["title"] = "Random";
add_op = coll1->add(doc.dump(), index_operation_t::EMPLACE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("Random", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.12, 0.44, 0.55])").get();
ASSERT_EQ(1, results["found"].get<size_t>());
// update partial doc
doc.erase("vec");
doc["title"] = "Random";
add_op = coll1->add(doc.dump(), index_operation_t::UPDATE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("Random", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.12, 0.44, 0.55])").get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionVectorTest, NumVectorGreaterThanNumDim) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 3}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
srand (static_cast <unsigned> (time(0)));
for(size_t i = 0; i < 10; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = "Title";
doc["points"] = 100;
doc["vec"] = std::vector<float>();
for(size_t j = 0; j < 100; j++) {
float r = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
doc["vec"].push_back(r);
}
auto add_op = coll1->add(doc.dump());
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `vec` must have 3 dimensions.", add_op.error());
}
}
TEST_F(CollectionVectorTest, IndexGreaterThan1KVectors) {
// tests the dynamic resizing of graph
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 4}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
size_t d = 4;
size_t n = 1500;
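// inserting 1500 docs exceeds the index's initial capacity, forcing the underlying HNSW graph to resize at least once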
std::mt19937 rng;
rng.seed(47);
std::uniform_real_distribution<> distrib;
for (size_t i = 0; i < n; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = std::to_string(i) + " title";
doc["points"] = i;
std::vector<float> values;
for (size_t j = 0; j < d; j++) {
values.push_back(distrib(rng));
}
doc["vec"] = values;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "").get();
ASSERT_EQ(1500, results["found"].get<size_t>());
}
TEST_F(CollectionVectorTest, InsertDocWithEmptyVectorAndDelete) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "vec", "type": "float[]", "num_dim": 4, "optional": true}
]
})"_json;
Collection *coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["vec"] = {};
ASSERT_TRUE(coll1->add(doc.dump()).ok());
ASSERT_TRUE(coll1->remove("0").ok());
}
TEST_F(CollectionVectorTest, VecSearchWithFiltering) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 4}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::mt19937 rng;
rng.seed(47);
std::uniform_real_distribution<> distrib;
size_t num_docs = 20;
for (size_t i = 0; i < num_docs; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = std::to_string(i) + " title";
doc["points"] = i;
std::vector<float> values;
for(size_t j = 0; j < 4; j++) {
values.push_back(distrib(rng));
}
doc["vec"] = values;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
auto results = coll1->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488])").get();
ASSERT_EQ(num_docs, results["found"].get<size_t>());
ASSERT_EQ(num_docs, results["hits"].size());
// with points:<10, non-flat-search
results = coll1->search("*", {}, "points:<10", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], flat_search_cutoff: 0)").get();
ASSERT_EQ(10, results["found"].get<size_t>());
ASSERT_EQ(10, results["hits"].size());
// with points:<10, flat-search
results = coll1->search("*", {}, "points:<10", {}, {}, {0}, 3, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], flat_search_cutoff: 1000)").get();
ASSERT_EQ(10, results["found"].get<size_t>());
ASSERT_EQ(3, results["hits"].size());
ASSERT_FLOAT_EQ(3.409385e-05, results["hits"][0]["vector_distance"].get<float>());
ASSERT_EQ("1", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_FLOAT_EQ(0.016780376, results["hits"][1]["vector_distance"].get<float>());
ASSERT_EQ("5", results["hits"][1]["document"]["id"].get<std::string>());
results = coll1->search("*", {}, "points:<10", {}, {}, {0}, 3, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([], id: 3, flat_search_cutoff: 1000)").get();
ASSERT_EQ(3, results["hits"].size());
LOG(INFO) << results["hits"][0];
LOG(INFO) << results["hits"][1];
ASSERT_EQ("9", results["hits"][0]["document"]["id"].get<std::string>());
ASSERT_FLOAT_EQ(0.050603985, results["hits"][0]["vector_distance"].get<float>());
ASSERT_EQ("5", results["hits"][1]["document"]["id"].get<std::string>());
ASSERT_FLOAT_EQ(0.100155532, results["hits"][1]["vector_distance"].get<float>());
// single point
results = coll1->search("*", {}, "points:1", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], flat_search_cutoff: 0)").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("*", {}, "points:1", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], flat_search_cutoff: 1000)").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
}
TEST_F(CollectionVectorTest, VecSearchWithFilteringWithMissingVectorValues) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 4, "optional": true}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::mt19937 rng;
rng.seed(47);
std::uniform_real_distribution<> distrib;
size_t num_docs = 20;
std::vector<std::string> json_lines;
for (size_t i = 0; i < num_docs; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = std::to_string(i) + " title";
doc["points"] = i;
std::vector<float> values;
for(size_t j = 0; j < 4; j++) {
values.push_back(distrib(rng));
}
if(i != 5 && i != 15) {
doc["vec"] = values;
}
json_lines.push_back(doc.dump());
}
nlohmann::json insert_doc;
auto res = coll1->add_many(json_lines, insert_doc, UPSERT);
ASSERT_TRUE(res["success"].get<bool>());
auto results = coll1->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488])").get();
ASSERT_EQ(18, results["found"].get<size_t>());
ASSERT_EQ(18, results["hits"].size());
// with points:<10, non-flat-search
results = coll1->search("*", {}, "points:<10", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], flat_search_cutoff: 0)").get();
ASSERT_EQ(9, results["found"].get<size_t>());
ASSERT_EQ(9, results["hits"].size());
// with points:<10, flat-search
results = coll1->search("*", {}, "points:<10", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], flat_search_cutoff: 1000)").get();
ASSERT_EQ(9, results["found"].get<size_t>());
ASSERT_EQ(9, results["hits"].size());
// single point
results = coll1->search("*", {}, "points:1", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], flat_search_cutoff: 0)").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
results = coll1->search("*", {}, "points:1", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488], flat_search_cutoff: 1000)").get();
ASSERT_EQ(1, results["found"].get<size_t>());
ASSERT_EQ(1, results["hits"].size());
ASSERT_EQ(1, coll1->_get_index()->_get_numerical_index().size());
ASSERT_EQ(1, coll1->_get_index()->_get_numerical_index().count("points"));
// should not be able to filter / sort / facet on vector fields
auto res_op = coll1->search("*", {}, "vec:1", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>());
ASSERT_FALSE(res_op.ok());
ASSERT_EQ("Cannot filter on vector field `vec`.", res_op.error());
schema = R"({
"name": "coll2",
"fields": [
{"name": "title", "type": "string"},
{"name": "vec", "type": "float[]", "num_dim": 4, "facet": true}
]
})"_json;
auto coll_op = collectionManager.create_collection(schema);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("Property `facet` is not allowed on a vector field.", coll_op.error());
schema = R"({
"name": "coll2",
"fields": [
{"name": "title", "type": "string"},
{"name": "vec", "type": "float[]", "num_dim": 4, "sort": true}
]
})"_json;
coll_op = collectionManager.create_collection(schema);
ASSERT_FALSE(coll_op.ok());
ASSERT_EQ("Property `sort` cannot be enabled on a vector field.", coll_op.error());
}
TEST_F(CollectionVectorTest, VectorSearchTestDeletion) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "vec", "type": "float[]", "num_dim": 4}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::mt19937 rng;
rng.seed(47);
std::uniform_real_distribution<> distrib;
size_t num_docs = 10;
for (size_t i = 0; i < num_docs; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = std::to_string(i) + " title";
doc["points"] = i;
std::vector<float> values;
for(size_t j = 0; j < 4; j++) {
values.push_back(distrib(rng));
}
doc["vec"] = values;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
ASSERT_EQ(16, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getMaxElements());
ASSERT_EQ(10, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getCurrentElementCount());
ASSERT_EQ(0, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getDeletedCount());
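// the HNSW graph starts with a small capacity (16 here); deletions below are soft (docs are only
// marked deleted) and their slots are reused by subsequent inserts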
// now delete these docs
for (size_t i = 0; i < num_docs; i++) {
ASSERT_TRUE(coll1->remove(std::to_string(i)).ok());
}
ASSERT_EQ(16, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getMaxElements());
ASSERT_EQ(10, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getCurrentElementCount());
ASSERT_EQ(10, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getDeletedCount());
for (size_t i = 0; i < num_docs; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i + num_docs);
doc["title"] = std::to_string(i + num_docs) + " title";
doc["points"] = i;
std::vector<float> values;
for(size_t j = 0; j < 4; j++) {
values.push_back(distrib(rng));
}
doc["vec"] = values;
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
ASSERT_EQ(16, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getMaxElements());
ASSERT_EQ(10, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getCurrentElementCount());
ASSERT_EQ(0, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getDeletedCount());
// delete those docs again and ensure that, while re-indexing up to 1024 live docs, the max count is not changed
for (size_t i = 0; i < num_docs; i++) {
ASSERT_TRUE(coll1->remove(std::to_string(i + num_docs)).ok());
}
ASSERT_EQ(16, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getMaxElements());
ASSERT_EQ(10, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getCurrentElementCount());
ASSERT_EQ(10, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getDeletedCount());
for (size_t i = 0; i < 1014; i++) {
nlohmann::json doc;
doc["id"] = std::to_string(10000 + i);
doc["title"] = std::to_string(10000 + i) + " title";
doc["points"] = i;
std::vector<float> values;
for(size_t j = 0; j < 4; j++) {
values.push_back(distrib(rng));
}
doc["vec"] = values;
const Option<nlohmann::json>& add_op = coll1->add(doc.dump());
if(!add_op.ok()) {
LOG(ERROR) << add_op.error();
}
ASSERT_TRUE(add_op.ok());
}
ASSERT_EQ(1271, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getMaxElements());
ASSERT_EQ(1014, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getCurrentElementCount());
ASSERT_EQ(0, coll1->_get_index()->_get_vector_index().at("vec")->vecdex->getDeletedCount());
}
TEST_F(CollectionVectorTest, VectorWithNullValue) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "vec", "type": "float[]", "num_dim": 4}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::vector<std::string> json_lines;
nlohmann::json doc;
doc["id"] = "0";
doc["vec"] = {0.1, 0.2, 0.3, 0.4};
json_lines.push_back(doc.dump());
doc["id"] = "1";
doc["vec"] = nullptr;
json_lines.push_back(doc.dump());
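// the doc with a null `vec` should be rejected during import while the valid doc still gets imported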
auto res = coll1->add_many(json_lines, doc);
ASSERT_FALSE(res["success"].get<bool>());
ASSERT_EQ(1, res["num_imported"].get<size_t>());
ASSERT_TRUE(nlohmann::json::parse(json_lines[0])["success"].get<bool>());
ASSERT_FALSE(nlohmann::json::parse(json_lines[1])["success"].get<bool>());
ASSERT_EQ("Field `vec` must have 4 dimensions.",
nlohmann::json::parse(json_lines[1])["error"].get<std::string>());
}
TEST_F(CollectionVectorTest, EmbeddedVectorUnchangedUpsert) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["title"],
"model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Title";
doc["points"] = 100;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("title", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, results["found"].get<size_t>());
auto embedding = results["hits"][0]["document"]["embedding"].get<std::vector<float>>();
ASSERT_EQ(384, embedding.size());
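// ts/e5-small produces 384-dimensional embeddings, so the auto-embedded field should hold 384 values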
// upsert unchanged doc
doc.clear();
doc["id"] = "0";
doc["title"] = "Title";
doc["points"] = 100;
add_op = coll1->add(doc.dump(), index_operation_t::UPSERT);
ASSERT_TRUE(add_op.ok());
results = coll1->search("title", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, results["found"].get<size_t>());
embedding = results["hits"][0]["document"]["embedding"].get<std::vector<float>>();
ASSERT_EQ(384, embedding.size());
// update
doc.clear();
doc["id"] = "0";
doc["title"] = "Title";
doc["points"] = 100;
add_op = coll1->add(doc.dump(), index_operation_t::UPDATE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("title", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, results["found"].get<size_t>());
embedding = results["hits"][0]["document"]["embedding"].get<std::vector<float>>();
ASSERT_EQ(384, embedding.size());
// emplace
doc.clear();
doc["id"] = "0";
doc["title"] = "Title";
doc["points"] = 100;
add_op = coll1->add(doc.dump(), index_operation_t::EMPLACE);
ASSERT_TRUE(add_op.ok());
results = coll1->search("title", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, results["found"].get<size_t>());
embedding = results["hits"][0]["document"]["embedding"].get<std::vector<float>>();
ASSERT_EQ(384, embedding.size());
}
TEST_F(CollectionVectorTest, EmbeddOptionalFieldNullValueUpsert) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "desc", "type": "string", "optional": true},
{"name": "tags", "type": "string[]", "optional": true},
{"name": "embedding", "type":"float[]", "embed":{"from": ["title", "desc", "tags"],
"model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "Title";
doc["desc"] = nullptr;
doc["tags"] = {"foo", "bar"};
auto add_op = coll1->add(doc.dump(), UPSERT);
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("title", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, results["found"].get<size_t>());
auto embedding = results["hits"][0]["document"]["embedding"].get<std::vector<float>>();
ASSERT_EQ(384, embedding.size());
// upsert doc
add_op = coll1->add(doc.dump(), index_operation_t::UPSERT);
ASSERT_TRUE(add_op.ok());
// try with null values in array: not allowed
doc["tags"] = {"bar", nullptr};
add_op = coll1->add(doc.dump(), index_operation_t::UPSERT);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `tags` must be an array of string.", add_op.error());
}
TEST_F(CollectionVectorTest, SortKeywordSearchWithAutoEmbedVector) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "points", "type": "int32"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["title"],
"model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["title"] = "The Lord of the Rings";
doc["points"] = 100;
auto add_op = coll1->add(doc.dump());
ASSERT_TRUE(add_op.ok());
std::vector<sort_by> sort_by_list = {sort_by("_vector_query(embedding:([]))", "asc")};
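// sorting by `_vector_query(embedding:([]))` orders the keyword matches by their vector distance
// to the auto-embedded query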
auto results = coll1->search("lord", {"title"}, "", {}, sort_by_list, {0}, 10, 1, FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, results["found"].get<size_t>());
auto actual_dist = results["hits"][0]["vector_distance"].get<float>();
ASSERT_LE(0.173, actual_dist);
ASSERT_GE(0.175, actual_dist);
}
TEST_F(CollectionVectorTest, HybridSearchWithExplicitVector) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json object;
object["name"] = "butter";
auto add_op = coll->add(object.dump());
ASSERT_TRUE(add_op.ok());
object["name"] = "butterball";
add_op = coll->add(object.dump());
ASSERT_TRUE(add_op.ok());
object["name"] = "butterfly";
add_op = coll->add(object.dump());
ASSERT_TRUE(add_op.ok());
nlohmann::json model_config = R"({
"model_name": "ts/e5-small"
})"_json;
auto query_embedding = EmbedderManager::get_instance().get_text_embedder(model_config).get()->Embed("butter");
std::string vec_string = "[";
for(size_t i = 0; i < query_embedding.embedding.size(); i++) {
vec_string += std::to_string(query_embedding.embedding[i]);
if(i != query_embedding.embedding.size() - 1) {
vec_string += ",";
}
}
vec_string += "]";
auto search_res_op = coll->search("butter", {"name"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:(" + vec_string + ")");
ASSERT_TRUE(search_res_op.ok());
auto search_res = search_res_op.get();
ASSERT_EQ(3, search_res["found"].get<size_t>());
ASSERT_EQ(3, search_res["hits"].size());
// Hybrid search with rank fusion order:
// 1. butter (1/1 * 0.7) + (1/1 * 0.3) = 1
// 2. butterfly (1/2 * 0.7) + (1/3 * 0.3) = 0.45
// 3. butterball (1/3 * 0.7) + (1/2 * 0.3) = 0.383
ASSERT_EQ("butter", search_res["hits"][0]["document"]["name"].get<std::string>());
ASSERT_EQ("butterfly", search_res["hits"][1]["document"]["name"].get<std::string>());
ASSERT_EQ("butterball", search_res["hits"][2]["document"]["name"].get<std::string>());
ASSERT_FLOAT_EQ((1.0/1.0 * 0.7) + (1.0/1.0 * 0.3), search_res["hits"][0]["hybrid_search_info"]["rank_fusion_score"].get<float>());
ASSERT_FLOAT_EQ((1.0/2.0 * 0.7) + (1.0/3.0 * 0.3), search_res["hits"][1]["hybrid_search_info"]["rank_fusion_score"].get<float>());
ASSERT_FLOAT_EQ((1.0/3.0 * 0.7) + (1.0/2.0 * 0.3), search_res["hits"][2]["hybrid_search_info"]["rank_fusion_score"].get<float>());
// hybrid search with empty vector (to pass distance threshold param)
std::string vec_query = "embedding:([], distance_threshold: 0.13)";
search_res_op = coll->search("butter", {"embedding"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, vec_query);
ASSERT_TRUE(search_res_op.ok());
search_res = search_res_op.get();
ASSERT_EQ(2, search_res["found"].get<size_t>());
ASSERT_EQ(2, search_res["hits"].size());
ASSERT_NEAR(0.04620, search_res["hits"][0]["vector_distance"].get<float>(), 0.0001);
ASSERT_NEAR(0.12133, search_res["hits"][1]["vector_distance"].get<float>(), 0.0001);
// to pass k param
vec_query = "embedding:([], k: 1)";
search_res_op = coll->search("butter", {"embedding"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, vec_query);
ASSERT_TRUE(search_res_op.ok());
search_res = search_res_op.get();
ASSERT_EQ(1, search_res["found"].get<size_t>());
ASSERT_EQ(1, search_res["hits"].size());
// allow wildcard with empty vector (for convenience)
search_res_op = coll->search("*", {"embedding"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, vec_query);
ASSERT_TRUE(search_res_op.ok());
search_res = search_res_op.get();
ASSERT_EQ(3, search_res["found"].get<size_t>());
ASSERT_EQ(1, search_res["hits"].size());
// when no embedding field is passed in query_by, the vector query should be rejected
search_res_op = coll->search("butter", {"name"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, vec_query);
ASSERT_FALSE(search_res_op.ok());
ASSERT_EQ("Vector query could not find any embedded fields.", search_res_op.error());
// when no vector matches the distance threshold, only text matches are returned
// and those hits should carry no vector_distance
vec_query = "embedding:([], distance_threshold: 0.01)";
search_res_op = coll->search("butter", {"name", "embedding"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, vec_query);
ASSERT_TRUE(search_res_op.ok());
search_res = search_res_op.get();
ASSERT_EQ(3, search_res["found"].get<size_t>());
ASSERT_EQ(3, search_res["hits"].size());
ASSERT_TRUE(search_res["hits"][0].count("vector_distance") == 0);
ASSERT_TRUE(search_res["hits"][1].count("vector_distance") == 0);
ASSERT_TRUE(search_res["hits"][2].count("vector_distance") == 0);
}
TEST_F(CollectionVectorTest, HybridSearchOnlyVectorMatches) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "name", "type": "string", "facet": true},
{"name": "vec", "type": "float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["name"] = "john doe";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
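// the query "zzz" has no keyword match against "john doe", so the hit (and facet counts) must come from the vector match alone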
auto results_op = coll1->search("zzz", {"name", "vec"}, "", {"name"}, {}, {0}, 20, 1, FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2);
ASSERT_EQ(true, results_op.ok());
ASSERT_EQ(1, results_op.get()["found"].get<size_t>());
ASSERT_EQ(1, results_op.get()["hits"].size());
ASSERT_EQ(1, results_op.get()["facet_counts"].size());
ASSERT_EQ(4, results_op.get()["facet_counts"][0].size());
ASSERT_EQ("name", results_op.get()["facet_counts"][0]["field_name"]);
}
TEST_F(CollectionVectorTest, DistanceThresholdTest) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{"name": "vec", "type": "float[]", "num_dim": 3}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["vec"] = {0.1, 0.2, 0.3};
ASSERT_TRUE(coll1->add(doc.dump()).ok());
// add a second vector whose components are each 0.5 greater than the first vector's
doc["vec"] = {0.6, 0.7, 0.8};
ASSERT_TRUE(coll1->add(doc.dump()).ok());
auto results_op = coll1->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.3,0.4,0.5])");
ASSERT_EQ(true, results_op.ok());
ASSERT_EQ(2, results_op.get()["found"].get<size_t>());
ASSERT_EQ(2, results_op.get()["hits"].size());
ASSERT_FLOAT_EQ(0.6, results_op.get()["hits"][0]["document"]["vec"].get<std::vector<float>>()[0]);
ASSERT_FLOAT_EQ(0.7, results_op.get()["hits"][0]["document"]["vec"].get<std::vector<float>>()[1]);
ASSERT_FLOAT_EQ(0.8, results_op.get()["hits"][0]["document"]["vec"].get<std::vector<float>>()[2]);
ASSERT_FLOAT_EQ(0.1, results_op.get()["hits"][1]["document"]["vec"].get<std::vector<float>>()[0]);
ASSERT_FLOAT_EQ(0.2, results_op.get()["hits"][1]["document"]["vec"].get<std::vector<float>>()[1]);
ASSERT_FLOAT_EQ(0.3, results_op.get()["hits"][1]["document"]["vec"].get<std::vector<float>>()[2]);
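// with a tight distance_threshold of 0.01, only the closer vector {0.6, 0.7, 0.8} should be returned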
results_op = coll1->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.3,0.4,0.5], distance_threshold:0.01)");
ASSERT_EQ(true, results_op.ok());
ASSERT_EQ(1, results_op.get()["found"].get<size_t>());
ASSERT_EQ(1, results_op.get()["hits"].size());
ASSERT_FLOAT_EQ(0.6, results_op.get()["hits"][0]["document"]["vec"].get<std::vector<float>>()[0]);
ASSERT_FLOAT_EQ(0.7, results_op.get()["hits"][0]["document"]["vec"].get<std::vector<float>>()[1]);
ASSERT_FLOAT_EQ(0.8, results_op.get()["hits"][0]["document"]["vec"].get<std::vector<float>>()[2]);
}
TEST_F(CollectionVectorTest, HybridSearchSortByGeopoint) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "location", "type": "geopoint"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
auto coll = op.get();
nlohmann::json doc;
doc["name"] = "butter";
doc["location"] = {80.0, 150.0};
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
doc["name"] = "butterball";
doc["location"] = {40.0, 100.0};
add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
doc["name"] = "butterfly";
doc["location"] = {130.0, 200.0};
add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
spp::sparse_hash_set<std::string> dummy_include_exclude;
std::vector<sort_by> sort_by_list = {{"location(10.0, 10.0)", "asc"}};
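// ascending geopoint sort from (10.0, 10.0) should determine the order of the hybrid hits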
auto search_res_op = coll->search("butter", {"name", "embedding"}, "", {}, sort_by_list, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10);
ASSERT_TRUE(search_res_op.ok());
auto search_res = search_res_op.get();
ASSERT_EQ("butterfly", search_res["hits"][0]["document"]["name"].get<std::string>());
ASSERT_EQ("butterball", search_res["hits"][1]["document"]["name"].get<std::string>());
ASSERT_EQ("butter", search_res["hits"][2]["document"]["name"].get<std::string>());
search_res_op = coll->search("butter", {"name", "embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {false}, Index::DROP_TOKENS_THRESHOLD, dummy_include_exclude, dummy_include_exclude, 10);
ASSERT_TRUE(search_res_op.ok());
search_res = search_res_op.get();
ASSERT_EQ("butter", search_res["hits"][0]["document"]["name"].get<std::string>());
ASSERT_EQ("butterball", search_res["hits"][1]["document"]["name"].get<std::string>());
ASSERT_EQ("butterfly", search_res["hits"][2]["document"]["name"].get<std::string>());
}
TEST_F(CollectionVectorTest, HybridSearchWithEvalSort) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "name", "type": "string", "facet": true},
{"name": "category", "type": "string", "facet": true},
{"name": "vec", "type": "float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "Apple Fruit";
doc["category"] = "Fresh";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "Apple";
doc["category"] = "Phone";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["name"] = "Apple Pie";
doc["category"] = "Notebook";
ASSERT_TRUE(coll1->add(doc.dump()).ok());
std::vector<sort_by> sort_fields;
CollectionManager::parse_sort_by_str("_eval([(category:Fresh):3,(category:Notebook):2,(category:Phone):1]):desc", sort_fields);
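// the _eval sort should dominate the hybrid ranking: Fresh (weight 3) > Notebook (2) > Phone (1)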
auto results_op = coll1->search("apple", {"name", "vec"}, "", {"name"}, sort_fields, {0}, 20, 1, FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2);
ASSERT_EQ(true, results_op.ok());
ASSERT_EQ(3, results_op.get()["found"].get<size_t>());
ASSERT_EQ(3, results_op.get()["hits"].size());
ASSERT_EQ("0", results_op.get()["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results_op.get()["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results_op.get()["hits"][2]["document"]["id"].get<std::string>());
}
TEST_F(CollectionVectorTest, VectorSearchWithEvalSort) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "name", "type": "string", "facet": true},
{"name": "category", "type": "string", "facet": true},
{"name": "vec", "type": "float[]", "num_dim": 4}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc;
doc["id"] = "0";
doc["name"] = "Apple Fruit";
doc["category"] = "Fresh";
doc["vec"] = {0.1, 0.2, 0.3, 0.4};
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "1";
doc["name"] = "Apple";
doc["category"] = "Phone";
doc["vec"] = {0.2, 0.3, 0.1, 0.1};
ASSERT_TRUE(coll1->add(doc.dump()).ok());
doc["id"] = "2";
doc["name"] = "Apple Pie";
doc["category"] = "Notebook";
doc["vec"] = {0.1, 0.3, 0.2, 0.4};
ASSERT_TRUE(coll1->add(doc.dump()).ok());
std::vector<sort_by> sort_fields;
CollectionManager::parse_sort_by_str("_eval([(category:Fresh):3,(category:Notebook):2,(category:Phone):1]):desc", sort_fields);
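// the same _eval ordering should apply to a pure vector search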
auto results_op = coll1->search("*", {"vec"}, "", {"name"}, sort_fields, {0}, 20, 1, FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.1, 0.4, 0.2, 0.3])");
ASSERT_EQ(true, results_op.ok());
ASSERT_EQ(3, results_op.get()["found"].get<size_t>());
ASSERT_EQ(3, results_op.get()["hits"].size());
ASSERT_EQ("0", results_op.get()["hits"][0]["document"]["id"].get<std::string>());
ASSERT_EQ("2", results_op.get()["hits"][1]["document"]["id"].get<std::string>());
ASSERT_EQ("1", results_op.get()["hits"][2]["document"]["id"].get<std::string>());
}
TEST_F(CollectionVectorTest, EmbedFromOptionalNullField) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "text", "type": "string", "optional": true},
{"name": "embedding", "type":"float[]", "embed":{"from": ["text"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
auto coll = op.get();
nlohmann::json doc = R"({
})"_json;
auto add_op = coll->add(doc.dump());
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("No valid fields found to create embedding for `embedding`, please provide at least one valid field or make the embedding field optional.", add_op.error());
doc["text"] = "butter";
add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// drop the embedding field and reindex
nlohmann::json alter_schema = R"({
"fields": [
{"name": "embedding", "drop": true},
{"name": "embedding", "type":"float[]", "embed":{"from": ["text"], "model_config": {"model_name": "ts/e5-small"}}, "optional": true}
]
})"_json;
auto update_op = coll->alter(alter_schema);
ASSERT_TRUE(update_op.ok());
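// with the embedding field now optional, a document without `text` should be accepted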
doc = R"({
})"_json;
add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
TEST_F(CollectionVectorTest, HideCredential) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_name", "type": "string", "infix": true},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_name"],
"model_config": {
"model_name": "ts/e5-small",
"api_key": "ax-abcdef12345",
"access_token": "ax-abcdef12345",
"refresh_token": "ax-abcdef12345",
"client_id": "ax-abcdef12345",
"client_secret": "ax-abcdef12345",
"project_id": "ax-abcdef12345"
}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll1 = collection_create_op.get();
auto coll_summary = coll1->get_summary_json();
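// credentials in the collection summary should be masked, keeping only the first few characters visible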
ASSERT_EQ("ax-ab*********", coll_summary["fields"][1]["embed"]["model_config"]["api_key"].get<std::string>());
ASSERT_EQ("ax-ab*********", coll_summary["fields"][1]["embed"]["model_config"]["access_token"].get<std::string>());
ASSERT_EQ("ax-ab*********", coll_summary["fields"][1]["embed"]["model_config"]["refresh_token"].get<std::string>());
ASSERT_EQ("ax-ab*********", coll_summary["fields"][1]["embed"]["model_config"]["client_id"].get<std::string>());
ASSERT_EQ("ax-ab*********", coll_summary["fields"][1]["embed"]["model_config"]["client_secret"].get<std::string>());
ASSERT_EQ("ax-ab*********", coll_summary["fields"][1]["embed"]["model_config"]["project_id"].get<std::string>());
// short api key: the whole value should be masked
schema_json =
R"({
"name": "Products2",
"fields": [
{"name": "product_name", "type": "string", "infix": true},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_name"],
"model_config": {
"model_name": "ts/e5-small",
"api_key": "ax1",
"access_token": "ax1",
"refresh_token": "ax1",
"client_id": "ax1",
"client_secret": "ax1",
"project_id": "ax1"
}}}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll2 = collection_create_op.get();
coll_summary = coll2->get_summary_json();
ASSERT_EQ("***********", coll_summary["fields"][1]["embed"]["model_config"]["api_key"].get<std::string>());
ASSERT_EQ("***********", coll_summary["fields"][1]["embed"]["model_config"]["access_token"].get<std::string>());
ASSERT_EQ("***********", coll_summary["fields"][1]["embed"]["model_config"]["refresh_token"].get<std::string>());
ASSERT_EQ("***********", coll_summary["fields"][1]["embed"]["model_config"]["client_id"].get<std::string>());
ASSERT_EQ("***********", coll_summary["fields"][1]["embed"]["model_config"]["client_secret"].get<std::string>());
ASSERT_EQ("***********", coll_summary["fields"][1]["embed"]["model_config"]["project_id"].get<std::string>());
}
TEST_F(CollectionVectorTest, UpdateOfFieldReferencedByEmbedding) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"],
"model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json object;
object["id"] = "0";
object["name"] = "butter";
auto add_op = coll->add(object.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll->search("butter", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
auto original_embedding = results["hits"][0]["document"]["embedding"].get<std::vector<float>>();
nlohmann::json update_object;
update_object["id"] = "0";
update_object["name"] = "ghee";
auto update_op = coll->add(update_object.dump(), EMPLACE);
ASSERT_TRUE(update_op.ok());
results = coll->search("ghee", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
auto updated_embedding = results["hits"][0]["document"]["embedding"].get<std::vector<float>>();
ASSERT_NE(original_embedding, updated_embedding);
// action = update
update_object["name"] = "milk";
update_op = coll->add(update_object.dump(), UPDATE);
ASSERT_TRUE(update_op.ok());
results = coll->search("milk", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
updated_embedding = results["hits"][0]["document"]["embedding"].get<std::vector<float>>();
ASSERT_NE(original_embedding, updated_embedding);
// action = upsert
update_object["name"] = "cheese";
update_op = coll->add(update_object.dump(), UPSERT);
ASSERT_TRUE(update_op.ok());
results = coll->search("cheese", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
updated_embedding = results["hits"][0]["document"]["embedding"].get<std::vector<float>>();
ASSERT_NE(original_embedding, updated_embedding);
}
TEST_F(CollectionVectorTest, UpdateOfFieldNotReferencedByEmbedding) {
// test updates to a field that's not referenced by an embedding field
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "about", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json object;
object["id"] = "0";
object["name"] = "butter";
object["about"] = "about butter";
auto add_op = coll->add(object.dump(), CREATE);
ASSERT_TRUE(add_op.ok());
auto results = coll->search("butter", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
nlohmann::json update_object;
update_object["id"] = "0";
update_object["about"] = "something about butter";
auto update_op = coll->add(update_object.dump(), EMPLACE);
ASSERT_TRUE(update_op.ok());
results = coll->search("butter", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// action = update
update_object["about"] = "something about butter 2";
update_op = coll->add(update_object.dump(), UPDATE);
ASSERT_TRUE(update_op.ok());
results = coll->search("butter", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
// action = upsert
update_object["name"] = "butter";
update_object["about"] = "something about butter 3";
update_op = coll->add(update_object.dump(), UPSERT);
ASSERT_TRUE(update_op.ok());
results = coll->search("butter", {"embedding"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}).get();
ASSERT_EQ(1, results["found"].get<size_t>());
}
TEST_F(CollectionVectorTest, FreshEmplaceWithOptionalEmbeddingReferencedField) {
auto schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string", "optional": true},
{"name": "about", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
nlohmann::json object;
object["id"] = "0";
object["about"] = "about butter";
auto add_op = coll->add(object.dump(), EMPLACE);
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("No valid fields found to create embedding for `embedding`, please provide at least one valid field "
"or make the embedding field optional.", add_op.error());
}
TEST_F(CollectionVectorTest, EmbeddingFieldWithIdFieldPrecedingInSchema) {
auto schema = R"({
"name": "objects",
"fields": [
{"name": "id", "type": "string"},
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
auto fs = coll->get_fields();
ASSERT_EQ(2, fs.size());
ASSERT_EQ(384, fs[1].num_dim);
}
TEST_F(CollectionVectorTest, SkipEmbeddingOpWhenValueExists) {
nlohmann::json schema = R"({
"name": "objects",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
nlohmann::json model_config = R"({
"model_name": "ts/e5-small"
})"_json;
// if generated by the model, the embedding would be roughly 0.1110895648598671, -0.11710234731435776, -0.5319093465805054, ...
auto op = collectionManager.create_collection(schema);
ASSERT_TRUE(op.ok());
Collection* coll = op.get();
// document with explicit embedding vector
nlohmann::json doc;
doc["name"] = "FOO";
std::vector<float> vec;
for(size_t i = 0; i < 384; i++) {
vec.push_back(0.345);
}
doc["embedding"] = vec;
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
// get the vector back
auto res = coll->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD).get();
// check the first few components of the stored vector
auto stored_vec = res["hits"][0]["document"]["embedding"];
ASSERT_NEAR(0.345, stored_vec[0], 0.01);
ASSERT_NEAR(0.345, stored_vec[1], 0.01);
ASSERT_NEAR(0.345, stored_vec[2], 0.01);
ASSERT_NEAR(0.345, stored_vec[3], 0.01);
ASSERT_NEAR(0.345, stored_vec[4], 0.01);
// what happens when the embedding contains an invalid value, like a string
doc["embedding"] = "foo"; //{0.11, 0.11};
add_op = coll->add(doc.dump());
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `embedding` contains an invalid embedding.", add_op.error());
// when dims don't match
doc["embedding"] = {0.11, 0.11};
add_op = coll->add(doc.dump());
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `embedding` contains an invalid embedding.", add_op.error());
// invalid array value
doc["embedding"].clear();
for(size_t i = 0; i < 384; i++) {
doc["embedding"].push_back(0.01);
}
doc["embedding"][5] = "foo";
add_op = coll->add(doc.dump());
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Field `embedding` contains invalid float values.", add_op.error());
}
TEST_F(CollectionVectorTest, SemanticSearchReturnOnlyVectorDistance) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_name", "type": "string", "infix": true},
{"name": "category", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_name", "category"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll1 = collection_create_op.get();
auto add_op = coll1->add(R"({
"product_name": "moisturizer",
"category": "beauty"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("moisturizer", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, results["hits"].size());
// Return only vector distance
ASSERT_EQ(0, results["hits"][0].count("text_match_info"));
ASSERT_EQ(0, results["hits"][0].count("hybrid_search_info"));
ASSERT_EQ(1, results["hits"][0].count("vector_distance"));
}
TEST_F(CollectionVectorTest, KeywordSearchReturnOnlyTextMatchInfo) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_name", "type": "string", "infix": true},
{"name": "category", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_name", "category"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll1 = collection_create_op.get();
auto add_op = coll1->add(R"({
"product_name": "moisturizer",
"category": "beauty"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("moisturizer", {"product_name"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, results["hits"].size());
// Return only text match info
ASSERT_EQ(0, results["hits"][0].count("vector_distance"));
ASSERT_EQ(0, results["hits"][0].count("hybrid_search_info"));
ASSERT_EQ(1, results["hits"][0].count("text_match_info"));
}
TEST_F(CollectionVectorTest, GroupByWithVectorSearch) {
nlohmann::json schema = R"({
"name": "coll1",
"fields": [
{"name": "title", "type": "string"},
{"name": "group", "type": "string", "facet": true},
{"name": "vec", "type": "float[]", "num_dim": 4}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
std::vector<std::vector<float>> values = {
{0.851758, 0.909671, 0.823431, 0.372063},
{0.97826, 0.933157, 0.39557, 0.306488},
{0.230606, 0.634397, 0.514009, 0.399594}
};
for (size_t i = 0; i < values.size(); i++) {
nlohmann::json doc;
doc["id"] = std::to_string(i);
doc["title"] = std::to_string(i) + " title";
doc["group"] = "0";
doc["vec"] = values[i];
ASSERT_TRUE(coll1->add(doc.dump()).ok());
}
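// all three documents share the same group, so group_limit 3 should yield one group containing all 3 hits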
auto res = coll1->search("title", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {"group"}, 3,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488])").get();
ASSERT_EQ(1, res["grouped_hits"].size());
ASSERT_EQ(3, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"][0].count("vector_distance"));
res = coll1->search("*", {"title"}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {"group"}, 1,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94, 0.39557, 0.306488])").get();
ASSERT_EQ(1, res["grouped_hits"].size());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"].size());
ASSERT_EQ(1, res["grouped_hits"][0]["hits"][0].count("vector_distance"));
}
TEST_F(CollectionVectorTest, HybridSearchReturnAllInfo) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_name", "type": "string", "infix": true},
{"name": "category", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_name", "category"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll1 = collection_create_op.get();
auto add_op = coll1->add(R"({
"product_name": "moisturizer",
"category": "beauty"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll1->search("moisturizer", {"product_name", "embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(1, results["hits"].size());
// Return all info
ASSERT_EQ(1, results["hits"][0].count("vector_distance"));
ASSERT_EQ(1, results["hits"][0].count("text_match_info"));
ASSERT_EQ(1, results["hits"][0].count("hybrid_search_info"));
}
TEST_F(CollectionVectorTest, DISABLED_HybridSortingTest) {
auto schema_json =
R"({
"name": "TEST",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll1 = collection_create_op.get();
auto add_op = coll1->add(R"({
"name": "john doe"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll1->add(R"({
"name": "john legend"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll1->add(R"({
"name": "john krasinski"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll1->add(R"({
"name": "john abraham"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
// first do keyword search
auto results = coll1->search("john", {"name"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(4, results["hits"].size());
// now do hybrid search with sort_by: _text_match:desc,_vector_distance:asc
std::vector<sort_by> sort_by_list = {{"_text_match", "desc"}, {"_vector_distance", "asc"}};
auto hybrid_results = coll1->search("john", {"name", "embedding"},
"", {}, sort_by_list, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
// the first 4 results should be the same as the keyword search
ASSERT_EQ(results["hits"][0]["document"]["name"].get<std::string>(), hybrid_results["hits"][0]["document"]["name"].get<std::string>());
ASSERT_EQ(results["hits"][1]["document"]["name"].get<std::string>(), hybrid_results["hits"][1]["document"]["name"].get<std::string>());
ASSERT_EQ(results["hits"][2]["document"]["name"].get<std::string>(), hybrid_results["hits"][2]["document"]["name"].get<std::string>());
ASSERT_EQ(results["hits"][3]["document"]["name"].get<std::string>(), hybrid_results["hits"][3]["document"]["name"].get<std::string>());
}
TEST_F(CollectionVectorTest, TestDifferentOpenAIApiKeys) {
if (std::getenv("api_key_1") == nullptr || std::getenv("api_key_2") == nullptr) {
LOG(INFO) << "Skipping test as api_key_1 or api_key_2 is not set";
return;
}
auto api_key1 = std::string(std::getenv("api_key_1"));
auto api_key2 = std::string(std::getenv("api_key_2"));
auto embedder_map = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(embedder_map.find("openai/text-embedding-ada-002:" + api_key1), embedder_map.end());
ASSERT_EQ(embedder_map.find("openai/text-embedding-ada-002:" + api_key2), embedder_map.end());
ASSERT_EQ(embedder_map.find("openai/text-embedding-ada-002"), embedder_map.end());
nlohmann::json model_config1 = R"({
"model_name": "openai/text-embedding-ada-002"
})"_json;
nlohmann::json model_config2 = model_config1;
model_config1["api_key"] = api_key1;
model_config2["api_key"] = api_key2;
size_t num_dim;
EmbedderManager::get_instance().validate_and_init_remote_model(model_config1, num_dim);
EmbedderManager::get_instance().validate_and_init_remote_model(model_config2, num_dim);
embedder_map = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_NE(embedder_map.find("openai/text-embedding-ada-002:" + api_key1), embedder_map.end());
ASSERT_NE(embedder_map.find("openai/text-embedding-ada-002:" + api_key2), embedder_map.end());
ASSERT_EQ(embedder_map.find("openai/text-embedding-ada-002"), embedder_map.end());
}
TEST_F(CollectionVectorTest, TestMultilingualE5) {
auto schema_json =
R"({
"name": "TEST",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/multilingual-e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll1 = collection_create_op.get();
auto add_op = coll1->add(R"({
"name": "john doe"
})"_json.dump());
auto hybrid_results = coll1->search("john", {"name", "embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>());
ASSERT_TRUE(hybrid_results.ok());
auto semantic_results = coll1->search("john", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>());
ASSERT_TRUE(semantic_results.ok());
}
TEST_F(CollectionVectorTest, TestTwoEmbeddingFieldsSamePrefix) {
nlohmann::json schema = R"({
"name": "docs",
"fields": [
{
"name": "title",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"title"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
},
{
"name": "embedding_en",
"type": "float[]",
"embed": {
"from": [
"title"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll1 = collection_create_op.get();
auto add_op = coll1->add(R"({
"title": "john doe"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto semantic_results = coll1->search("john", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>());
ASSERT_TRUE(semantic_results.ok());
}
TEST_F(CollectionVectorTest, TestOneEmbeddingOneKeywordFieldsHaveSamePrefix) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{
"name": "title",
"type": "string"
},
{
"name": "title_vec",
"type": "float[]",
"embed": {
"from": [
"title"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll1 = collection_create_op.get();
auto add_op = coll1->add(R"({
"title": "john doe"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto keyword_results = coll1->search("john", {"title"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>());
ASSERT_TRUE(keyword_results.ok());
}
TEST_F(CollectionVectorTest, HybridSearchOnlyKeywordMatchDoNotHaveVectorDistance) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{
"name": "title",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"title"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll1 = collection_create_op.get();
auto add_op = coll1->add(R"({
"title": "john doe"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
// hybrid search with empty vector (to pass distance threshold param)
std::string vec_query = "embedding:([], distance_threshold: 0.05)";
auto hybrid_results = coll1->search("john", {"title", "embedding"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, vec_query);
ASSERT_TRUE(hybrid_results.ok());
ASSERT_EQ(1, hybrid_results.get()["hits"].size());
ASSERT_EQ(0, hybrid_results.get()["hits"][0].count("vector_distance"));
}
TEST_F(CollectionVectorTest, QueryByNotAutoEmbeddingVectorField) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{
"name": "title",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"num_dim": 384
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto search_res = coll->search("john", {"title", "embedding"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([0.96826, 0.94, 0.39557, 0.306488])");
ASSERT_FALSE(search_res.ok());
ASSERT_EQ("Vector field `embedding` is not an auto-embedding field, do not use `query_by` with it, use `vector_query` instead.", search_res.error());
}
TEST_F(CollectionVectorTest, TestUnloadingModelsOnCollectionDelete) {
nlohmann::json actual_schema = R"({
"name": "test",
"fields": [
{
"name": "title",
"type": "string"
},
{
"name": "title_vec",
"type": "float[]",
"embed": {
"from": [
"title"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto schema = actual_schema;
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(1, text_embedders.size());
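// dropping the only collection that uses the model should unload its text embedder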
auto delete_op = collectionManager.drop_collection("test", true);
ASSERT_TRUE(delete_op.ok());
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(0, text_embedders.size());
// create another collection
schema = actual_schema;
collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
coll = collection_create_op.get();
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(1, text_embedders.size());
// create second collection
schema = actual_schema;
schema["name"] = "test2";
collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll2 = collection_create_op.get();
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(1, text_embedders.size());
delete_op = collectionManager.drop_collection("test", true);
ASSERT_TRUE(delete_op.ok());
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(1, text_embedders.size());
delete_op = collectionManager.drop_collection("test2", true);
ASSERT_TRUE(delete_op.ok());
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(0, text_embedders.size());
}
TEST_F(CollectionVectorTest, TestUnloadingModelsOnDrop) {
nlohmann::json actual_schema = R"({
"name": "test",
"fields": [
{
"name": "title",
"type": "string"
},
{
"name": "title_vec",
"type": "float[]",
"embed": {
"from": [
"title"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto schema = actual_schema;
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(1, text_embedders.size());
nlohmann::json drop_schema = R"({
"fields": [
{
"name": "title_vec",
"drop": true
}
]
})"_json;
auto drop_op = coll->alter(drop_schema);
ASSERT_TRUE(drop_op.ok());
LOG(INFO) << "After alter";
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(0, text_embedders.size());
// create another collection
schema = actual_schema;
schema["name"] = "test2";
collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll2 = collection_create_op.get();
nlohmann::json alter_schema = R"({
"fields": [
{
"name": "title_vec",
"type": "float[]",
"embed": {
"from": [
"title"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
auto alter_op = coll->alter(alter_schema);
ASSERT_TRUE(alter_op.ok());
LOG(INFO) << "After alter";
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(1, text_embedders.size());
drop_op = coll2->alter(drop_schema);
ASSERT_TRUE(drop_op.ok());
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(1, text_embedders.size());
drop_op = coll->alter(drop_schema);
ASSERT_TRUE(drop_op.ok());
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(0, text_embedders.size());
}
TEST_F(CollectionVectorTest, TestUnloadModelsCollectionHaveTwoEmbeddingField) {
nlohmann::json actual_schema = R"({
"name": "test",
"fields": [
{
"name": "title",
"type": "string"
},
{
"name": "title_vec",
"type": "float[]",
"embed": {
"from": [
"title"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
},
{
"name": "title_vec2",
"type": "float[]",
"embed": {
"from": [
"title"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto schema = actual_schema;
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(1, text_embedders.size());
nlohmann::json drop_schema = R"({
"fields": [
{
"name": "title_vec",
"drop": true
}
]
})"_json;
auto alter_op = coll->alter(drop_schema);
ASSERT_TRUE(alter_op.ok());
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(1, text_embedders.size());
drop_schema = R"({
"fields": [
{
"name": "title_vec2",
"drop": true
}
]
})"_json;
alter_op = coll->alter(drop_schema);
ASSERT_TRUE(alter_op.ok());
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(0, text_embedders.size());
// create another collection
schema = actual_schema;
schema["name"] = "test2";
collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll2 = collection_create_op.get();
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(1, text_embedders.size());
// drop collection
auto drop_op = collectionManager.drop_collection("test2", true);
ASSERT_TRUE(drop_op.ok());
text_embedders = EmbedderManager::get_instance()._get_text_embedders();
ASSERT_EQ(0, text_embedders.size());
}
TEST_F(CollectionVectorTest, TestHybridSearchAlphaParam) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{
"name": "name",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"name"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "soccer"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "basketball"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "volleyball"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
// do hybrid search
auto hybrid_results = coll->search("sports", {"name", "embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(3, hybrid_results["hits"].size());
// check scores
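// "sports" matches no keywords, so each score is just the vector rank weighted by the default alpha (0.3): 1/1*0.3, 1/2*0.3, 1/3*0.3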
ASSERT_FLOAT_EQ(0.3, hybrid_results["hits"][0]["hybrid_search_info"]["rank_fusion_score"].get<float>());
ASSERT_FLOAT_EQ(0.15, hybrid_results["hits"][1]["hybrid_search_info"]["rank_fusion_score"].get<float>());
ASSERT_FLOAT_EQ(0.10, hybrid_results["hits"][2]["hybrid_search_info"]["rank_fusion_score"].get<float>());
// do hybrid search with alpha = 0.5
hybrid_results = coll->search("sports", {"name", "embedding"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], alpha:0.5)").get();
ASSERT_EQ(3, hybrid_results["hits"].size());
// check scores
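// with alpha = 0.5 the vector ranks are weighted by 0.5 instead: 1/1*0.5, 1/2*0.5, 1/3*0.5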
ASSERT_FLOAT_EQ(0.5, hybrid_results["hits"][0]["hybrid_search_info"]["rank_fusion_score"].get<float>());
ASSERT_FLOAT_EQ(0.25, hybrid_results["hits"][1]["hybrid_search_info"]["rank_fusion_score"].get<float>());
ASSERT_FLOAT_EQ(0.16666667, hybrid_results["hits"][2]["hybrid_search_info"]["rank_fusion_score"].get<float>());
}
TEST_F(CollectionVectorTest, TestHybridSearchInvalidAlpha) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{
"name": "name",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"name"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
// do hybrid search with alpha = 1.5
auto hybrid_results = coll->search("sports", {"name", "embedding"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], alpha:1.5)");
ASSERT_FALSE(hybrid_results.ok());
ASSERT_EQ("Malformed vector query string: "
"`alpha` parameter must be a float between 0.0-1.0.", hybrid_results.error());
// do hybrid search with alpha = -0.5
hybrid_results = coll->search("sports", {"name", "embedding"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], alpha:-0.5)");
ASSERT_FALSE(hybrid_results.ok());
ASSERT_EQ("Malformed vector query string: "
"`alpha` parameter must be a float between 0.0-1.0.", hybrid_results.error());
// do hybrid search with alpha as string
hybrid_results = coll->search("sports", {"name", "embedding"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], alpha:\"0.5\")");
ASSERT_FALSE(hybrid_results.ok());
ASSERT_EQ("Malformed vector query string: "
"`alpha` parameter must be a float between 0.0-1.0.", hybrid_results.error());
}
TEST_F(CollectionVectorTest, TestSearchNonIndexedEmbeddingField) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{
"name": "name",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"index": false,
"optional": true,
"embed": {
"from": [
"name"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "soccer"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto search_res = coll->search("soccer", {"name", "embedding"}, "", {}, {}, {0});
ASSERT_FALSE(search_res.ok());
ASSERT_EQ("Field `embedding` is marked as a non-indexed field in the schema.", search_res.error());
}
TEST_F(CollectionVectorTest, TestSearchNonIndexedVectorField) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{
"name": "vec",
"type": "float[]",
"index": false,
"optional": true,
"num_dim": 2
}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"vec": [0.1, 0.2]
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto search_result = coll->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "vec:([0.96826, 0.94])");
ASSERT_FALSE(search_result.ok());
ASSERT_EQ("Field `vec` is marked as a non-indexed field in the schema.", search_result.error());
}
TEST_F(CollectionVectorTest, TestSemanticSearchAfterUpdate) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{
"name": "name",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"name"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "soccer",
"id": "0"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "basketball",
"id": "1"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "typesense",
"id": "2"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "potato",
"id": "3"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
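// id:0 uses document 0's stored embedding as the query vector (doc 0 itself is not returned); k:1 keeps only the nearest neighbour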
auto result = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], id:0, k:1)");
ASSERT_TRUE(result.ok());
ASSERT_EQ(1, result.get()["hits"].size());
ASSERT_EQ("basketball", result.get()["hits"][0]["document"]["name"]);
auto update_op = coll->add(R"({
"name": "onion",
"id": "0"
})"_json.dump(), index_operation_t::UPDATE, "0");
ASSERT_TRUE(update_op.ok());
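// after doc 0's name changes to "onion", its regenerated embedding should have a different nearest neighbour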
result = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], id:0, k:1)");
ASSERT_TRUE(result.ok());
ASSERT_EQ(1, result.get()["hits"].size());
ASSERT_EQ("potato", result.get()["hits"][0]["document"]["name"]);
}
TEST_F(CollectionVectorTest, TestQAConversation) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_name", "type": "string", "infix": true},
{"name": "category", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_name", "category"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
auto api_key = std::string(std::getenv("api_key"));
auto conversation_model_config = R"({
"model_name": "openai/gpt-3.5-turbo",
"max_bytes": 1000,
"history_collection": "conversation_store"
})"_json;
conversation_model_config["api_key"] = api_key;
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto model_add_op = ConversationModelManager::add_model(conversation_model_config, "", true);
ASSERT_TRUE(model_add_op.ok());
auto add_op = coll->add(R"({
"product_name": "moisturizer",
"category": "beauty"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"product_name": "shampoo",
"category": "beauty"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"product_name": "shirt",
"category": "clothing"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"product_name": "pants",
"category": "clothing"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
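// conversational search: the conversation model id is passed as the final argument, and the response should include a `conversation` object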
auto results_op = coll->search("how many products are there for clothing category?", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(),
10, "", 30, 4, "", 1, "", "", {}, 3, "<mark>", "</mark>", {}, 4294967295UL, true, false,
true, "", false, 6000000UL, 4, 7, fallback, 4, {off}, 32767UL, 32767UL, 2, 2, false, "",
true, 0, max_score, 100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", true, true, true,
conversation_model_config["id"].get<std::string>());
ASSERT_TRUE(results_op.ok());
auto results = results_op.get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_TRUE(results.contains("conversation"));
ASSERT_TRUE(results["conversation"].is_object());
ASSERT_EQ("how many products are there for clothing category?", results["conversation"]["query"]);
std::string conversation_id = results["conversation"]["conversation_id"];
// test getting conversation history
auto history_op = ConversationManager::get_instance().get_conversation(conversation_id);
ASSERT_TRUE(history_op.ok());
auto history = history_op.get();
ASSERT_TRUE(history.is_object());
ASSERT_TRUE(history.contains("conversation"));
ASSERT_TRUE(history["conversation"].is_array());
ASSERT_EQ("how many products are there for clothing category?", history["conversation"][0]["user"]);
results_op = coll->search("what are the sizes?", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(),
10, "", 30, 4, "", 1, "", "", {}, 3, "<mark>", "</mark>", {}, 4294967295UL, true, false,
true, "", false, 6000000UL, 4, 7, fallback, 4, {off}, 32767UL, 32767UL, 2, 2, false, "",
true, 0, max_score, 100, 0, 0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left", true, true, true,
conversation_model_config["id"].get<std::string>(), conversation_id);
ASSERT_TRUE(results_op.ok());
results = results_op.get();
ASSERT_TRUE(results.contains("conversation"));
ASSERT_TRUE(results["conversation"].is_object());
ASSERT_TRUE(results["conversation"].contains("conversation_history"));
ASSERT_TRUE(results["conversation"]["conversation_history"].is_object());
ASSERT_EQ(4, results["conversation"]["conversation_history"]["conversation"].size());
}
TEST_F(CollectionVectorTest, TestImageEmbeddingWithWrongModel) {
auto schema_json =
R"({
"name": "Images",
"fields": [
{"name": "image", "type": "image"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["image"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"image": "test"
})"_json.dump());
ASSERT_FALSE(add_op.ok());
ASSERT_EQ("Could not find image embedder for model: ts/e5-small", add_op.error());
}
TEST_F(CollectionVectorTest, TestImageEmbedding) {
auto schema_json =
R"({
"name": "Images",
"fields": [
{"name": "name", "type": "string"},
{"name": "image", "type": "image", "store": false},
{"name": "embedding", "type":"float[]", "embed":{"from": ["image"], "model_config": {"model_name": "ts/clip-vit-b-p32"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "dog",
"image": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBwgHBgkIBwgKCgkLDRYPDQwMDRsUFRAWIB0iIiAdHx8kKDQsJCYxJx8fLT0tMTU3Ojo6Iys/RD84QzQ5OjcBCgoKDQwNGg8PGjclHyU3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3N//AABEIAJsAmwMBIgACEQEDEQH/xAAbAAACAgMBAAAAAAAAAAAAAAACAwEEAAUGB//EADUQAAICAQMCBAQDBwUBAAAAAAECAAMRBBIhBTETQVFhBiJxgRQjMkKRobHB0fEVJDNS4fD/xAAZAQADAQEBAAAAAAAAAAAAAAAAAQIDBAX/xAAmEQACAgMAAgICAQUAAAAAAAAAAQIRAxIhMUEEEyJRkSMyYXGB/9oADAMBAAIRAxEAPwDrMQguYAMapnjmRG2YBCMHzgIkCGBBEkRjCxJxMmQAnEniDmQYWAWBMwIGTI3RbA2GcQGImGLaPYVkkiASJBgmS5hZJYQciCcyCYtwslmEDdIJgZhsFlocRgMgLCAlUFEZmAycScSaCiQZgMjEkCMdBZkFpMjEYjMyczAJJWFALJmZh7DI2yKYAloDGN2SCkqgoRumGMZIJXMnUKFQWjtkFki1ChEGP2QdkWrHoyyDCEriz3hB5qXaHARmABEB5jWQJbHSRiVxb7wHuIibFZc4xIHeV67CxAmx0ukerOo1O1EQZAZu8uK2HFbOkTRotRbtKVEqfM8CWh0m3blra19iZT1fXbKVIVQi+RH7X95pOq/G9PSrKF1FlbM2C9YbLKhONxHl95soxOhYUvJ0N/T9RUM4DL6qcyiZs+n332KLa8tWwymJr+o/JcSK3RW5AYYI9ZnOKStEZMevRe6TulcWcyd8yTMbDcwF7xVlkWLSIOSsVltmxFk5iDbmR4sNkGw3dB3CKZ4vf7yXMe4zGDGA47SnZY6nOIK6snjEnctyrlF8HmS5wJTW0kiNLNjmVuifIyvJaS6gwaySOxkkN3xJ2XsFFtXQ7TkK6k9hzOZ+J/iC2wrTpyfzbCqKDwccf1m36neaOn3PnaduB9TOG1p/3uiwMhAPPzIz/Ob4+8Or46qLkdEv4jUJXp6rdqou0v33Yj9P0Lp+nZtZ1FKHsOSCyl7HY+npNfpdQ9LqQVXHr6zmupfFet/1i6jwiyVkj3OPSbwTfg0lL9nfWfEF+l0zV13Cqwjg4yF/vOF+Guude1fxfo9JrOoWPpLdTtu4G0j7xGu6zqNTWTXprCx4IJxg+mJb6V0s6fSdO6rm5ta9521VcKQOeR37+k1SUIvYiSc3SPSra2ptZG7qcRZY57xm6y47ypy3J4izWxbBBE8yTp8ONwldUQeYGBLBq2r6xbVnPEPKsbxyXBRWDiMetwOBFhXLYxEpoX1yuqBc+8XmNvQoJXw3pDj6DhJOqNrVpVdcExg6fUvfGZWqtdACG4jG1ZYzCSl4R7MI45PaSH/hqU5AEILWeMZ+koLqfEYjnIh+O1LZYcROEvZSePtIvolYyMSfy1znGJSXX1seQJj3m5sKpxEsbu5FbRS/EDrOjTqPT7KaiA/6l+0866vptRp9WlFy4uVQcr7ec9J09LG9c5Ckyh8QVUaq9ryi5FYrX2UTv+PF+V4OWc41VHF6YvdViwkOvvK3Uun6fXAm1SlwHDjgzbjTmp/yxwe8ix2dWV0XjsfWdPV1GXk5Na+pdO1A251dXBPiAc+3/s774I1+q1avVboRRXWpJcv3PHYev9oPR9PVvU31o6/9WE7zpmjqt6aa9Pp6kzyCBiVKcpQaYLhSqIzjEaVVSGYSv43gs25ckHkmA2tDqWAnkTizswpXUhuqtXyEimyr9vAiK7vEB3LiLc7mi2TRTxSUupFi2xSwVYsvWr8cmUNVqSFwin0zK+60AsM8ydG0TOWlWumzuHijiSKlAAI5lBbL66t65wO8zxrX+bd39oNtcNIY4y/OvJS0Wt1C1fmDJz5y1+LB78GVfw23hTk59Zg0tm7D+fadEYxTs4s2aeTjX8FyvVVJ27+ccth1bbdvE1q6GxbNzNkY5xLtFbHO1ivGDiV9fsyeSLVMwGpSRjkHEtaXXU1HDGVDpdp5Pn3kipS20jgjgyZQUkVino7N/p2Nmkuv42qOPrOc1NjlSDzmb3UOKdBp9MOGYhm95qdWFrz/AGndix6QpDnPaVmn2ZyNwxEvV+eF25X1HaWNWEVSckfQTNHaozkggdjNUIHcy60oOCmAnHH3nofw6y1UAF927y9PpOG1SgOrV/q7k5/hOo6BYtKjeQScZ5gvIn4J6+tGk1p3AgOM4moqsrtJXaVHvN58UMgvosevI8McmaV6G1W00stbAZIJ7ieV8huGVr0d2FKWOzLQFOK7Ih7lUhmbJB5HrBRjW1ni1biOMZkuiWAHhDjtEo7ypoiWRqOyf/Bl2qpdAFr+8F2LVYoXJimVQoGO/nBrvao4TiPJiuNRJx/JSl/URi3uFfTsMMfWZstT5fl4kWct4zgMwOdvrMZg7FtmM+WZP1SaR1Y88bduhDuucg4BxyfWWQjWIXOCOAPYyiLBYoBQEn5sHnaY78Qa0VQxK54AM6HE8aM1rx9LIHgllZuQPWTXYBUQX2s3GYNdumakeNU29s/tQ0p09iEhnbJwqZ+b7yHlV0a/RJpP3/syhgeC3y+ZMM0ObQ1LErvUYxx3xAN2k09a2MXUjhvMg9vv5x2icNbuV1cIu84+nGR5S4Si2khPDk/ul1C7dSza4gkkKccxWpuBtKgNkegzEKf98Sx4zmSw8a8Hcwz3HrOy6LoqanUonHJz5+hms0+o32MvkD8xA95seqaZdLTZgDaylvvNPpSiVKDnB8xLFZd1OoH44ofTH1nSdD1Tm5atw3eXPlOMvsqXW6esD5ic/bE6/wCGNI19y2/pzgn2EGvY07Ow1nSH6pVp7Gt2BFwynjPMo3/CV6KbNLqFdu5T/wBm7u1H4bTVkknBx9eJY0ur8QcTOWDHN/kNZJxVJnEtUr6i1LDsdcLhuJmq6bdsR/EpTjHLS78a116fVpqi21bl9P2h/mc+uqrNrC5g2xcgHtOOUPpk23Zpus0dar/JcAtpwWspZc9wcgQLmFx/4dx/7DiUtZdgoyJ+VxkLyT7Ae8TdrNU5dlsO0HkMMMolWp00jKpQuMnRaZMhmPDkZA9oxNFY6BvEQZHrK12qtYVh1X5FwcDknMNLVKgjIBHaOmZqSt+ygjIp3MBuJ7jgd+8OnStW48TVK28AhlU5AwcZB+/74Oa1Hh2sgyMcjlYVaqitcwZhjhiPbGYSv9mWNL9WLFllbEFQ4TI2pyZDvrF2tWuR3ZV/UV5yR6dxHLam8FsrgkZAznn/ABCr1aiwfLlgTjHocwUULZpiKbLN4yoQIMknzBj+n6wbSypYpsTncOIdlaNXu4DYxkg+/l5GVPwj1nxPG7HkHnj3gkk7KjKUeJ8CTVE6tqWXDleMnuPWWtIMOGIx95Tr0n+4FjtuI/Qdx49DL
ml/MsetjtweDibOaZrHIvY7r6q/Rr24/wCNsEes84p6gyNsz3GfpPRNSDqumajT1kF2Hyg8TnLPhGlLaHa/dWqt+XjktkEc/vE1WSNdJlJGs0BOo6rTqLRtVV+UeuRPTfh9kXSodxGTzx3nI29EO2uxbEBStV2BMDPIB+n6eJu9NbdTTTQxFaoMZ9fWEsiocJKzuLk8XQGvPcjDenMNKV09OScegM03TOp+FQTc+4ZO0Z54A/nH3avx62NQRXClhg8gZIOfUcQU1qXxvyK6+W1mirUISys3BXtObao1AYP5m3a2VyCI3/VepaXxyuSP0hQMkY7/AOZVTU6qy26y9amxgoQMfvM58jTdik1XGHZUXNTFWawV4bbyCPp/WLNdDIwryeOctnHPftxC8XULWfBD1Oc42+hxn/EVQHFFewDxBk9uceh/vIhGm6Cc94q2TqKggNhYlyCd273lBjrEO2vaVHAJGZYuGpa5iagFICqfrn/77iLYahmJNTD6IT/WWo2ZKeo06evUjwyWBPYgc9+0ahZs1h2zznB4/jEaW90fcufExlFPkfeS6ucEg8/qAPaTSZMJSiuGeEM2FFO8H5Qe0LQ0hQzWFnf9RDY/dALEMjF+c/KAeMRh25NmSDjAx5xoWoxWw5TbxxyZDnbWGZS2eBzxArsZd24nHYZ5Mx7Du8tv84tSqpFgbbPm4C4Cn+ghquzaDuUkd+5AxKouG0BQAM8kd45tWdmdxKnsT3EprnCWhhqOAKnU7h6wUZktc7shOAfcSqlpVtoOQ0YzMqfKQMYyPWZyteBwim+hOWZ87DwcAE8CRZZsXavPpx2MTZqAzrwEO7PB7TDqa2DgHt29zGotroNJNjltcoSNuMENk5+8YlwRlUHaOdxlJeOF5A/jAssG47GPfOJWrFw2VOo5ZyrFhnaeOcnt/OA5r3uSQvHI9CR2lFr25O7j0EXZaLlCjduJ3ZEbTY+ezYtmqtGrBYhSC2efvMrvVS424JXkjyJ85QbVONO2CBaTwzDt9pNGqXehuGSvBI84ga/RbWwFXUn5gCO/bEM6mrPzuQ3mFTjMpNdWu7aCwPme+Jm+s8sQT58SkyWjNMQSeP0do06jAZgvtKtB+Ro2jmzB7cRXQ7GOK0rIAO08/eM07oKH3LuJPBMBwCQPLJkNxUMesSdIm+C3arToWvcDIihrtHUPzGtuz/1WFq60dFLqCfeMrqrAUhBkSk0XZXGu8T56KztJ27bBgj3jfmH6gffEJlUcgDPMtafndnmTKZpGLkVLHbZkeXbiA1j2bc9/rLiqu1hjzigoAOB5xJ30j3QqwqtJ+QknvI0aM4xgnmWUUE4Ih0AA8cfNBy4OK2kokHSXKGYVnbKqqC5PbHlN7RY5JUscHymr6hWiaj5VAnPg+Q5yo6vk/F+lWmVWqLJ3wM94S6cj51yPL6wn7geWe0tr5jyE6rOPU19wYhVABPnAOnduQRNjqcC4ADjErooNbEjnMZSVmua3wXanZlm/aheEB+q0Z85YZVJHA4gvWhYkqItWjV5IOk0f/9k="
})"_json.dump());
ASSERT_TRUE(add_op.ok());
LOG(INFO) << "Searching for image";
add_op = coll->add(R"({
"name": "teddy bear",
"image": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAoHCBYWFRgVFhYZGBgaHR8eHBwcHBwZHBwfHBwaHhoaGiEcIS4lHSErHx0dJzgmKy8xNTU1HCQ7QDs0Py40NTEBDAwMEA8QHxISHjQrJSs0NDQ3NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NP/AABEIAPcAzAMBIgACEQEDEQH/xAAbAAACAgMBAAAAAAAAAAAAAAAABQMEAQIGB//EAD0QAAECBAQEBAUEAAQFBQAAAAECEQADITEEEkFRBWFxgQYikaETMrHB8EJS0eEHFHLxI2KCkrIVM0Oiwv/EABkBAAMBAQEAAAAAAAAAAAAAAAABAgMEBf/EACIRAAMBAAICAgMBAQAAAAAAAAABAhEDIRIxBEEiMlETYf/aAAwDAQACEQMRAD8A9mggggAIIIIAMQQRDPxCUtmLPaE3gJaTQqxfGUIcCpBaFeO4kpVi3Q0/GhItTa1Nh9zHJyfIzqTq4+De6OiVx8vRNOd/aGmCx4XyP32jhsOskeYNyublvaLMnFFCndmqdG5xnHyKT7NL+PLXR30EK+H8QBT5i3M0/DE6+Jyh+r0BMdquWt043FJ5hdghWvjcoB3JtYb2v39I1PH5IuSG/wCUn6Qf6T/Q8K/g2ghfJ4zIVaYkf6vL/wCTRnieKyylKSQeYI1h+c5uh4vcKPGOMZAQg+ZJ81Lcg/5SOdTx6YP1F3s7kcm0pvvFPFTSpJNSpyHLnX5vr6Qrw86WlklZd8rOSQakq9hWPPvlqnqO2OKUsO4wXiOnnDjfWHmEx8uYHQoHlY+keZ4eZmNyHNtgPwlzvDFU0pTQ+YlgzludNg5i4+RU++yL4E/XR6NBCrguP+InKo+dIGbc84ax2zSpajkqXLxmYIIIoQQQQQAEEEEABBBBABiOd4/iPNl2H1joo4/jXzqfcxz/ACHkm3BO0KJk8uQP7/3jJwBLGYojZIv3OnSCQsZwVVao6i0ThZWfzWPPXZ6HojOGFGJAG9XjdaASzAudaiLQw4F6n6REhDRWYS60ygkXjRSrxIo0aNDTSkN0T4hLAAJUHffTK7N3J94rTVvQAfjt0/qDEVI61H36OfaBCQdLj6ke2veEqG5K6pjGg3rzDfnrEeJK2YKKRyvrvQ0+kTLQQd/z6UtbrGhLht/zvXVm5RRGEHwyA5XXYa3Fd6E6QnShMqbnW5QWYmpBa0NMTMYsBoGY8qjf3ctC6dhc6CAwewAq4H8gw8KTw6XDzJak+Rt41xcxSUpyhJzFg9nNnGvTpCTh2KShA+IACNR/MNpWJStLvTQHkb9YjML1Mv8AC8ZkKZj1FFUIf9wrpt/Ud2kuHEechbg/goatsaj8rHb8DnFclLhiKelo6vjV25OT5E+qGUEEEdhyhBBBAAQQQQAEEEEAGI5XxRhyCVAUI946qKmPwaZgYiuh2jLmjzlo04r8a082AUVAB3NNh+aw/wANICABy9dzArhmRdRUWq93FW/KxKp+keeoc+zvd+XoyUu7xhbCADtGF/n9QNiSIFrAiGauhY1iSdQ/xEKlat9IzbNEiMOahx1HRtWP53xIINLH+9N9PUPEcwGpYEXLDM40Zrm+zwYdZNTQW7Cn43K8NAwmMBXnU0cgtt7xXXl6mjj/APRpt9Cwiwvy5iRR9AAA9HLU/PWlMQEkgVJID/wQKks3WsWjNlaclLlTPXQnfViCbWislg5LMTSjij3J2+0W1uxBd20buX/LG1Iqy8yiQaNYD+YslkGJq+uaxpQ8os4HGWQsAEX2bfpEaZZKspActUU2/O8WRw5S8ik3eo3SoC/tA10NMZ4QZmSgGpper9dI9D4fh/hoCfXrCvgPBkywFqqo8mA6CHsdPBxufyf2cvNyKniNoIII6TAIIIIACCCCAAggggAxBBEOKm5UKVsKddPeE3i0EtOfxkx5ijzYdBSK4MRuamJwfxo86q8np6ErxRVmrMaGaEgqNKEk7ARKUpJtbk3+8LeK4RS5akJ1vp0B2H9xj9mq9GmH4xLmKyJWCqhA/Ux1a4HURcWAB1pend6x53wPwrjDiE+RKAiaFmeo1UmnkT+4EPQD9RfaPSlYJz5lqNbBgDys7PzjS4U5j0ibb9orFspBszA2Z7NV9XflC3G4hCCVHys5KiohLM9zcADkLVLw9VgkuFVBGzfm8cr4y8P4icgKkLQsIJORYAzeXKPm8pIuHYEs9hBEJvGF1i1DHhWITNTmlqSpJoClTg6NelfykazxRRo4FdxyJvb6Qj8B8PnYZEwzkKSV5UhFB8rutemoG5bpHS4xRIKVBnYAUYirvR9faHUqaxCltrWK5wdP+4LV7D/aKmDYKUf21oKOwrTXTvaJSCXINAsA130FNAN9DEaJYDpc1rptWjPUOPaGgYIWQSr7VZNq6VaL3Dp7rHVtaE6cwwHrFJbl0pAfmHuRQua2+oaL+FociQQABlJqUkMeps3Y9wk9I4Wt5ae49DFyKHBi8pJ3rF+PQj9UcVfszMEEEUSEEEEABBBBAAQQQQAYhbxonIBuofQn6wyhfxhJMumhBiOT9WVH7I55Jr+fmsZmEtTWA3jVTG5+0eaz0EazSEAl2A1jbC8OKwFroLhApf8Adz5RhPnmoQ1GzHtXvVvWHS7GLiE+2TVtdIV8V4jKw6M61hCBR9zoABUnkI4XFf4kyQrySlqD/MSlHcCvu0I/8RMWuZi1IUTkR5UJ2oCVdS99gI5FUnSOieKaWsxdtPEemSv8RcMospMxHMhKgOuVT+0dRhOJoWgLQsLQRQioO/cHvHhPw46bwNjFonFAqg1I0ez+n0ieThUrUVHI6eM9IxUwZSDq76ggwpM7zlCicqqJZzXblQDrFnHrcgClIVYkEgsWO40LXjBGzLqCS9NRm6s33HvvFfEqKXa53pTMKONS5NNo1w+JzhOhIDgkBilQswqLxvjTVL3uz6X/AJvDEVDNUrMbNl3NRduzaxc4Yr/i3LNT7A9Q5ikAKgfqFOqbs8a4AH4ictGbQXp7awCPWvDg/wCAjvz1MNYp8Ll5ZSEs1HbZ6t7xcj0JWSjgp7TMwQQRQgggggAIIIIACCCCADERYiXmSpO4IiaMQmtA46ahixuPtSIis6+t+XbpzhpxyQUH4n6TfkefX6wo+bdPM2+sebyS5po9DjpVOlvhjGYVf8jD1H9Q1KxaEGDm5FhTUNCeuvq3vDRU17Gv5WL4n+JHJP5HN+MfCaMQPiIIRMG75VjQFqgjQ9unl2O4DiJZZUlZqACBnSXIAYpflHuKl3EIuIzHTQjylKn/ANKgT9Iv/Ry8XoFCpHk+H8OYlZbIUDdflbt83tHX8D4OjD0NVak0JPIbQ9xIYFqmKOcOCak2G45bdbRFclV0y5iZ7La0P6RSnSrtFpC1MxZz7cu0RTlBCSo6D8aJGJsCQmYsV+a2lgSWi+skrJbWtLCoH2MK8JVZWoXrrT/baHKkkJoK09iNdQ1fSACumV5kXrQ6b2JttHQ+GMDmnCjpC
nNNq+jNCjADMoDQVNq19rfTaPR/DvDEyZdBVTKPpQRpxT5UZcleMjiMwQR3HGEEEEABBBBAAQQQQAEEEYgAIqYniEuXRSgDtc+0LeK8aCfLLLq1NwOQ3McopRU5J1uX9BHNy/IU9T2zo4uB12+jrpvHJRdJBIIqWDfWOYMxOfIKJ0Cqlms4/LxFLkvZVu/11jZdTRV/Xsf5jkrmd+zqjimPRnGKQwALN1c3pyFP61jErFKQGPy7EuR+CKi8HMJdK20YgFPdqt3i1LwU3Kyig8wlvUPUcuV4UpvtDrF0y0MchVjXar+hrFTFSwUqH7htFXiHBpiw3xMm5SgAnup94RY3gs6WkqRiJoap8xq2n4DGvi37M/JL0OZ5WUtmFbsmv1aKoSEk5U9T9q/7RzC8ViQkkzVIFWzFJJ9qX3il8PErVlVOW7fuPrlBY3Gm3KBQPyOvOMCCcynP7RU/0IV43FldVEAD9P0PMwql8LngBImNzAqesSp4BNWXKyOoBt9IagXkXUz01U5oG6EuCx6RalYgFiBrSrvYC9XYGOfxWBnSTlUtICg4NNL168tY3lyVuVEhbWIJ6VbTTtyiXODT07rAcQw8ghcwZwSDlS3lp+qtmNq8717TA+KZExyMwALORva0eSOtUvKoJrsc3swa/PSGPDApDBhd9WLOHUDRwBDnkc+ia45r2ezS5gUHBBHKN44rgPFig+ZKgGqDlrzSx9o7GVMCkhQLghwY7OPkVo5LhyyWCCCNCAggggAIIIIAMQo45xISksn5lCnIbmGOJnBCSo2AeOAx+KKySSSX1q20c/Py+E4vbNuDj8q1+iGZmUGBIO7ezPSkYRIGhN2J26A840C/KADXXrYWaIULCEgqIGUqFSA4dQHJi1qCPNPRGKFaD1taIs4JNbHX86xYkVFGY21pzPOCZL5Ac4GCZmSvMWB++hi+cVLQPNMQDtmD+kJ5kgKTlIFb6RTRw5CfmN7pzKLDlUMGpaNY5PFYRfH5PRzM41IqM7kXCQpR9hSOX4vx9S3QgfDSaFawc3/SkW6k9odplpSXCWfSp+toxxJSPhk5ApZ8qARdRs/LU8gY38m1pj4pPDgJXDVrWChReWlJdRUoFVWoosPLlPeGKcLiVLSVJQoJ1AKVdiP9o6bDYJMshLA7qo6lF3f0fuWhj8OlGgVeXY2s6FGFl+QKUnLp52SX6uxHN4r47G5BZki6jryQB8yob4+Wn4agtyLs5AZwT7/WFZwiJiEpXLCdQAGIAZ6ipt7xFcmdMcxvYhEleJWpZATQBKXCsqRZwWuHJ7xeRwcpAOZiNN3ajAb8obEBDpSAE8gU7WvvpX7VMQgag75tKNyrE+TZWYRmWEqSlRFyH0LJfYbv94volABjehoWV1DdxCTMCk5jmPzAZQGYM9PTvDzB1KfJ5Muln1Hby1/uEwRiWjIpqjV0sKHrcGOw8M8Tf/hKc3YnfbvfqY5Weh00FQbhy439CYl4bPyroa+UvyNe9K23i4rxeonkjyWHpkEV8FPC0g+vXWLEegnq1HA1nRmCCCGIIIIjmLCQSSwAc9oAEfijFhKAh6qLkchb3+kcWVl62csa/wAflYvcTnFUxS3DE61IGgvtFK357x5XNflWnp8MeM4RIJKsgpY5tA1+W0YWiXmEtRCzmFDoFE5c3YH05xohQBWv9qRTZjUbVANoi4XJK5qS7VCzSpCTT3alKGIlGjZ0qMOAkAMANqD2gVKSbqN7+zf784nWQBUsBEKFBdQCwsW+lOUNolMXTpjGmZuQoAInwyXHXQ1Pev5SJVSQWYfTpv8AaNsMgipP0p6QRO0O6ySKZIVcV5GK0h1zjTyS3G5zqZ2/0incwwxOJShCl3yglt2FB3NO8LeHAhCUqUVVJLgVJLqPQEmvPpG3K8WGPH29JlDMsEJLJJ/TclgFAvT00i18Mm3qIrGYQEuDq9APlYuQ+wH4YsJxSkpHl9aEDR9bNC4X3g+T+mVyvL5jTXZjd4XTw4dCXVmZtKmuYtQByb2iycao0ZIHf+YXS5JXnK0ouf1E0BvlDM4D/Yw+We0xcdfRqsLzLdLgDZ9KClT/ANVniriZ6EfMq4HNqhgSFMKFte0WsVhWRlR5c4/ScxFCxcsAL/KLm8c7KQS+V0AkNmcl6AXoBQd9olFMixeJ+GpJIACydWLFLZS+5GuoG0P8FOKUBRUXUGZswJGWwFLBXr0hIpIUt1kKQKAKDZjX0r9Yjw2IEtaJYzPmJFSXFARzp7dBB7GdfhphUnMkEEmoJ7UpYe7xFKl5VUc5w50p02vbeJpSVEhhmGzU0BoKWEXGSQ7Cl+R+8SMYeFcbkJSqpURQG2gJf3rHZx5ciatCnQFElQINAxKvwetI9E4ZiviICiz6gR2fHrrDj5478i9BBBHSc5iFPiGfllZf3Fuwqft6w2jmfExJUlOyadzX6RlzPxhmnEttHNTR+PFOYSHOt7hLkNQva9OhhhNwu6i52FKddYqzJJTZXmFWblp71fkY8s9NEOJfJ8ocj5aNWr25GM8BkvOWuzJYC3zEOTr+mNcdLIQUu7VdtK2d/wAMHhmY65iaMhKPUlTA9MvvFyKjpFj6jtr/AHBMUB0+kYc9hz9vpGiTQ8uXpAyUbiW71NPzb8eKxVoLROpbJLC33+/8xXQd6RtxL7MuR/RT40jMhKKnMXUBcpQyj/8AbJ7xYlBOViObG16GIB55xLuEJSkilFLL82fy+kXvgpyerl7tqTraI5O6LjqSrNQynJ8zaEkhwAxA1LU6QLXqE1ZiAerPtrp9IkKQgmjlZ81auW0FaPpvGVpVSjA0IpSr5ga6vTYiFHT0K7WFdDm6QOtYhnTUIWcz+eoyk1oxDDkBp+qMqmuWjTHJCUpmKFEGrbKGU9RUR0Wtkxl4yPHraWoPlJBAcBVrA12cfffnFoWMoKzVIcJARRNU83B6GukPeIIzZEuAFqBYBgEhiLm7J+kKELICi5p6UD76xzm4uQgpNAwNW6sWHoekSTsMpTKaqS6Tcgh6/l4mXOZQDuUhIJYsWJcEbtryjdOKuFA/pZgNyTrsYWhh0HB8SFywqtQHataW7w2kIZRdm0J7sw736xzHh7FJWpaGYJUDc0dyLikdGpiCz6H6sN7j3gfQ0bTkhIe/50i54exikz8pAyKAANmdvuIXKXm2Zt6kl6Nvb3jSRPy3cMbGxc+wq7xXHePTPknVh6VBC3hGP+ImrZgzsX/P7hlHoy1S1HBScvGEcvxxTz22SPd46iOT4sGnrPT/AMRGPyP0NeD9yiZVtIoYqW1QA9g9vMQ4PV/YxfmGK0wJJCSQFPmZ9iQ57tHntHehZxCoIBcsX9SD3hd4KmvMxD2BQkdWL9qiGHECtwySQdg46UfX82R+FZi5c+clSVJdVHSQ9AKUtzi4+wr6O2XPD5RUva9t2tAhwncVf+oE6qapNd2Br3+ZoMpAOr8/zWJwDTETwwD6jlufsPSJLikUOLLZKQ3mKr9rRTViSlCiotlST6B42isRlU6yXhCM4mrbM805dvKyUn0B9odzCEjzWB/HhF4VU0tK
Q+6jzNde3rDjE6XNQe5p9Ij/AKX/AMMBFnYlhXUlh/EZnruRWlPZz7CK60uol281W2AA7DV4rS8OotmWWBJpR60B5NprCDDASyiHeLa8MFy1JJooEezeoipiQoJKxeoTudi3aEfDJS0ElJWVK0FQ76jXqfaNlyJLszcNvotYecSlJUAVIDNzdm5faNf/AE0qoVB2dydW19eUbL4BPmLUsBKHLkFQrqaAFnNYvYbgExJJUtJBqzs5paoa3T75F6KFcIQls8wBRfytVtTUxFJRJQSHK1WcOwvYC/WOgHAE1oS9HcE+qq839DvRTwFCWzS5irgmqRS5LaaducIelXgKEImKIP8A7ihlBFWSLH0MdGpAYm7/AMNCHGYBCSlQWoZSCA4ykUoWDw+RNC0Am50F7O1IT7Q0V5c0ppTU9t2eg09Y1WlwywAFAkVq+nPewiSRI/U7VcgAF9a7394yvECwoxI05HTlCkKGfhSYZawjKSCGB2eo+jPHax59whZExKgKAi7OR5i5aosI9BEd/wAd/icPOvyCOW48cs4k2IS30+sdLMmBIJJYDWOT4zjxMLpozpBuTva0HyGvHGHAm60olQBrdqdHr9ogVVbpRnUKOmpY3fn/ADF7DcPoVLYk/KnQAb8ztDCSUoTlSnKL3udSdTHCp/p2us9CcYeZrLVU3GgO76xBiFnOEBC8ofMpSVNYNQByL/1r0MnFJOoPSo+sSLmPFKF/SXb/AIIxN8rmxF6MecRLWeRFrgH3LGGGP4bLmADKARUXZ+aQQDCubLMtkBJvuqpNyTYCv06QqloapMr46WF0B8yXID2KgAH7iFAwiwhctaiXBZRLmtCPv3h/iUkACjnflcXtb1hfNmaUNwzi/wBX7wtwog8MYpKEKQssxZ3LWe+lXh8taVlwRQ+lielD7xy8grlBWRlEn1Lk+b+RvEicYlEsZnBKDmKBRzmoRbQB6WhgPFCjmzDpZre8Kf8ANKK0JlhRbRPOuvMdqxHxCeRLSorJfK4NAXJckC3yimjc4acIwoQkLXRcxqGhCdAxN6110vAMMLgFqGacf+gVD7lgA/SGCChPlFBsBoB/y7CLGCQpacygUJJoLKKef7X9ekMMqUnygCjfm8LCWxbIC1NlQcv7leW3L5j6Rmfh1pIdJI5ZT7X9434nxqVISFTVhL2Fyd2Ar9oi4TxqViQoyl5stwQQQ7sa6FrjaK8OtJ8uzBKwklSFCtLFtictgIklrUS+a1/6GnvDEKox/qKmISEmhIBagYN0MPxzsN3oX4goW4UzKTUsHBZiVPQhmhchBkqKVEUSVUdiACQ1N4scSW3lAyguSSQC1aGjCtTyjVUxE5AQT8zpN3SrLcFtniRp4S4hByslTAN9vtSKyASk3qCzhiwNKXFPtBLngBkqzMAASKk6KPcbGJMOwKk2oAdy4r3c+0JIpvolwJAWz33rYOLltRb7x6Bhy6Um9BXePNpM7zhqbafho3aPQeFzs8tJ7ekdnx37RyfIXplDjc8k5Em1Te7UFPWFshAcmlKd9ekWcWofEXXUn0oYgSDkS1KOR1rp1jHke02zXjWSkbKUY5H/ABDmTBhk5CQkrAWR+0gs/LM3tHRcS4iiQgrmqypFBqSToBqYSYDxdhMSr4SgpJVQCYlOVT6UUR2MKU0/LC6pZh5jg8dNkrExCyFDnQsbKAuGpHtsucopSSClwk5XqHAJBhXJ8PYVC86JKcwqCSpQB0ISokA9qQyQXg5LVNYTMNeyyJlHjWbLdApUmmhHTUUhRxTjUjDBPxVfMaJSCpR3LCw5mncxc4fxmViU5pSwoC9wU9QaiDHmsWrSLGSw5R5SrYltKM1h2uLwl4hKdSVkEE0L2cAhi51A7sI6DFzEgMkZWq7Avv1pCzi8shTlqsA1SFVY8gzDmDEUjSWc6tRCiCxBtWxrSpseXPeFsviYSr4a0gJ1PJxS/URcxCyz0JIf+Lxz/E1HMlY2r0vBI6Oz4MEzpiyXyS1JIdmPk8qQf9Tlo6zh8pyZqqn5U8hdZHVVH2SIReGZGTDS6+ecQugslYFf+2vdo6lVABsPpCfsN6NSY1eEI8W4UzfhfE8zsFMQgnbNbvaHKpkJpr2Umn6PKPHYm/5pecKYtk1GVtO7w/8A8OuGrQFzlgpCwEofUO5V0oK61jtFEG9Y5Pxp4jXIyy5dFqGYrYEpDsMoNHJeps0bLkdLxSMXCl+TZ2JW0QqmHSpMeb+GvFOIViES5izMRMUEsQHSTQKBSBbV6MDHoBmU7Xialy8ZU0qWoh4jKCknLVYAFKPmYOaPeKeGmpQgM2YEGqQSKVPI3pFhU4BCypLmhfkmov8AjwvkIK1BNvM5OgA19IlDZXnYhf8AmFJBSEgvt8yQoE9E0prvpIicc4uA9SWrQnT8qYW8ewhRPM1C0rQWGUVIZITUa2uIlwk50EVqCB/Q3vFOcEnoxJ/YX0ezXr+bx6NwL/2JfT7mPO8BIUshIcs2ln1MemYNOVCU7AD2jf46etmHO+kjkuPyVZ1VDO6iHzM1gdL3i2tTHpaLfiTBZxmFiGV2sT6/SEvD55WChVFoorm1AodYx5Jc00a8dKpTEfjfhi8TIAlh1IL5dVBmLc+XWPMcLwfEKmBCJaytxTKoNW5ceUczHr/HOMowqAtYUXLJCQCSWfUgCKnBfGUiesS/OhZ+ULZlHYEEh+RaL46pT6JtS69jxCCAAouWDnctUxhQYxNGijGWGmnlfjzN/mlE/sQ3TzU9c3rEPgbGFGLQkHyzHQoaEEEjuFN7xB4t4imfiFLQXQAEoO6Uv5uhUpRHJoz4JkleMl7JdR6JST9WjrzI7/hzPuuj1pE0sWAT2ijiMy0KQ4qCAX8wyk6aDbWNJs9gVNGq8QpEpLFOZYKjQEuSSQ52BZ+UcTZ1pYcxj05AS9uccricW6wi7sCwtmZ+9YZcaxbrKU312H9xSwmCzH7xpEr7FTPY8FJaamoCUJKUJGjD+KQceUoSJ5T82RbNvlNucU+GYorQhRIMwJr/AKgNRsRtuYtKxIUSlmIDsXqCLjQxL66Ys30eAzFVrHsnhPHGdhJayXUBlV1SWrzZo8/8X+Hjh5mZAeWs+U3ym5Qdm03HeIfDPH5mEUzZ5ai6ka/6k7FtLFu8dPJPnOoxivCuz15JhJ4n8OIxWVQXkWkMC2YEXYhw1decM8DikTpaZktWZCg4P1B2INCOUTGjnlHInUPo6aSpHM8A8IIwyxNUv4i0vkYZUJcMTclRYkaCsPZw3pyEeV8V8R4mYsq+ItA0ShSkBI0HlIc2qY63wbxKdPlL+Kc+VQSlRFTRyC12pX/mje5eeTZjFTvijoEIBLGj2F31r6RUxCiiVMI+ZflHLODSuyR7xPNQAUi5JoB5Utq7aWeIONzUBQQBUHMWZqgNbW/rEQtoqniEcjhBX8yjDLD+HUaLWOhiTCzHLCOp4VgrE1MdShM56tos+HOBplpuo9Y6dCQA0RYZDCJ41mUl0Y1Tb7NVpBBBtHHY/hRkTM6HYlidCDdJ56iOwmKYQqxuNLENEcvGqRXHbl9HFcc
4XLxaMilZFguk6g9P1A8o5CR4FxKZqRmRlCgc4VYA3Au/31juceou4l5gNO7xVXxLImpYftWC7f6mp1NI5fyjo6smux4FPCjxPLmLw01EtysiwuU5hnSNyU5g0Qo8RSLLWEbHMFJLXYp+4ETzOKSb/GR3UB9YhPOy83o8f+AtS8oSSp/lAJV6Xj0XwrwE4ZBWthMXcfsT+3qTU9ANIb/+qSr50nmlQUfYvGhx6T8iSvmGSB1zt7PFXzOlmCniUvWybFnyMKrNAL13YbRQ8YYwS0JQB5rBhd2Pt94kRishK5hAWKIQg5j3INXF3ZoU4nh+IxK86kKrYMQkDvGcw6KqkjlZWHJLmpMdDwjhxJtHR8N8HkMV+kdHhuFIQGAjpjjf2Y1yL6FGDwJAiwtCjYkLScw209aQ5EoDSIZuBCou+PyRE8mMWrkomoUlaRstBAIcXb+Y4riXgB1kyZgSn9q3Ldxf0jv18NXQDR6ihGx5xAiViK5kA7EeU9wzP0jnXnL9Gz8K+xZ4e4MMNJEvPnOZSiflFWDAHSn1i/Nd4JhWP/imHokH7xUxOJnhsuHWXH6noX1YHfeFSdd4NNLrShifDOGWvOqUAomrKUkE7sC0XQiXJQEhKUpFkpHc0HM3ihNGJWD5FIfQJKvVyIlwnD8QbSlAkCqyDVq2u0Hjb6DYXZvMm5XmKY/tRz07bmFmGwKlmOiw3hpZOZYcmHOG4GRG8cblGNciYjwHDgmwrHW8LwjBzEuG4alMMEpaN5nDCq0yBGYIIsg1UHitMwoMEEAERwY2ERr4ek3SDBBE4itZXXwSUboT6CK8zw1h1XlJPYQQQ/CR+dGE+GMOP/iR6CJkcDki0pH/AGiCCD/Of4HnRZl4BCaJQhPRIH0jf4BgghYhaZ/yxjIwsZggAx/lYyMLBBDFpuMLEicMIIIQazPwBGP8uIzBAGgMMnYRuJQjMEUI2yCMgQQQAZggggAIIIIAP//Z"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
LOG(INFO) << "Waiting for indexing to complete";
auto results = coll->search("dog", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(results["hits"].size(), 2);
ASSERT_EQ(results["hits"][0]["document"]["id"], "0");
ASSERT_EQ(results["hits"][1]["document"]["id"], "1");
auto results2 = coll->search("teddy bear", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {false},
0, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(results2["hits"].size(), 2);
ASSERT_EQ(results2["hits"][0]["document"]["id"], "1");
ASSERT_EQ(results2["hits"][1]["document"]["id"], "0");
}
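
// Verifies that the hidden_hits parameter removes a specific document from hybrid
// (keyword + vector) search results. The embedding field is auto-generated from the
// `name` field using the ts/e5-small text embedding model.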
TEST_F(CollectionVectorTest, TestHybridSearchHiddenHits) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{
"name": "name",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"name"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "soccer",
"id": "0"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "guitar",
"id": "1"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "typesense",
"id": "2"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "potato",
"id": "3"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
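    // Plain hybrid search over both the keyword field and the embedding field should
    // return all four documents, with "soccer" (doc 0) ranked first for the query "sports".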
auto results = coll->search("sports", {"name", "embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
ASSERT_EQ(4, results["hits"].size());
ASSERT_STREQ("0", results["hits"][0]["document"]["id"].get<std::string>().c_str());
// do hybrid search with hidden_hits
auto hybrid_results = coll->search("sports", {"name", "embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "", 1, "", "0").get();
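    // With document "0" hidden (the trailing "0" argument above, assumed to be the
    // hidden_hits parameter), only three documents remain and doc 0 must not be the top hit.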
ASSERT_EQ(3, hybrid_results["hits"].size());
    // the id is stored as a string; comparing against the integer 0 would always be false
    ASSERT_FALSE(hybrid_results["hits"][0]["document"]["id"] == "0");
}
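
// Checks that a schema in which a single embedding field is derived from two image
// fields (via the ts/clip-vit-b-p32 model) is accepted; only schema creation is exercised.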
TEST_F(CollectionVectorTest, TryAddingMultipleImageFieldToEmbedFrom) {
auto schema_json =
R"({
"name": "Images",
"fields": [
{"name": "name", "type": "string"},
{"name": "image", "type": "image", "store": false},
{"name": "image2", "type": "image", "store": false},
{"name": "embedding", "type":"float[]", "embed":{"from": ["image", "image2"], "model_config": {"model_name": "ts/clip-vit-b-p32"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
}
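
// Feeds the CLIP text encoder an input far longer than a typical query. The test only
// asserts that the add succeeds, i.e. the long text is handled (presumably truncated
// inside the embedder) without an error.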
TEST_F(CollectionVectorTest, TestLongTextForImageEmbedding) {
auto schema_json = R"({
"name": "images2",
"fields": [
{
"name": "name",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"name"
],
"model_config": {
"model_name": "ts/clip-vit-b-p32"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
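    // Lorem-ipsum passage, deliberately much longer than the strings used elsewhere,
    // to be embedded by the CLIP text encoder below.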
const std::string long_text = "Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?";
nlohmann::json doc;
doc["name"] = long_text;
auto add_op = coll->add(doc.dump());
ASSERT_TRUE(add_op.ok());
}
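
// Verifies that one embedding can be generated from a mix of an image field and a
// text field using the same CLIP model (ts/clip-vit-b-p32).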
TEST_F(CollectionVectorTest, TestMultipleFieldsForImageEmbedding) {
auto schema_json = R"({
"name": "images",
"fields": [
{
"name": "name",
"type": "string"
},
{
"name": "image",
"type": "image",
"store": false
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"image",
"name"
],
"model_config": {
"model_name": "ts/clip-vit-b-p32"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
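    // Base64-encoded PNG, presumably used as the value of the `image` field in the
    // document added further below.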
const std::string kitten_image = "iVBORw0KGgoAAAANSUhEUgAAAQAAAAC3CAYAAAD9yoAfAAAAIGNIUk0AAHomAACAhAAA+gAAAIDoAAB1MAAA6mAAADqYAAAXcJy6UTwAAAAGYktHRAD/AP8A/6C9p5MAAAAHdElNRQfoBwgMNBDu0N1HAACAAElEQVR42sz917Pk2JbmB/62AuDiiIiMFFdUVRdZ3dPk2JA02ggzPs7/PQ/zygfOA2fMusnuZnd1Vd17M0Md4e4AtpqHLbAB94ibeUt0edrJOMcFHNjYS33rW2uJ+7t9NEaz2+3Sz37Pbrdnv09/D8NA13UYY+j7jr7v6fserRQIQfABFzxutkzTzDhNTNNECIH0EAgBCIFAIIVACIGQMv0rBFJKpBQIubxe/kUIJOn3KECQnkOAhNWx0nHSj1IKrTRSabRWaK1RWqOVRimJVOk9cnMu6dASIZbjCJmPLWR+rwQgAjEGQggE7wne45zHeYd3Du893vv0evMTYyCGiG9+X153OB8IwRN8wOfnYoggJErpdG1aY4xGa4NSKl2Lkml9gBA93npcPgfv0/lEAgSIIkJ7P0S5B+k6pWzXIK+V0iiV11Dn75SqrlvMdzytR8B7j3MWZy3WzlhrsdYyTRPjOHK5nDmdTpxeT7y8vvKaf06nV87nM5fLyDzPzV5qHjHWe3DrEeP2FbF+txD1GOQ1E2J7+Lj91N/7Eeu3/f0f5dyEWB9Rbi8mf7MQon63KPIIEWIkhEiIMW20GIkxEiP53/J783ddpfz+ZR0Bkd8Tl+Vb/8P6yVgPVX6aT65+Xx9F0G6D9hgxUr+/nHd7XVl6l+tov79+LhJiWK6f9vpj3UACquAh8t9ZSW1vw9V9+eJ2EKvfl+tcVqWcD+Va20WK118uRHo+1l3d3EvicukIWvmJq0PH7anlv5vNVZX+9j3N5hNl1bKSr4ooKaP0I26sVzkHwRdeJN760q+t+Y23psP/aaIav/LcP6RCqTuhuVnl+Mt9Lc+L1e0o+19ShTpkJRDSpg/5pwpytlZZAOr+j9slXb4o7TRxdcXbE2w3d1UbzaZens3vq5u+LEAj7KxkuFFg6RrTkfK/sf1M893lmLH5rtBef1gpv2UTFSUglpUQ7cqs1+B6fzXbJC63TcS8llcKuTnr2H6eRShF3hL17sd67EX/xkVB1nW5tePi+t+VVC9/l3W8qetE82vxDLMwi5s/8saJiKrMv7iCov3SuuLXb7yxV9ZfJf4kRXDL/7j1+z/YI7ZStJzFxq4SqzFJW0omcSgbvrij2RuIoW72ZA3Ta60VbJd1dVNXp0DztduFbwW7PBnq3/USWotd378Iomg3Z8xCHkO97EWgl+uJV+fQ3r3YHK7xMBrvYO2j5DVoPICqD27c8nbNWtUTEWmtRKOWRHMzVwotrjbul+R2kQaxWOfWa9psl1ZBxq2iazyG5Z6tTX1xL9vvFtVPWgS+hIfF2qZwbvmRclnDuD7RL1rRuu9i82bR/N6e+3ojLn/9PU3013yPf6xH9QZXPnH6TeSf66BJpHCBEDfWLSuC1WbL7yN7BtemP2vLxf1Ngpn+/WKslv8fI/n71ta/VQyti7r6fH3LYhEXK14sZSP4tzR+EwbE1Xk1VrE5v7K07SKIDUbBss+bTQ9CLF8Uaa9HrNZkFc+wtVBtSLM48csmaN3CRSBjXLv36Vza8Kl853KPl6VZhwsbn6te9nIpYgmHyhncCAXaH9n8LJ7Bapt/ZTetNtXyy013pDmZL3hiq3Xin16of8ljOb/Ws2v3KqttEfL+kdUCRarQ+8bVD238TEwAUmN9yte3mlzK9XLddNRiu7GbNzVnnKz4RiBXiiB9d6sE6mFXGER6MZTrpFEI2/BCtFu7tXLXuvVq9UVCJosiEDe3jcih0frjWx8pRsF6220Ff+udbGIyvhI/19/W1rwNn2L9nkX5VUW8BgVWx7vGBxpNKKoaaKy+vLb+LQ7Q7qN8GrdWdWvTr0/u1mKsbuz1bd1EDF8+/tewCH7xa3/KY7l31+pxuV1lt2djICIVzi4eQAjFS95avcUahmYDbGO5gpKXuOmWtl7tn3bTRlYbcvn8sgFF/Uy2wA0esP2u7XmXjb0Gzprnl0NfeQPx5tW0VraAX3Kx9q1H1H5q+zet9o7ro8etN3MdBqxwgdWSXsev6TxjWb3NZTRhR7NWq5CvemNteLB83xpuanRjDm4o+6WipdQMhCyCv8rKXIN9XxPGlYd0U4FuH+JnPfd1D+D2sb+icv7RvIkvGqnVWS2vyxr1tUBZc9Pr8zQWJzaCmQ9abl5Kq6VNHzfuerUeN5YtNhsvtMas3djt+25daGyPs1iw9FpryTYCsxLzTRy1lqjmyfbrBWSwqsS4NRxYRHtzqkuYRHMOKyV0tVCBVvja8GSFC2zWpdFHGUsUq/u5svx1CURzn7lSNK33dgXQNluNfKi1EC84wbJey/6RRSHI5X1/7NEahq/4/DceW0WRfeJ44z6In3MmP+M8/wkeV3IWF5XfRr2y+cRG8MNqk920yq3lz67/Er/JlfUpe+am69bG1o2QhlZoG9e3nv1KETVC2YQAa2FZOfYrpRBvHEa0B6zf3XgBm1RfslZrHV/XYJMN+DKw3FqsuH6qrlVzb0IDILbZkc3xFz32Bbu0ePj53obFa2q8QIibv8sRbocFzUqsvKKyNqLxkqRsOSELD+HngPBrY7ON7YvL+4XLr2u9WMer72zu4+Zb+NIR/5TX/rEecfNvuQLZvqPdSAUTIK4Fbw2MLPneqtFl47rlD1RXm0Xw6sHi+tSqDlg9t1jrNtEUN6+3N2VRLBsPpiQY2rCgCn1rgTdgYnM+YvV7Y+GFWHsAFfRjObnG6lc3/frMWV3NFolf3dbt/VmE9uohWDIMi1nIC9v6WOXwbfwdv/rThmHXYV/rdqa72K7RNu0nKxlps5e+8rhO+7FcYxtkrU7thqvffN8Xv7Z4JV85r6+d8T+UF7BaG3F93Ks9EONql0EGAReAr7WIbSiQ3rwSyli+tLl5xQNoUzibE6tAUYwLbNwIYN1GBYWuHsGC4q9i1mrVSvqqSWHVtzSKbLNBWxd3WZo2rr22rmuFEKtgN1j3dbzLNjqmKoPVsYjNZ7YRo7iKx6+scHuuN93x2Kz3dh3jamlbRdt6A2V/rLzGusarA2+/KAsON4RfVusvxcLoXAzKlwVhsSO3Q7lyEtfCH6/+XodS67Vj+/F/gscXYOQb2M7icS4KbH2NKwWdb5WsNu0q7l9b3lXo2z7ZKIGWzVVuYLo55TLiWhm039NaskZ4b538iqDTXNASwy4eRtwKfuPrF5c4Lhe2uth2odq/1ysv1oLdpEFL+uvrt1RcK8lrz59K4GhDoEZ5xS8J39UWWZ8Jq2sSK/3XLHx2Gpo1L0qg9RoXn2l15hCuouyqLpvwUeT4XzSCX9OBPwtp/6LJ/qOfXG6n+OqR/qkeW/hylRMqAHss4cqNMFMIFuO9HHO51ek3Wb6iCtjK+rMRtrW7vPUApJRIpXLct
tXc6wW/Wv6NsFeLVJVBY2HKGWzObX3QRvNdIdmtJ7GNbRZxv32yt1zctLyL5Ra0HsDXXdhWyWx9JXHra9aubbw62rJm7bnVNyzbqt5L0Vj2evg19tN6Uzdxk1Uo1fwvfmmttmFAqbVoFMHq5/YarnCaK4v+Sx6Nxd9kHr521H80RdFiDi2KuxHyP3aIK08hLkBgJG5AwK0LlDV8bIRu+TtW1bSANxIlF1du+fJFuL4GFq3865U72jrJ8Uqz1Ti4ehGNJ7EBEtcxayPMYruNbqBo7Vps9lsR/qr0Gg/gymW7um8Vl7+xILQZs3y0JQy78sxuCgPX4Fer91YhQWPyuaEQKvqfQ65sLNJptgKfV3bJE9/coDVkzAZkKUoS6310Y6/fpO5uL7D+3iQkr47Vhlh89bEYo/LHL1MB8We/sbkpP/tDt6+q/e5FqhLFXK5ejIv1iDEs6bjmgwsSnJe1sf6iVQJtPnxzUsv1LJTfRSCbfyMbhbS2KKsIdZMVWD3fHKv80bqq68zZGkRa+QONVYvLSd/YA60HcAud2abE2qe2OMGNeGD1zltu92bNV54JzXu32mwTHrXbRpR1235sFSDd2KtfsNw1VMoWXqa9UjGAShDKSmBznPjHJPXqsVybiLde+yMgw9XJk43GLzuPX5KcXIeev1wL3Pa91g+58tAreLTE0ltBWe55XsziypWDCwEteLM5nRVYU2mp16nG8pmVhSvvXFl5Gh2yxgjqsau12n5OrL/n1rK1a9GsTzl+G3sXOVtnRpbnljWK9U5UBKI9F9H6IMvarrIJVx7KjVjgi7e9VTjx+oVVVBRX+6INy9p/VyHaTS+pueZaB9CkABvhL3UASyi5SSl/SRi+IF3xZ0ndev/9cgXzj/PYqurmxvyMJfh66BkhVQMuYFD5WQgnNGi8WLnS5TxuVHNxmwG3vbKbF9cGlK17usIA1oHn4p1sDhaX81wri3ilLJYPXoctV3p+hUUsNyRSDPY6dVVFuOENbAqZq6VfowBfusNLMrS+U1B5/ULEq5Bx7YlvU4/X8tuy/NoVWDz8Jdy62nBtnLQxty1CXWL9VS+IjQfQ4kl/9PElvfBHDPzNz6xCthvKtoY9f39y0Op7N7+vjl3R4S/jIT/H6rcPWdatavQVwBOvBIcaJly75PWkbwl/dR2K0Dbf3IJ5V7K/Bt1Eu/FW+iCuNmV7ju2/N/PY65NcrmSzeVeCsHrmNjC4KgAq61K3TAMSssQAixIpZ9FKcuvGX636F0yd2Ag/KyVS17AmalqhXmdY6nPtPb+pmBfj0Lg6m9RoPreW998oAdEyARtcJcZ4vbd+jgT+oxn0f9gDx83vre6Ky8L+ojPZHmPxwkFWjy+mty1yuq4GDJQUYWgQ9OaRd9k25G0ii3X8WDfWF067UUSN3iioQT3GWlGUj8W1oG7d1+K6N+dwpcu2tLFbaba4/bl9LQsO0EpgtmpxeWb1Gr/YaK0+vzpq/Sd7au2rxWxsHaDrG9KAr+tc+RXeko/ThoYl/1/PRHzhR26bgyzksuo73Qp31sDJL1yvX7rEaR1jjOnfP+k+/YIzE5v98KV99keO0ziA9a8VCBgaYa8FPysArnEPS/xXvNuVW5mEp/K41+FV/aO1m2v5ub7AtajTCOTGY2kAg+1mXaUUm3+3UUW6jNuL3Oa660fE+oxb97rduMVVr7QI4tpHv7pVDQrQ7IAvanpxdRI3NlG9YdfHaZVRqzRX17zOwFwRZ2JcO08bCnRzOishLzUAomm7trACi5ewWpab5/7lFfqaSl0v3A3H9suPf0Ss4Ou25jqA/poHsNrejWxIEZsPt250tfi3XOrGHY7Xy9ruZZF3U3p/0palAUm74ABRFEt8W6uW7xRxLYJXzSIaQW6ButWGbo8R17jAtaIqf20WO69V6XlwtQbl9yq/5f/bGLwREtEuaBv6NEfYpNXEre8sa9+kImlqFVagYvtdVde32nBZo206ddWEoirYhSBUFmAN/K1/F0LWbIAU8oZnIBtjslQV/vHHlS/6R55f/v6jkENrPP6RH1/r0PSn6J8WG6oewKIIYirHC203oNBcaU49VaG6cRL5C5amDjfPfrWQK4yApRnj2nKvV7wNRWKrtqvXEpob1Qr4FitYL/JaCYjNKV+HArF538oTaGD7hcyxWaf6yVaMYwsacOujN/ze1XGXw7XU4lsIwu37sxyijcGu70kbMl4L03oF10LdPketIi3KQq56A5Tf09F+PkIfv1L880c+2WaMbq7xz69T+Id6XDlAWxbqFz7ztYdsrU0b74cI10DZVTRx9UWtjRNybWVWAphWeeVatn7KSqDze9cuexv5U59fAVgb4K/FAJZKw+ZYjUVbMIS4FtCrmL24zPHq+XSPrgumyhsXYKaEC7cD8bj964vClix8vQcb4K1Nu31ZTNu1XL6i9Y62mQBymvWmJWgzIJvXVtkjueT8b3oBudPKL5W3W5Hcz0sLfvmx3U//VI/tXY853Ao3FdVXPJTmHjZpQCgpwEoFDovAbGkeV5hLQ1lsQZvU1LEVnuuYZW1xt6e9dssXkG91R1aKoA1Tyv8i63Bm5QGsXNm1ohGrc/rKzd4SnlabdZMWFAl+WRD+chMad31zt2+v/LX7urIHBZu5YmS24cgGftjulSuJW5/YsvJNrULcvnf5jrbSrjo5V6DfUhG4btf+yyV3wXIbXyv+/ZTA0uzmv8Bjc95bLKAlvn39GtK/sj1gErQCgnyZOLNGdBdrsy4GatFccuzGDUGPjZA2G6qYoMgmLbZZg2J5S3y/CRe2nkWLAawArrWnuzJ/t4Tvlq27Wt2rmJfqxrbgiYCG8LfxAsTq5Os63d4Z6bjNt1OT4E0q8XoNuYoolmNsvb/Vdmvu07qHflzOYLVP0vKIuofagp9VE5C2IvCqruTnP0TdPxsDdqUEbh/85nfG+Ec+9Y/zaPfJ1fMNv6LK2s94yPYgZMEvHOAlG7AIy/WBS8y+BW3kJn2zWbIm9i/bWjTCuuYgNgvfcghaVL91zVuL3oJXqzTgxpVj7ems3d3t2TS/N1a+urPlRrQCtfIGlidaKG9tvTd3+pbiXB1hfU5b+m8Ny25sgoWEuAY2VqjBF9Kyy9ub+9JmOfiCELUhkWibyMimNmDTJETc2EdXf9/YoeL23+JqTa9P8qYlbe7dP4UXsPX5Vld7K5z7WsKjOVCMLD0Byz/r2P/a5a42qFrc9ksbd0026O3WZary2u6g9kLWfu8CMq1OohqtKrKrDEU571jfW1NY1fJDK+zrDb4BDVkfu3Fc6vW3NyU91XgANd6lea3cKMGX0o7UfN3au1mEMG7ObK1EWv278oSbqGydxYjrr22FuJig6pV86SGadVmEZQ1mbjMBSypw3QxE3thLt7CoWLbR9dnE23//8TAgLut+I97/pwoBNnem/n5dutzuAfHHjykaDKAITIV7ahgQ1gSgankLeh5y6nfNWS9WsBX+hb237L5VUrHx1m8tcesEF8u/rkOn8SwaYd+0N1vF/iE2Sm7t7aQDBFrFwRc3wdoj2CLE
jdG/SuAtzkG8vm8Vj2g9p83PygNqQMDWA1j+d3UGrZIs13Iz0FiH/9tTXK39ch75W9rvbsDI6gm0wi4bL1I07EAhG+/m9smI+GWx3Ar8H+sOVJ7fhpz/pesEWpc/PXEbnP/SFZWhUbUacKVZKvAX1uSaaoVYAYS31WBj7Tb+X2ysx1oRlDNZ/xYb61ssXv331sVt/25i+oRxNgoh3FAIcWE9LopwOcd6rK0PXF2ADQi3qZHY5vMWRkADAG5d1najt3J/deVX/X7XFOAVdiOaYy+HaD2cdv1iXH9+Ua7XQhhvrEOzLZYrFyyWXcpVHUBbWi6a50RFSX5+9N2SOm+XAouv/L1Whf9lRb85hz+ihL4anjReq1xf75oAVFqExWYaUIueb3PpqxPIm0AsweQaxGh22VW0v7G0tyLem7f/Rmpw5bFcIf9hUQjlfU24EWOrgFocoTmhZi1WX9sCXfWpZvu2xvhGkHzl6DaSGZvrupUaXT4ll+/duOLLd4iNxLMKma52Tlzfte3aE1srnA1Bux4bMLQ2kmlbgpWmoCUboDb9AVchy62V2zwTl39vR1qrG8o/DzH/8uPnqL74cz4fcwhAXId2SzPQcFP4r9zqjcAuLigrAYg3zi5u130T6i/vW8fqG5hgrRi2Lmk9/iYDsPJm1lmP9prWQOEmlPjC0i+hUOPm0ngEzRpt1+lLuYblxsUb67T1b6FWBbbn8zXyyDrx34Ad25tzHYasfbfrm7LGR1pFsM4ataBfEf7kCbSdpvjK4zYW8AWKBf/chf2Ljy8YjfaCSxiWsKDYhILLT2lmX/nsxbVOE4JaS7kMCq01A2HdgPMKuRcLEFaXu1qxDbRxA7dYQr22m+k26m4UE2sIZNEVG4ygdfc3wrxY+uaEyzFuAINXrlhs47MmDCrxLpJEamkHiNy+jW2TkGal6u+r82UhLG2TbwsTCVbSWI4pln/bo7d6feGD3Bqx1iz2di2WjbAiIjW46IYQJJsR5Ut9QB3VvlAIv7T1y/6/+Yjw9yYC/Rd/5FTf6qnm3+QxNghMiYFuXPdq9GoV4kKhLSAga+tXYvA1FtACUesYdLF4bey/RTDLzbmFBjRvYL3xxeblNsSoYrNx/Vpsga3Vb9eh/rcGQEuPhLVwfNklXSywbHGwmgm4xueu/IOrtUo3dm2RQ9aYC2azBooK+agtMf451nTlprXCvwmryt+39ms1BC0eQiKJLX0BmrHgDfIvpVz9iEaXrc6z/CW+LOTF0/05SuCfm6JYgX/iy/ti9f7VL9cPWUxla/BCTB5ALekIXxD+5vfQuMNbJLps7tssgrU5WlrWF7d7bcm3lqa1+BAbBXIN3qxITfWYNP9bK7OrCLiNQBoAcXn9C5u/uN0NS3LBB9Y+mWhgruXMlxvYhgfVQ4fGhYOWTCTYNNRcQ9rbm3EdAm9Sq6so7ApbWdYhNoXbrRe4UKPXIUDbBXiLB6zqAvLcyS8D0F9/fB0QZMV8FH/C8f8xH7/0dNp7Vq9p4yHKzf5anPjYCswtIJCVK70CpFpNLxZNfuux3puxEejt+9NrQSwb8lbWbAGwtksWV0eKdTdvcuvtO9tYhvb9FSyhbTW2PtMF6FqyImJtCatrvHHcryzzjVvfekNxuW81GGi9AdrYf/nOL5mFqmNjs46b9GjqGbnGgZYQ6dYpt5a/XYvm3JppQCkLoFBSrTgAtdfkxqx9KeZPAr8W6i+GB4KvphH/Sz9+kUOyIK03jEf5S6yzAKK90U0lYBX6ogTgChSrh2ziOrnp7PL1C9oKUD5mY5FqjCsEbRnzFp5aMIMvwGmN+0q5lk1Mv2x8bn1Dgys06qYFQivK3aLwLVOQRgls4b/2+26rw/p9V2m4NS9CtPe26qKbOyJ9aqt4GhfsuplKg71ErgzBrcfVNJsV2edGFeCGVl4Gzqy3k1jTxTe/ixhvKAhRW2Rff+6fme/PTczvxpuan8LFaPG3TRAOGQNoowlxw7ULLVhG4/Jvbn5srEzL7rpFiqE5rfpbA3ptXfPy1sL5X7JXt8GQJYxYwoRysu3nVp7kKqTebuKNkLZh8dZvjuv3bwGv4sZWz+DGtxSXXuTbVg7ZWldRrieyKOqqxNdru13rRaks0CFVsS7exPbqFm7E7fj/yotiDf6tnmtd0Ur42fQF2NQGtN7k17yY7eN2J+AvKapbz/+XVgpXWm+911eXdIOExrKX2odcg2NxAchCvLrJoXoH7Q1fNotsAeeN+3/dIbjFDFgDdXHZ4ds4popVNWhbAVrvwBIqrAViozS2sktcL/bWfdhqjI3RW1W8NQjfth6+buBGH1RfauXhXifuqpIWrG5qjOttUXVRRdvXu2fxL65HZ4lG2RVlw0bYtzTZNkvSPrc6cssNWLEA5Y0agPWciYVavbknv+jxFVTsn9GjgH7XlN+r27h+PsYvvArtWgniuhagbp0K9LQxXlj4ARvgZ9ULrtlSWy+gNZ4F6mq7/8StoG7Oq41pV8a6vbxGOCtOUT2XW0b+KnnZnMINV3m1+f5YP7gG2GtwEa6UQHvDb92y9F+7bauirveAlTIuRxS1X9l6y3x5S8UFiLyhA0Xz1xU3vm6c5j23hL+CgLd+5OIJyOXvFUlotUhfuwP/kEL+T48NbD3c+KX3NJdbAc7Nu9s90X5g1RFoFYWvNHxYeQHrUK8VqbSjq/bebvj6taIB8eLmNIugNqBWBd7ECtC5SjvH9v3NMtyy4BUHuF7ITXh549cbH9o+mmzNku7PCrFa11Wu5Na3N4dbtGdRBs2KrRej/hpAhJXQikYZNHpsddpSRJQARbxSSuvtuKRIb/7Utf6Ku92szzURqGEFNrUBbfhwXSDUXtDXXPxboec/s8cfcVSu7HyLl93aDjfWQIvk/S2u8goIXHL9bQedm/y3uGxyhCSIkKmcxQMop9r65lz/3pzfLe5CLW+HLyxOfkNzzNhc08qDbxfohrexPbHYfH9583UcWvwbASIS4zUQuLJ+NFbxi1cjVn6KKOdR07IN9HclbGKDijX7auXgRDoJBx3Z9xpFZPKRFyuwBS3exO3t4i14UAaQVwrrWllfpUc3PwkTUEttQMEDGhxg7YF86ff8zGYZ2ud+brqvfOcW0P7HLQ4SX31ltSfaKxeiQGXN+S4NY8r79OZjy/pV5D9UTKDGdatkesXkaFFv2htZK7nyAtZTXv5tN/rKPkWWGLpe6MZkFQmusVLxJESeIyAaHkGsYcEm19Ww3dp1gCs/vXk9Kb589k2uO20KkUlbkqthHSV234YXN77ulqaPInJ171ah2GazrAC45W8pQMfAY+/54a7jqCzRPTOeTjjZ0Zl7fnKCKNreMVv/YxsWtvdhOa9b2YFFGW0IQTd4AAtHQHxNLm4+bjbS+BPk9p+iB+CX4PL2cctjjV99x02zh4bsAjWvl56AFf3PQhMaV3ztNy7PVsCJJgOAqABhAfzSZxZLLVbmtVUK5RJEoyYWXba9+FattN7AVaS/0mOt5Yy3JYiN4lm9UFGypOiatSj6o3VdF3LUmu13y5EtefvyTGx2bV3
LDROzufJVbNeGIgBaSo4Gfj1YfvXYoYTg9DIzWc94GbmcP7B/8NyZ73gt37GJj26pnGQlF3kPTfr4ltVerdGmFFg0IGD1BEqL8H8mj1ry/g/kCbRmcbvnviTKV4ai6NwtSFwMbP6AvvXl6U4FllAgOwBNzLwcbnGrI5XcuQJ61rTT5rJyfrYRmZsXVS9OLCy5IhwxQmHYXl9ItuoiA1ub0KY6zo2BuoUJXJ/N+oYQtyFOvuaCcVSlyLIWZQ0E2TtYu2ZtfC5IVjDEuMFqNpcrtkqk+PiLuiz3xhB53An+63d7vn94JETJ89MTPkqcD0RpmG3Avzxz/3aPDQOT2q2vXFwrrXL9a3wIWlxgyxxcC7+oZcGimQlQioFSZeBSFLQc45d0C/7THrfc/+bFm8L5pzxap3YJ05LcfA3VWP99HQqv3ptD7EUBtN9aXOEQN17AV760vEdICgGhxB9yGwNfnb1YzjVbmULSWNa7ABXy+hgFgFj9uSiaFhhs4cEQI7JNbW5+L5tVZBcpVqGmZd6mv5tQoF3SdOtaYb/l4m3c8yqoiyKRUtZziTFWl5i8tkopjDYorVFKoZRaNkJMnkMJx3oROWjLv/ztW97eDbw8PXN6emJ8+cx8PuEuF6L1dMExnV9R3WeG3Tscqd5g7aUIitpf2a6yOVpgdpVGbkKh1kOSEpXPU7VhgGjHh7dswJsBD79UFBeM4MvH/KLXkV2YP0UBfdk5X4cbcbNr4leOd0vqb2EgongAyZK2bkK6YaUxxgoQvBIWVha1OYvr1M4KTs7EoSgWsK8xtFEUjXqVAW98AKoAbLC59s4sBSAVNLumMl+TWrJ1ESUFVzUUVVvdCsS2+ERd/QZAy97RCsws1rTBEYySKBGxMYdTMWCDQOfXolRoY+j6gd0w0PdDVQBCyKTkvMd7h3cOFQO9jPx6cPz67QP744Hn9z9xfnllfP7M9PqMmya8s5DfL33k8vSRYffApCRWlPbcjYJqVr5NwwI313cl8NXdj0sFoJIoVVx/tRQBtdwAIRHCbzzSPz0uEJvzvnr9q4eOv1Tf/LzHxrB9/Qzb124oi6sPpVfWHkDzQlv8Q0MHboGmL6Z+aDsEyw21sxyiOKWxVXn52Guxv3KLy5NVWSxlje17S5jByhtolRrXSuBK6Yk8iSjVVBdrKqoGiOvzaRe9qrASy6+TfY0eSHZUyoS1SIGSik4q8HCJIX1GKiYBnQSlNE517HZ7dsOO3W6H6jq01igpQGpCBO9mvJ3BXujdyLvB89t3D5jhyOcPn7Llf8JeLkQ7I0Q6dhQCZKDH4e0EbmI4KJxfvI5aV0DWsI3QV1u0IQpVYZftaxIpQ5MCVEipkhJQEqkUUqnGI0jhwRYBSnvr50vireagtyzlz3q08cgveNyActf7/mthxy/9ktVqNSAg25NogKVU9HHbC9hyBLaLXzf4lsFV8vQlXKZY+pI2W3sKi2K4cfPye+JGuLbvX5EXNu5oSxJqFYFolYC47ean71lnLgqolTyFDNzFSNugo+yZIvxKCDolmKNAacWu1zwOCjteeFWRyQaMDHgtEVKhTI/sB/rdHbu+Q3c9uuvQ2iCjJyAJUhMdSDkziAsPw4lvv/8BoQfe//7veHn/E+dPHwk+EnxAaIPK917HQHQeGSLaWcLpCXn/ayKqKud6LUURZ4UoxALi0qxFCp2qe9jgAU3zD9XE+ioVBG2Lg2QT/vx9Au9/sGq/xmuDXw4GftWdr5kzmsX+yqnQCvi6yfwtoFxff7SADddkoAUPCJUZGELmCYQNU3BzUlsySXsZ67+b04wQJasWU7E5VrWibIR/g30UHbImql2HNDFEolyHCWvXNXsSccEAWmS86qorV2XxEmSTfpMEhIAgBFpLeiJGwMNguDvs2PcSP3peXj2vZ4vKQjDRIXXPbhjQQ4fpOozWaAXCSKK1xGCJIiDFmS7+yN688vjuBzA9v/8//j2n52cCEnRPxBO8T9fvHSF4fIhYFwhIpO5w0wh2Ququxu3Lgm82c2yyDUX4m5shhECEdEOijISgkCLmvL+vQJ8Uat0bsP408wM2e+2XPLbW/pcQgr5otf+BgMhWSCt4XFNXLDdgi3+tjnCtEIqJqpmgq7vXHLdaeFgPCQ3lJxCCJwRPjHJRAo2VLydf04JQs2ZXzSNvIJZfQjFbzOBL6RJx9YlY0dpysyrRSUZkDMQgCDJUsI0Nt77iExUorKTm5j1iuWH5m2XOOBTwzkiQCiwREyNaSe4MGBF5PGh2g0IriTOavel52KkKsM5Rg1D0XUTpGbPrEcKCT/dDxgvRPqccv3vBxBeO735D1AN/+Hf/lqc//Ejs9oiuw40WFyLeBkIIBO8JzmKtZbYepCFIhfcO5RyYRu0KwRrAgdJ/YInvi2VcrKMIoaL2UgVEyMi/arsByet6gIwNKKmRUv29XeNf4gFsMwBf++jfJxsgFlezeqBth6mWBfqlc2te4ZapbY+gufXYuPyhtAPLlj/Un/SaDwEVQv1drk5mQcBvLV4LsLXMv9v4R1YqsXDVxe0bUyw+tz0CAK1Uop8CMnpEgIgkBp9EWQgQsvEQRPUCilu/YhiKAh4uXIAyJbdsCFWsvgpoETBKYIRgngNd1/PuTrJTDulHYnSoqFDSorvAsTeAYp5HIg4pHDAhOSPdiBAC5zxYiYwWMX9E+BnTdRy+/xd4tePj7/6W82mEwyPT5YI7fSZimC5nnAsEZwkkJTBPFuc8UQSi6sBodAxIyJ2Hmt1e73MG5+RC2CnMvUX/RkLwOAdKSmKUeCGJQiFEWHj/QiGyu99iSItyUEgh8H+ioMG1B/APFRJsD9O68rce6/D7OoxeXtqm4RZFwReOHL/6N+jW8a9nLwpY1obNreVPgh9CIPpA8IHQ0IVjzSosrnvD3l4vUZbYWNy5xsOp1jWWlGKFKOtrrfJIN3FRCKL9jhqngpSC46B5dzTstERGB/aCjYpptvhLYLJ7Yrcn9LvFHROCGAJIsR5FltendliNAZTOabuAIuKjJ0RPLxxH7eiERyvBodM46xkGw7EXxHnEe8c8z0xuxk0XYhQp5lcGMV0g+sVTQxLn1GNQhgQWKhmJ8zNSKQ7v/orY3/Pp7/6Gzz++ZxwdLkiCGHDSEYhY6ZjtTAiSGCJuDtg5ZQ+iTNkHodL9hpSKFLLc1yV7UXr6K5Ubecg2ZZceoZyjisQgkUEhpU8eWJPnl0oszUArHXidCaiK9svi9NXHbYG//fmf420UY3QlqBuMoLx3JQLtVm1wkvzmtTVsMDFxlQK/7Q3EuDXEmxBAUMCq9qzyZq599EP9IXfS9TFZ/+hDHikes3XMh2l88db1WIF4+dV0LbdclkhAIGMbE21AjVjGk1U/YYWwighCCrRUPOw7/vWv9xzdZ9w4I7QBYQnOE/eS+XziMn3E2Z7ZPuL7O9AGqbtEQ1UGEZI7KtBJEQkFeEQUKAKCPDDFzwwaEB5UYB8vDEaw60BriVECOoGSFolkHF9xzuKtw04T4+sTMQq6wx3RvWAvr0DEDAPKmKR0fQE4UuGPkB
FCYP/9X8HwyOcff8fT+w+MkyV2e9zsGc8T59cL0zTipwk/j+AdaTRcwFlPCIARRB+Q3jWAbVz2R1Xr6e5WC565CK2ghpzNKMi/WKX2wqr0d+v6SyVWyqGdG7htxBKLaf+T0PyyOVty1i+KFVbn9OVOWK33uoDiXyMarfZ6Ab037/+5YVHxBaoCWG7qEs0u9E2qhQ9hCQEWr2Bx/3UMxOwulzTbdR4vW/VWiG/F/+VkxOos6/OtRhP5pgdSieNVTUE+FELweH/H251gfpb47pi0oxoQKoAQ9N0dZj5DmIn+CWefsJNEDkekUgSp8bJDSA16QCiNCJooZIq7BagQIApktByEQBmBxtGFC73RGBkx3Y4YPM7NuOgRUmHnCe890XuCd1WDJtDV4qYLUikIHdHbBM7FiNQaITUyOCKB/Te/Rd9/x9PHH3l6/xOn1zOX2XF5/sh0mYlSMY4TL58/J+VtJ7ydUUpASFmBoBQxWAKWTiePppNgYxb4EIGQ4qiQN2MsRLC0GasCEAIRqF5TCHJF7Fm59nKd8pNCojMYmJSKou0RcC1c+b5XA/ozPYIWXV7l0q/xpasjbizv9e9bFu0m3q8yeCP2bRTKShx+IQayAsbzv3r1avNHcWuLwJdSYB/XyL+UEZ89gaQE8qZgjaBXg9GcQmwh/atlzu+q96TJLbc3oVUEFe1vQcGsCkQ+D+94fn5l/O47unvN+OkTYR6REWLwoFSyIKojIOh2d+w6iYiO6Caim3DnER8jQvfIbo/QBiE1QfagNEYrtIIQQMTAEAW9VHRKEOcJ6SZCEFnAPN5ZfPA4e8LPE975ml83uz3e2qqVg/N4a5FaY4Qh2Cl7YxCDTfn6+7f03/w50+x4ffrM04ePvJ4t5/PIZD1CGbTZ0d/3OO85fX7G2uT6i+gTYClk5vAHAiD9DiUFXd+B9fU1fBYwBRKVO8y0nqPMuMliEa8GleTNXBVGOwuwfmbTK1CVXoF/fOP/sfTcqkUZf+y9G4navLD22jdYmFgrgeolFCXwlYupyqL5kq9Sk7/w2OoXvb3M1aTYFXOuuPhtOrBF0kPFBcrJtm3DlrvduI2bdFkl66w+skh4deWbcxUxVuNT5pwVjVsXvvAL8jmN4wXrweBhPqHdnGr0VYfPzDnhJUoZnPPYyxndaUzX093dofoT09N7wusHpHlCKonpDNJ0SG0wokOJjoDEe4+2GqMHFBoXZpxzOZ12xs8jEYUPATtNhHkkeA9S4+a50ojqCimFmzzBB8gpuhgDIQjG588opbh//DVedHz4w9/yh7/5HR9+957X04iNgigU3d4wvb7ig0Dvjuyl4dP0ByZ7QUZAgsQjiPgQiDrVB3jnUKajkxHvHMFbCBEpQAmJkRFVqbslLPA1I1QMx9K6bOkcvHiKLYFbUEhHbUiQPIEcgmXBWQvsknb8Gj8nxmVv1WzFAvjUD66oy21K8yrZvTy/boNXBH9JW15hAH8kZFh9Wyv4+RraJN8veehWVGAJnaJoBDzftFB/L0qg8AFKRiDjBI2GaqvstkmUmzH/Jq3U5iUjEOXmXEsstKIPlK42beJkKaYYg2AMgl1I1rhTAqF0AtKiyCw1CMHip5kYI5MdsaNkMppuGBje/QUiOtz5ifnpJ6Kb0Aq00RD3RGUQUmGERDqFnwJ4w3R6JniP7vcgZULE55F5TlhEFAI7nnHW42ZLRGD2B/phQMaO7u4B0U2ofkD1HUoJnAvMlwlrHbt3v+Xw3Z/x8vrK04fPPH0+MXmBFYbT6YQQgskGdGdQumOOkXn2RGUIUmPnGZxDRI+SefV0Cqrm8ytGG2RvcNYS7ET0cyKUKNBKoDUoCShRyzbSfgEZk7coYkBWcDQigofokssUQpM9WeoXECz1ASUdqFRKHXq/EcBF+IVcet8voWkLqLGEK4KKV9QisQSnLMfL702GSizeLY2iEZkJW5MfMpfVN9DJl4S8Ee6v/V7OZyVSP8sbak5SlHLgq2TBckVpPkBG/Ev8H9KqxBDxIcWCygeiSq8JmYW9aP6aELhJnVj+H8Vtl24xBk2WoqC4y+dLWXOxIKktRbqx62sTvIyOd7uUa9daonRK+XnvAY8PiQuQwG6J0JoQHPY84ieL31m63UD3+APd8Q3jp98xPr9HT1MqztEWoTTdbkfwgTgGrJDY8yveJ2Wp+gGEws0v2PMrenfE24nxdGI8j0mNCUkUAnO8zyk1gRn2SG2QXVICWmisf8/um+95/LP/mtlOnF+emZ1H9DvcaeLz5yfG84UoJKb3dLsBqSPz5HE2cH45M18mgpuR0aG1RJge3WtU3xFj5PL6gpCKYXeH7yze9gQ7grcJ+1ACLZMCFSJUQLZkSUP1JD3EgBIRIyJBJA5GCBYRHJLUqUYriVEKo1SiOCuFUhqdhV9rhVaaqHNIkjdC9ZTl0mxUlixQNuxtZ6tSjVn6DQhRgOf0Hll6YgiByj+IPD2rkOGozkQFQlVu/iqAkJW996ncHr6cFlzF6jFeJQPEZjf/ErpwbGUs1hBg8QNqfN2y/rYeQLb4LRXYZxAwxIDMZrp4D5UdGG+d0BpzWRXeFNUkGv+hZgJiJRJVJ3nDJahHzOQfEdJNtc7x6eXCX765x/QfEMFSOqYKBUIohNT4KFBGEqTEzRPRRaQyhGC5nC547xlPL3TDjuHdn+MP90wf/pbxMmOMRxuP1xqhVIqHETifBUBIpDb4OXkYfh5BKdzlhHeeECPOBYiW7u4OqRTRg3c2eTc6EWHU7g4hNXtnkbon4nj58Hs+//SZ50/PjNPI6fU1pT9NxzSOuHjhNFrm0WLHiel8YZ4dQkS0jBijUcagdz1mN6D7Hns54a2n6/f0+2MKldyEt11SANEiYkyMPpKAEySlGYtIUpesdQY58QERHCo6DCEpDRmRCqSRyGhyRqWY4gyOOoMzBtd1BGdRMeKDX3evzt6lEslrUBVjSOfjQ0h4Vt6AIoPHKoOOpVo0xkiUkpD3pa6FSEkBWOexwle6vMhApZEJuESIXFUb8EGA8AifvluUfphbXGBFKV0QrwUuawHG5e+vYZ2tp98qGL1FBVdvaIS8LQtuCUIiiEoUCo2ygLRAJWvfMGZv6roK71Uz3uQPi2JYMTfE1VHSe8q1iAw+QSkGKnnT4ALvPz3z/OtveLO7wz6/JzqbtKvSeTPknqlSZU68RHR7ohT48YKQAmvThhV2RtmJ7u4denfP+NN/5PL6CWN0yiocDghl0g2SCqJHGgNCMJ+f8N4TEAifLApKoYwhisDldGa6TJiXz4kYY/p6rn6eUMYgtET3PRHB9PKJabQQZoa9YbITuu/oo0S4wGQ9dvacTq+Ml5FOSbzzCZcQRQGKZP2ERPUDZjcgJKiux/Q7un6XMZ8O73vwNrnxYQbvILhEqAou3coYiT5nDbwnOk90Fu88PnhECMgQMAK0FnipEklKyfpvp0TKrgjQIqJInYx0DIyA8z4boaWEXWSB1tka68wtEELgfMSGZf5lARuVXMrXY8jNcXJGR6iiAJLR8SEwS4VwFucTgUwrRW8MndFom
YKP4APOe6xP3SC98MhqREuozNW+Fo1lLLjW1tLXVGDZ5z9DCbRH0DffFGN2ldpBoMny+wbsCzEiy2TdGhKkm7n2ImoM0JzE+lTKX2uRp3oDKT27EG/rxZdzhdpDIFZ3oFkcEj8hQWcRay2fTjOPQ4+dLNFNSK3QOqRiG6mROjMDgycKhXOOaZ6T6z30eDfh7Iibz3ifctu7u0fufvuvePrrf8P56QPSaA5vB0ASrIUYkEqj+4EQHOdPH5hnj+z6JAy6RwZJtKf0dwYiz8/PEDzHt9+ihgPCGLybseMIck5utxmIyqMM3L3tMBZsgNFG9M7x+eMLh/sHTqeRLggO90ekEDx9/JRSmDISvMOHyOUyE/UIw0B/f8/Q96B1rhY0CC2Sk+w7YnQQPMJbop8Jbia4iWAnvHeJX+B82jMuWXFvLd7OOeRK97nE9RhFiGCNZtKKTsnUoDR4VPQYAppIh6eTkYuEeZ6x3uNyRqSMqlNCJHxCqhxSaKRKGY7ZB6xfrDckBaBKsVEs3bHSa1qn9GTakxEXPLP1aCeZXbqOrtMMnaE3BiVE9uSygrXJy7Be4MTCpiV7pj4sprj1DEqIsion3whOSxFegehfylrk165AwFZKS0FQaIC/1huoSH/DEYiZFBRr8cLyTz14k/6rCqEY/831LRa8OkJsS4ivnILYqI+SVihIsUgKxHvPHz488cOfPxJUhz2dELPDyRmlNWa3Q6nkgAohYL4AEr07oPod6GRJgp2w44SPEqEmzGDpjke++6v/M6ef/gbciBn2RKEI8ZT8CtODEHjnmacJ52F/d4+fRrwLhNIgJZ+vjwLnBf4yIdUTd9//Gr2/w89zSiHOE1JJtN4R0YTosDYiZMfDu295/P7XvL7OvP1h4unphY8/vqd/PeMDuGlEd10qH54nrJ2JQeDCxMs4Y2Og3++4/+Ydw/03SN1lAo9KYJ82KbwJHoIlho5gZ/ycQNUQL0Tv8M5i8/kG7/HO4eycUp6QBF90KC1ROrEIOxMxWqIlKWzKykR5j46eLno6CTslGS8j02yxIeFQPtewSCnopERnLMFojVKSKEhegPdJCeRshhZLQxJBUgARgWxwCAFE77HOcZlnjFVYH5FKMvQdu95gtE6fd57JzkyzWKjnQiB9Tp3HmBroZuCz6iJirXGXQlaSWysWxVAXBRo3RrYIeVUmTXgsMqiqb3oMsYAcm1x+ahSYEc2QW1TFFU+geAdSFj78ph5eLO5Ma/nLZbRWPz21ofTEjAeIlbzXT9aGEzWlmGO8mOnGkRrafHp+4Q/nB749vsF//kyYR6KzaK0J1mP2kYgiWgdCI/s7UBqhNfN0wV7OBB+R3QGhBELJZPFiQCjN/XffE+cTynR4H5Bao0wSkMunD8hhn46f2695b5nGEY9BKYPpBQHHdLmg+0eE6ZnHkfHlCdkN6GEg6A7/aolCEyablYcCEXDzyO7uEbO7Z7iX9M9nMAOn08z7Hz8xXs7Y8wvzOCOVwjmLdWlFtYDOCLou1eXrzrB/fItQJoGiledfilYCMSiiV/n7JVFKZJSJAKQcMVxw84ibJ7xNTUp89haF1igRE/VYqYX5h0JEDT5hDdFpsBK8Qg0aIzqsgklLxnFmtg4XmzBASroKJMqkUFRSMCGCCzGlOL3PFlFkvIDF1xQylVp3Xco6xGRA5mliNxkus2UOAa0N+2FgN/RolYrj7GwZZ40WY5UDKQRKBrwPuBhwQiR8gGbqtEjZi4Xx16T5CrhOy7HJWY6WAdmAhzTqoWQzpJRJAbRZgIVxlDVKkwpc4QCFCBJlTpmFhQuQFUPVVCs+RCblhDX4V95c8fvs8rTYwbrBYRNExCXVJ5oqoBWaSj6n3KqbCNY5Pr6M/OrXv2Y3nrl8+BHnYZ49Poyoi0/AUb+DYY+bZvShw80jl08fGF9f041SEt1pdoc9Ilr8/IoyAqVAD/uFwac7InD+/JmAYv+dwQvBdDnTHfZ4H7DTjMej94nqG6cUW9ppwhiJnyzT6yuYnp3MAik1USisdSmr4DwvT89IpRmOD9hpJEbB/f0OaQwffvyJ/d0hhS5K0O87QoiMlxQiGS3RWrDfDxwOO7q+x/Q92nS1YlAIQZQswzpjSIpBSBCaiEIKhYqSEAUygHQRXCDMDh9S3OydI9GmfQIQJTmL0COlrhkBoySD0fhOEzpFdBLhFToavBT0StIpyWQt1ucwgGRsjJIYrfKPxhidWqyR3pMUQAIblUwKoJ2YI6VCDz2m65G5iCw4zzSOnC8Xxil9pzKa3W7HbhhQSuK95XIZkSISfQolE1AqUBkXEM5XzGnJ7beEqTVKtvAmwLdA5drNXglV9dhZsiIl3ak3x1zktHI1QmUAxsIE3AKC2SMomQAVUwpNVJO9xgCKu1JTeTdbXGfFJMQi6sWKN0sRiLQNq+tCpP24WYqmrj+f96fPz3x6d893777HTyf8OOInj58tgilVqY0BMUV23/2AD4Hx9YXgE2oehUQoiek1Is5ABxnUwxxBCdzpQhAaKRTeWqZpIoiOIYAZDowvp5RaFQohFME57GxBCKQS7A9DAgxnC84jnSOcTrgI3f5ItBY/XvABPIppHBknz8PbB6bLBR9OmK5PG/bsefzmDSEEDoeBTz/+AWcn5nHi5emMkBJtNFoLuqHD9D3KJLeZmLoDheCSZY+KkkgXUiWShhAoUVh6ElAEoeiEBmEIQhOEwYtXQjzjw5gte0QIh5Bzip0FSB0SAzAGJB5FwEiRgEuZPAyl0yuJjCTotML6HFvnikSlJDozNDtjcu9EhZBp0EyhsZNTxinVV7tiIITC9AOm75HapNDPe+ZxZHcyjNOMDRGhNcMwMAw9WiXMSCtJ8A5rZ7zTaSfLhB1USyzAB4GKcslyVeEvIGDj7YolPCndu1uXv7S/K8+3nbpUTXOm57WSsgrzNlWwBPCbECDmYqDcQGOFE9SeAMma5HbCxfloGFKh8TqW+Kak8UoML1cgR3OGDWp6a4DIslrrX2uMla9nHC/89e8+8PAXbzD7PRf+QJhO6USkTJkA09E/vgWdiDxutkznkRg8qtfoYcfu/sCb795gjESJgMQtNRRCgTY4Z5nnGZRJysB5usMR9fEnrLXoziCNZn5J+XY9DHSDATejhx2Ti1zG98TzhV73CDmB0ImQFMg5aU+wFiE75vGCm864GBFSY73Cushuf8dv/+w7Ph2G1JNgvPDp/SeUORFIbnkvNdKoJCi5k5EyQ6pDKBZkg0hLIUEJooypc49SSGWQyqB0jzTlZ4c0O6J+JZ5PCTCMHi8iLkqkjwjr0n6RMmVNSOQhERJJKVnrdA+lBB0FwiiUlnSB7P6n81cNe9B0hs4YtDZoneoKiiCJmPs1kNOZeZ9KIVO7NTMkBqdMVaHz1NEbxTRNuJjCmL7vMV2HFBLrHBCZp4l5GvFOImWHUR4lLNJS8YZSUVsFPFdXys3OLtWWMpNbEgZXjGje65mbsNTttPyBnMLMr+uHx4cVql9ZfVlkjNaYvqPrDNrovKAaoVIcSOOqtJa7kjKq5S3mv8IQ
XIt1rEg+RZOVlt7lPUWfbIS+Rji1g+9yPqUzzZIMXFhhwQeeXl75eH7gu7s3RPWfCNGl/gBeIMxAf3dHdDOn9ycur6+M51eCtVk4FP27A/fvfsX+8R4ZzgR7RtCl3K8D1EB0M/PlREQilWY6nXGzw3QaN01EqdHDHnSf8QJDN+yxs8VOFrOD4+MDKoODUndo02UB0ZlN6FBaI7sdygdePn9gnmecj4QoQWogcn594jIFLqNDCYE3hm634+HtPZ8/PqUwJKq8TgmcCiEk4VV6qe8vt3G1Q0vrt1ztp3TFPpTpkxLpdqh+jxyO6OEVO11yTYMDEQhK4HPxkIie6B14nwQ/MwmX9vDL/pMSFBJ09kSURpsuCzoIkUhfXddhTN7T2iSBzvhRmV+RVf/SfUgbtO5y3UeO70dNp1UCcmPiiOiuR+k0b2eeLd47xv3IPI3EGHE+4pzMfAHJ7BzaC7zPHbbyGqqMWxTeAqSYXeUwpvSzWM1cIGfqvMe7xSNva2OKnHgfcCGg/+zPfpPjukLkafL5JHRWG4MxJi1c12Hy30brFT+7IKWdMclSRIgx4Hy6XSU3W5hWsJxYAQsRC1hXvYHqQTRkiHbntZFDmdKzfS4uKicWahpLduDlMvOrd4+YuzfM5wmCIPqId3D+9Ikon8AMEAKmH3AiuZ3d/ki3OyCVxjmHislljDEk8lAApEwlt9YiVaKF6mFHtx9AKmS/w82WeZoTe7DvkELQH+8IL8+MzhJCTJbl7VvsOCGUQShFiAFRrRUgFD6AHc9M48Q027QRA0nxzA6QnE4juusJQuCsRxCSqzrbpCiVRBqV8LwYkhs8HNGmRwrV0MRz7E913Sq2E0lhgBJLlZ9UGqk7VDeghgPd/o55PGPHC26eIFhSdBtTRIGHKBFywRhWRUBZYIWUqBI7Z8uvdeqY3HVdrhtIr2uj6bqOruvp+i5nIGS2vqEaHJmRciFl9WYSZTyFDc5oTKezdfeQezUqncq0lVJ465h2I25O9SbWpayDUik8MFYlLCRnBQCUTudntEGrpQuzLNkMrdE5JUvj3vsMZroMsBZuRMheeE07EvE+MHuP/ou/+Ivc1qvk9xuiT3brlFao/KVa6dp6OmmhdAfa2m2jDUoqIqkttXOOaZqQYkoL51wm7IhViu9aoFsffknhLaShVhdsmAUVWyi/r18v9qOUnFrrCGLg8O57Igp7mZhOZy7vP+A+f0IPPaIbiGYgKoPeHdg/PnJ884DpTRJsN+H8hHcj0bt0g3wKBaRS6GGHu7yidwf6+7cMxz3OOrq7e14/fOD89IzZ79AmxZlungnOgZSovsd7hz2fcdbS36cy5JAJNQkv0AipCOMLU85QeBe4XMYsPB7rPM6nzSDwxCBQAsbLhRgD797dcT6NaKMqKcZ5TzR7dg/forthSaWGnGoT2VLShAWx9e+yxdOJWCWlRmmDMQN22DPPR+ZxxM4X/DwSnM3szNRXQZoAZkeneoIwOBQuCHyQRObU+1Am4Kei2zoZrH7o6IcB03W5yjDtadMZ+r6n64e03pW22+BVouwPVfsRIpMCiN7jtEKb5PZba1O9gzZIqZPsSImzjt28TzwAMl/B+YRLaIlVsiqAiEjC3/cMfY/pkpFVWtfMmZLpcyV8kdmAhRBw1mGdTf/OM7NzeOcrL6LId2HHWh/QP3z/3Yba25bwJiBF5bSJyvnfMvlXFhQjuy1p4RcFQUyEm3maq7ayzqUUYdFeG/5/bKR0sf4ZDGwyFLDaa6vGoS0vuO0UVMKAmqXIFGbnPOfLyMtZ8zDsuP/+B86nkTn8hFUvnMdn4mvK4avdkd13v8KYHqV7ut0e3Rn8POH8jAgTzs7JZRaSGGaU0inHPRzo7x3aOvRuqKGSVAmLLfnxEGIqynl9xdo5ZSFyoc7L+x/xzqGHPf1dT4web+ckVN2AnU6pmlFKpBH4S2rsIXVKz0mlEPhMiMmtw+3Efm94++YHIpGnj09cLiOBkFmKEt0fUKZP7r9c0NVkURL9u+ZiGqgmEBuehkz8fJ0YekJppOnQ3UDXD9h5j51HbE7Hpng/pPg/OES3Q5g9oj8i+wPqfE7W187gLUSfzkUKtNLJwneartP0Q482JjcVyS52Z+i6pAAWLyDmjIbIIcDSkDTtLVk9gGIYnTNom7w0ZOqanOJuyeB8Ug7eIQkoJVBzkgGVOQc+JPBPGUM/DAzDjmFInkvXp07Pyd0vmQqZPZwUjhW3P8nahJ0t8zwxTqnyNIUGJLp0WOZthBjRx+O+IoYUSmJNKyR3vDRgXBo9LimLEo9LmU7IGI1SuiKlSibB9d7hdIprCojh/ZYl1GQOKOBg29G/aZzQKoLmtXK8UpHYkoTauoOiNXEerxIqe7aCd48PdEQu80+M08jnz8/Y0wXpI9JojIEuCvaPd3z3F3/Gw9t7gj1jz6lzjw+k+n0iUWliyPTavJHMbo+UI8FNRPoUn6lC8U0Vcd4HrJvwuSahOz4gTM/l6RMvHz9hp5nu4R1Cd3iXKgaF6vDjxDyOtc+CFIKu3+Pp0f3A+TKm73AeiClkEWA07IZ9vWdvv7nDun0KxXSqCxCCFIsTq+K/KmktFNp2/7Dc23pfxTIBSKgCFmqU7ipW4NycvKiQAEBCQHQW0e2Rw4za3dGNI9OYuAXRTYmJmOs6pCARfzqDMRptFH1nUCZlAGQpJtJdqoxUGiFlPe/iAVSug1D52rI3GUsHoww0qlTSTUzCjw8oFbOn0eHdkNiSdacuRizGZDSH/Z7D4chunxVA39P3Q64wjTUFmxRcUUoJ9PPeM2fBn8aJeUqyaGdbvfrCG0heQPJudN+ZfFPIhTu58mmLspXYKhc4VGZRBiy0VihlksDnelrvSbGlTjlam2OYwqf21TIvQybreKZquTf5/BUB6Pr9tY6ivqO5hKr7FupQWdjZOT68Tvzw7VsO2jNf/gPPv/tr7Ocf6aVA9j3OKwKSGFItvDQy1b7aQIwz0+tn5nHK5+3peo9UJn+PT4i9UNjLK+ePHzEP7zCHO4TuUMNAuFyYp4npcgGpcD6mXv3DDmstl5cXnj49ExH0n18JSIRWqG6PDODsDEIyXy4IpTNWEbBx4nx6ZR7HXKefXEGtsuVRaUNBpO86zMNdagXuAzZE9LDHKJEsWPb0Essv4zjFOLB4g4Rl7WNcbuS6P0TTKTqDeTH/CJmISTF4Ygb9ZMhIfOdRuwNmmunnCT9PBJ+8gOiz4ojJ4upSU6CTElBGZ0ZfBrSz8Bb2YbFH6wnXEpGKJKpyS41OsoJwMhkj73Jr9VD3o8xpVWM6fGcJzmW8LWVsiMmVH/Z7Dvf33N3dsd/vGYYhhShDj9Emea4h4TSx6apc1td7T+8s85RaxE8ZA5nNlElOaZW9T+E+Ig2f0f3QJZclI4gpkFmIAlUb1v5uC5uvtGYqAIVqQoKEPCYPwjf8apnznuXGL8K9nv6bvP7I1r6vqx3WKGG5cTUk2HIncygR4tLAM2bXapwmPj0Jfv/xmb/47p7ucMf9d9+jhSLOcxIIOuTujv3DA7vjEQFcXp7
wl0/YyytutrjxTARMN4AwFaGVSiWsxUfOnz7x6Xe/o79Y9t94ZmtBGhAj8zjz8nJBak1/UMm9fX7hfDoznc6cR4tHIj+/EJRG9QM9Hh3mTH6R9Mc3IATj5cJ4ek1DP2Kk77vUOdgnN7mIqFEJzFIlxac00miGXtMDQmkUjmAvBD+lkFDppfVaFv62OUcQpU8YK2+gzUmvjYtExJQ+DEonLzyCL0yynNYTMjUUld2A6hzGWaK3OVPgiMERXapHIFgkHi1jKvkuwq/VashoiYkFBcvISoylTTm1ApCaWQpSoPI1Sa+SKy5C3dFFQGWuECz9C2oLtCyEXd9zPB65axRACgUGTAkBcurRe4efU4hZ5KWAsc45OpNnRBiDNiNm1rneQiaujkuYVKq70Oi+63JJb74XUlQoBEpjA5nDgMUtKjgA1aqyalgQfAKIXCZUJLes5P63cX9cZLlJ7xQgb0UNhsUFaFDAqioaRsS262vZgG3KsnAcCJ55Gvnw8YlfvXvk+PY7Hn79gjA7ptcLIS/u8e073v3Zb7j/9juIDnt5JTpH9JaQgaAYPM5OdLsdpt+T53Tm0lJDlJppspx+9zvG2RMRyG5gPE88Pz3z9DoTwsgxCHZ3Dzx/+Mj5dEaYnjFqXIiIiyU8X9gdwOzusd6juwHVG5ydubx8Yp5dyt70Gm0sPkTOpxNSppg3EUFyF18lE0NOqbyqISHnUiZPo8TGMdbGHCl9ttz34iKLEEEGQpBFWtIxy6at1W8rFLfuLSUVXobUGlz4yngTcRk5J6VCK4MyfTovUpm1CJ7oU6FRcCmroPBoCUrLigGUkD7t1VQQpGJM4Ygs2fcSBpNbnS1eTmGlRhaDiJB5jTKBrmA6PlQZi3EBG2UOgfq+Z9jt2O/37Pd7dvtdBjAzA7Pr8v1KAux0EuqqSGMyYsYHTGdTut5otFaYSeODz+eUwr4QQgL0jUEnQoGq7k7bi22N7itW7KTKUsras7ojhYa5EIPq8JDc7WVbCVgsexRrwW97ACBgkwNYHIKl7fwqPIjrJ3JjkOX4ZVNlXY8kMo0jp8vM28OR/cMd588fOc1nxtEyMaPvHnHep6m50hN9GqbpQtpI0Ye0yXQSRNN3oPZ4PxPsiLXA8Eg0A68fPnO279FaMDlAG6yPPJ0mLqPFIUB1jKfU3cfcP+KkYbIz4XUEMzAcjhk9HpDdwHR6wb5+xluLzvF0uuGSyzizPxzwztHl2FjltKSbx0qFjSLFsd47CAqZY+YYPCG4RcFmK9bG/pAIgYTUEWchgcYS8DY56xJrF08iIfQx58hjkISgKnqdXOY8RFTJBDZrlQps8h5IRiANNwnZExDeIkXI3YTLPgpESkcrn3oNhIAMCdhrB4+kITGq3YgLOa7xHsr+9yEVOiUOx8w8zSk15/3iZZNwBa0lXd/R911OTab5jnUeotZokwBMYsDLGSlltuRpzWKTuXOuyxm7hHV0XaomLTR9Z1P/SSkVxhh0jMUql0EOSw40kRXyIq9Cgsqry1pyGQsWKohlcS7lI62d0xe7VCe+Sdxv0nXtqxmsqDd2WXyx/XiBnzdef+0aJLL2zZs0xJiKT0JIhB8pMEqjRWS8vKIe33F8+5bT0yfG0yvjeIF55uX971FYTscDh4cjh/tDUnTeZ9JjQEiTkGWt04kFR5QGZEgdhjEEc8CrE5MLKDQvT69EIVBdx3m0vJ4muqGnu4xcTiNiOBBVzzAorPuM9wGlDNaDCzAMd9jxlfPnnwjjGdUP6N7Q9YYQBc5FokgEFSmg6/uq1IP3uLlLQ0DzfZUiXUuUOnM6RAWhrJ1BmtSmPe+ZkvYrKy/Keq+sfFYQYeGatCFbgnNi3XdLr0mPE9mz8j7jFhKVC3C0TupbqKVZRzQheQJ+RjiHIHEdUqYylRaHkD23kAq4QgzIzKLTJoUjyMZQFL7DipG68FdKVWRwNimAKQNy85zCQ5tStiUvL6XI+EAGIXM4mjoyh8zxKN2FEulMilQkFbReBvY0Z6S9q52SpBTMWtfvtS6lppVPxl4rhfbBZ/bT0rqpxD8FMCqEHgrvv7pHZCvvq3YLOa1mncNZxzTNmZAy4ZxdhkuIIuxx3RK8vZr8b8ysoIX4sLQUXxc/bWPLcp9E7RFQr6/V4CExp0JIYFH0lmme6PZHjm+/YR4vhBjYW09/ODLsNF0XCfMr06tDmYRi06UCHq1kIu2UnnU5fRrEAaEn7PQZdo94+RGlO7rjA4Po+fD739MhmX1A73apeYcLjJPD7AcevnmDd475ckGqgd6o1GBDJDLK/PpMGM+YYc/+8S1mt4MIs4soI9gd7xEib7CYPBUhJMHNKBnxUjBPF9w8J8xHZ+56buMV3IyzE0IPqQdB7sy7GtIhWk9woYgXKrkvOe8y8TinBhePW9T9UTgoJppV+/lUvONQvtShGKLWaJGadlQykNKo2CWjE32lEIssrMJZgpgTlyAuzUuJpNmFIqYiJiJCpE4SSCqHReaUYKwAaIQM1AVn8fOMGxMNeMoKwFpXh6OUtLmUKWXubBoIE4GeiDYmp/7I7c1UVbhBhTySr+z1jAX4NFGJTLYTGW9zwuYFlgSS4s98ggziiWbwQpbOlCvP9f35piZ9kMuB/YJK+lAUQMr129kyW8s0zYyjxc62uiK1wCEWZ70aaVZe/1IYsLhdRfhjvGH0F2S/4hhitS9zjrpUDaYbK6NInAGXbsD5PPH88srDcWA4Hjk8PBDcjL2cGfaa3SDQxqO0gnDGjTThU0Rpk5B5mb4jeI9SXdp4esiFVQJEYvPt7u4w+yMf37/HO0ffGUYLx/s7+qFnHnqEUmjTMewG3DiCGFDKIJXOm0AQrMXsDhzevEMNB1wQaGPY7VRl+oXg69AOiLg5K+UC9kLy1JxHRYlQvq6lG8+gB4Ic8EEgtamdeWXhiGRQbZkhsQhuwYZ8RrGFSGFXEahKVs21JkLkOvyFyJ4Nbcpv1xz3xu2Tmf8flUqFSWVnZKxAFMxAeYSciMJkJZHCgiDSwNZCoI8idYeSlLr8dn8tY8oX3CA1PnHzzDyOXM7nNHHKl27O6dAqTz2KIWCtRUxTWuecbu2GPjMtM/ogBYKU+xchIFdhd6jgqyIRlaLREEwN3aRLHZWEJHU30gq93x3qdJal2+km90aowodsuv+GlLdOveQkae5DjqfKv/nLk+b3NRe/8PPbFF8ryuXJUgHY5AJycN/qh+QpUPOsNdG3AQ9jLDhD3mBZCSihUUSCmxnPFz7LpC0Ow4HjmzfgJ9wuETWUylVjWldL5l2qDuz6XULY3YUoBcGnIhqvQHZ79AC626HFC8PhkRgt+7t7UJrHd9/y8vEnjoeO6fMlzetDsb9/g+x6umGPMYLj4wPnU2CcHT3Q9QMipNoEc/8DsT/gpcb0ibdeQVYhUSGV3Qpy74KygXIcHLKFdj4gfUBIjR4kap6Yp4nQB8JksR6UctXtLjz1dlpPoaamjR+J2ZtEUId8RlHIYGvMZ6GGiMrsi02BCzFx2YMPOGGzd5juryKmvg1CEpRcgOxqIrKQSo8QGi
k7Qkh1BsQUJkSRM0Q5cxWyIijZq4QhpE5RyAR6Il0iE4WYqz5HzucTl9xzMSm+xJlRGRn2MQFzzFPGXpJlN1oTXKj4CqXsWuRwR4a6TsULjxlkLV5CwRCkKwoqJBxMkg096MPdXRLI2GqSRfATQ9AvTUB8XuUyhkplyweEPDUy5vjE64DKhQ/Vg1gDv2uqQU7dVbe+Iq9Fuou3IKo7X2sYK5SwuPvruKLxMvLKJWcodYHplEBL0DllqfKNlGbH8e139J0CP6ex2yEgpcK7OQu+zCBNTMi/6RJgM52x0zPz83tEf6R78xv07oH+7oL5/ITWI7rfY7pEM37z3btUMOQtbx47+rs3iM6kLjVdIslooxj2ntfTM5eXZ+JuwI4nGHZYdYf3hj4qCLmZRoDxdEoEEuER0VY6qQgOf3kljJfEZHSBaZywcxoD5pwD6cEcUHQQJH6esfKMkMn7UBlHkEpijKmgcVKKKcXprMuttWOd6BO1JqqIrn3EC6BMjWtDQ8qRQiRQK+olpHAuKRmXUW5dSmB1BbE1IHTefyK3/hRAzFkGqRGqq/ubXC6eUsShtpurA5/J6emYavyDTJOxUQKhAR2JcsILTSDxRoJQROFT2zWdEHopBFFKUBp0R5QGHyUyCEIhE5EQsAVLEU2WruxtkSdIxTTVOhQPeAuXp9BbCpLSzp6b7rp+qenPTKbaCDQs2iU4n4EuX0uCkzZZLACAUinW8Jlma3VKMZXyxTZt2J5e+W0R/oUcREz14XUoSLEW2zThAsU2l7/OOKQZganSS5JolUZJjEyNMDot6UxCT02XKu52/Y5drwhuxtsLwc5IqXDzhflyJoaAMvvkrpouo8iSOOzQXc/08oRzEfwM3tMNe/TuHmVGVFYWSgoODw/cf/OG+fxKYMS6md3jb4h2TGQYMyA6jXAOYySv1hMOitPFIp0CJCJYnj5+YDqdiAS6YU+/PzBPM95OHPYGw4wKlvn1M24emS5J+IXSqcWXTHl+lz09FcDbC3E8gewQQSGUS4U9Zb6f0jjn0Rm9FkLgcw3INFuCD0s+XOc++VrnxFqzseMSbpZBtHXzi9wW3MTKbosZc/I+hRk6hCa92+h+lRRAqKEHyVqTlFdUZeOVUCinr0XZQxkvyu67iMUDSASvxPlQYASiD+h9pHOSnegRw4S1NvEFilXOaQulcqWkTgV0SgpUZ1D9HqEHPAoXYiqRLtWOlYlbirGyNx0iVA+8zXCEPDQ3CX/N9Em5bQq6pO68d/UnzanzDXBD5VPL3KtdqpTzsVbhfUQ7j81TYksttpAyz8ui0VBQ44LWbq/c/ErPWt6Rb9Yqtm+9iXqw4i1kvZmJjEpElEiDOzpJ7jybO8eYRKPUWqV8rZJoORCUwoqYNHduey2GIRGLpMEMB8ywb3jlgu7wjv5xZnx9wlmXO+WGxFyTGhfT+6J3mH7H4eEBgksx9NmhdE9/98Dp6SMugBQdwdyhj5F+ikSz58OHz3z6/MTnlzPCdGijCd5xOb0yzw5tBt7dHbnrYew8OozE8ZQbvMD5MlFIL1pJzNCnSkGpiQKm2RIdCC8IowV7roVHJW+ujcF7n2rtZXJvrXfM04x1WQEIAcYghCHIpZO0Dw4ZU6yeavNDRcMX5mBB2xVKBpTWGXwOIDzeW8KcMI7ktZrqTRA10qs8tjzX39ey5WXIR3IRZd1ApY3dku1aVEHqOKOIMn1/QBONQgaJ3km6qNmpHWI30c824xUpa1KmHguVqyQzFVpkt9xoRb8bkMOQip884AJReDS5s7HKnnL+CTl7URThuqYnKU+pdM7GZumTCt1O/A15LJbzc60yi2XGW5Yl1eYnVc5X5sUs6Q1nFMpKtFyaMRRwMdS4ux2dtOSHW/K+LDFOS/yJhTSSFEHhXclGe8eWqlysvihWP3VC1UKmNtQyoiR0UtJpRWc0nVF5vp9MU4IQRJmyAyqXhAZ7QZgeZXapEMf0SJWmB4eQBn1qs0upwXlE7wP+fEoucUhgkx1Ta6953hN1aughVY9QHabzaJtcu+H+DT7TUR0Gi4DdW8yD4vT0kX/3H/+Wf//TE13fs+s1x/sHlOl49T2fXx2Xl/f8D391x7vv3hHmT2A0wgz4eWa6nBGDRiJxNjW28OOEsIEgFLIbONwPODTBOry7EISrSLOUupaLu9niusREg4T4W+fwLtTNX/dbXEBBkS10m0tvW8+XvdE2vjRK1lAzeEnwAh9Tmy2w1XKHoPDe55Hly/RhKMClbMDnHG6WuFsWirOsfRGqgRKFBxlIXY98MgxGojD0ekD0M8a6pf25WLIeMhdVSZGKopRceDZSpu7CUmu8EsxR4r3Ai9QKXUmQQda2ZakbtyBEicsov48JP4rSgIoII5DSNOYxK4CQtXMSfJtSdT7n6zOaLjKHX5VKP93Mf2+KgqSIyFhICBap3ZIGqxo3C2apKajwXpPWqyyrRfcXHGDpC9hQfpsbWHP/FT9YCCJSiCz8qbe8ESH1mxOgZcCoBQNIhiBkIDMijF5cw+ASaUYnbrrp9xVsC94S0Ug9IFRHcJ7ZCc4TvLxYxvM5lUcHCZ0mCsFoA/bisNbhUQx3j4RJExhzIUhqHabMgEdi44SSETle+Lvf/8S/+f0L3/35v0CHCx/fv+fz6cJvf3jHw8MbbOw5j44JwTe//nMOu7/ASI+dR6aXZy4vn5guF6bTC4GAdQFrPeEyorTiMOyT5fCWKCbmGJl8LkDKlaI6t9kyuc5eaZWFJneKznuoUI2LkPuaUoYFS89Kv6b9fAb9aAxG2j9aN1yBmIaOlPDBuwC5u5HyAaXC1TxBWSjMhRJcAb50/6VSyEw8qqFqjRSyNxCS9xCjTF6AFAijMbpHdYE+LLMyEoZXMLOFwFKKihZGZWsgIy4GvI+4EJAqohWosPRjjFHmrIskxlSK7GSi4HspiVoBHUKXUCpjCFKh53lM3Vmdzbl8n4U5p1Ny/X8p8y1ac2lAsZBrBGHhPNf+6Yv7VimTMv2rCsJaikrKzS1tsSkdgrKKKIohLn0Bm/tSU4XVfSveQdaqUpCBPjAioqWgE+ShlilFIkWakEtwtZFjcDNBdWjTI6TAjRPBWzqzoysCAnUNfZAJkCFwevrEj3/7t7z/8Q/89IcfOY+e0YO1M8d9z7tv7nGiY54CqtulYRIzzHYCxmWz9/vEhReKLqrETXCeHz++MAvF3d094+czP3185mwDbx8O3L/7nsNR8/K8QyvF/rjj7q4nuJHoZ5wWaKNwc+p4E1xIrjQRoSKq69K1CcEcBJfRMXmP9XleXu5BYIyudesms9gKfbzsF610og/rWED4BIzlMCS1kVsCuNVcyQwe14guq3QlVQLdsiJxzuWqVnIqLufvVTteKzYGKe0T2VDR6xkIifShgpxFQVTYOS7Et9IQ15chgojUENXoPHmnIB2FOCWrwSt7t2AgNMolAesLkxYSFuBUKnKSKmZ2q8zlvnm8Yh4CG0pdBR2oUDkwUqZejkFI9Hg+LSmETL4oTKKlckpVV5+V694IYFvskW/omgIcFxcHUZmGo
lruokSWbsRVSzYKoRCC2hZgsWESrqxEPp6UZGQ/1aEbCUaJ7PIr+k7Td3nmXO47IEKA6Ig+0VFLFYibx9TCys1E3SdFIcqsgcR8nGZPmDzT+J6//T/+A//mP/yOT2PkdJ55PY98enrh48sJ4R3/z//Hf8+//KtvCTHVg3fKY58DXM5IqRfGnt4RfWr0oGRquDKPE0+nGesVr+cz98cj9w93+KdL6iYjBUPfI4A3j/cc7o4ImRttarWEd9GnlledROiePka8m9MaKkOUJoV33uMdTHNKwSUuhsxl4ImAovWi/GXubNMZg9emCnSIMZF7okLKNEimVAaWOLyQbARlxl4semAxPJmK3DUeRswjvygl7gUMLsFyTJ2e6p6RTUja7BtEmrjsvV8VvLUWJ8aS5nSVXFU0iUQs3YRKQZFcUt/tDMICWG5rV8s151GcaWoUEeXBa5m8AFkKsBSFZRxzpiMqTcQThK8LVxqbiJxt026eQOSpLLn2Wzdc4qTFF6FvRT9WJmDOx3qHs6nxpZ1nbKE+xpDTKKJ+scixWOov5yvOcrNDEI03sKQJakZg6SO4YARFo8uc81RZ6I2UGA2dMQydYqgKwGD63Pcw545l0Ss+FfcEZqaXT8yvH3J9uEyucXZD56m04QqMTvPThzP/y7/9G8R3/4o3vzkSf/fv+fyf/gNv3r1FHe/5j//5d/y//z//hh9+9Su+/9X3RCRdJ9HREeaJ6D3DbsikFoOdz2lARiCViEbBbB2X88z5MvHN/R1/9eff8fp84t3bR6TpeX7/ERM9v/3+LcYoJAJ3SSw1LQVOpGrAIBUon6bTxMA8kmvUjymtKQ1ByTSWK08PSmBdAoqnmVoVqFRuD6c0c9elqjbTp72QC2S8SbX5BRsS9V9RSTxVcESGB6tQ07jREmVEHoyil9r3GBovYun8JLOyrg04F7eiSSU3WQhCAx4ubnphxTqfQzfvidXIFYUhUsYpdywqciSESB6ikFWRhEKOKp5uVkgheHwoCqZRFCEpMVk5FBCDSOFIJiwhJSJKykjtpdmoqsfXFdDrEpCjSq10UxW1WOmi+pYWQ9671MbaJeG3U2I/pXyyzQUlqfy2WOTSw64s1OLDrzJ4S1amCHk2EEHAEpUVxbRwBcSiZnPJJWkKsFJ0WtAZmea3dRpjUnso3eWehya3YMrdISOpr72zFvyYeu2dTqmuexy5xJn59EoIgen0yjQ5gtnD8dc8nwMvvuPPf/g10j7z8eMn/r//+1/zf/qrP+fx4Q1vz+/4u7/9Hf/rv/lP/PCb39B1O1RnkCGxDgUhDyrp8UIm2XcWoVWu2hZoIZnHVz68/8jDoedh/8CbbocTmg9/eM/v/+Z3/Hf/1W/4zW++QwDT+Uz0Fq0kk525nF6JUrM73GGcyyk1l9J5UtLvDiltafaIoAiTJzIjVWJ2FkVf+81lL6z0iLA2IeBdZ+k6g7W2KXoxOTWYY2CZBTmHkaliLfXzg+SFCRYKce1IFcvknoI5LB4pmdeyNLTJZJsyEiwLRmXAFqURm8EbPmV+YvSNh5lZjXnsl8/4Q1JkMdfOyESuEmnPq0pTFoiwlNwnrMRX+UieeKYb5ZZ6IcbV+5eiu7AMGS3hSBYLSfYeKoGuOtNVpvVw2C8xvsrWT8q1cDbSWNsNe5c5/xZvLS5b/eIBzPOIt7ZymQv4IGo+v4QBC8OiKBixAfua3GANEsrzJS2jKkZQgJTiJYpcOabotKY3ir5T9Bnt74ym65OrqjuTe+AnHjyqA9lnRZCmA6E7pBmIceLy+sp8OeGsTQUzekBqRX98S//uz/hkf0Rpg7cj08tHXp6f+PQy8Xq68O77XzHs08iw//Xf/TX/0//9f+S7X79JY8j7I8PDd0g8QiXL44VC9cecbUkZFw18/3DHx9OZp5/+wP82jzw83NVOtfPplb/6zff8T/+3/wv7/Y7zywtxfkX4C5enD5w+f0wNSvsOSC5mzASvGFyeZquQ/QHZHwGF7EF3E5dpxFnPNI2pLbZzyePLNSEuJnp4LQibZ8YSEuSuvKU/fxn5nVJiqRDIGEM39DVMFMisZMJq7yRKt1qQ+4yKl7Fq22g1Wf9UbFMyEaJkBEQRxjL3MnmuoaB92YylrymcwkWx1JAzQ5ohtE1sA94nqjghv0/FrJBi6UKWGLQkBmJhPnqXGJtSxtpsJ4aYOQhNBUzJnMSczhTVri7kvuw5pIYmoIdhl2sA2oYf6zi/LkrwyUI4h3OpuKFUOblcfuldqFV/LfOvTFytOICIlO6uqXXEkg1YK4FVEFBUSfYkFld/8S6owx20THznTqeBjbtOsSvDG4sS6MqmzBV8eUCl1AOy2yN0D0ohRECEGR1C4njPM/M0Ms4evbuju7tn//AWc7hD9Uei7Ln79Io/P/Pj+8+8OXR883jgL3/1yA/fvkk9BRFE5/n8fOLHP/zED7/6Ps0j7I94F8GeEVJDTHx8bQ6JHScVnM8YrfjX/+q/4vtvH/j49MyH5xcurx+QUvHdYce//B//Nf/6v/lXHO8OnJ+fcK8fsa/vwSclPZ1OqK4DofJIs4h3lvlyJniXuh2bAdPv0IcjOzPgvKCfJvTpxPl8qZFbYQD6zBkpQ2RdphdbZ5FSMk8mt+nK3aW1yQCzXPAmqXCuS6msEGr36RzC53HfjXcqm5p8JKiCdtOEEFTlUMuL48IzKAmq6op7T5Ap1Sez5qgNT3K4KREJOSZxIhLTbgHIC64p5LKRRYiZ5Zco7lFSsZGYc95CQMxzArxfioeSGkx0pCB8TVkWiUhKfD2XowzsjaEkzCNOBrROHaF06UC7+qnWtbj5Phdf5JJGm9z91H44ufmxKQaqjCkWd6PtJJQYVgsguPQXWEKBRQm03P7Gsuc/0v0v3YbSTDuV3UjTaYbeMPQ9+75jNxj2vaHvTfYEutwYsquYhxAytdzWfap60x0ImVwtr4n6mOJhHxORWBrM4YHd4zt294+Y/QMog7WOt+/e8MO95t//7V/T/fYHjo9v+W//5czh8RteR8vT5yem6cJhv+f5/Xv8eEEPO5TpGfZg/YgxhqD3CGvA9Hg/JEH1gJAc99CrI3/+m28yzpLSk/v7B/b3D8QYOH38iTg+I90rIqb6DXdOTU6jVFiXYtl5HJnPL8Tg8yiwvgJ8u92AHO5AGg7ec7y743Q6c3p95fXlmdPpzDRPUIuLqAg5BGRuS5UaVuaW3Z2hy8Sl5PKbWk9gjMVam0ai6dy0s2QUdE6NYnLJbua+Z/kvuXuVZ1eUQV/JEsWcgly485VslGP4GsuHdUapZCcKK7CmEiVpGlYOAVKWILX9yq21k1AX9zzGNDkqrDMK1UxLUZvVBucSWCtINPwcXqmo8eRUfaXaUzMTSQHF2gIscS+osxi16TJpqykCKoScUMZ8ZcDGbgQ/5JinUIPrpKC4kPquIcON4IqlueSixRZbXwW+wQSqiyWWn3Sc5B4lWm9qdDD0Hbtd6qu23w3sdz37Xc/QlY2X01d5SoxSGqE0QmqiMiA7UB1Rppr+GFxOxaTXkhVQRJGE0TuL
dxaTkXWlO+4e3/Kv//Vf8tf/r/+Z//3zJ9796luifuT548Qf/vCe3/3t33KnIv/jX/7A9497zu9/jzSG/Tc/oIc9XqXiEfpHtFPovk+utrMIpTm++57Pf/MJYzTHb75FHx4ScclPRDczvn4mTBfC+EqcT0R7yRsmrbscDngkrx8/cjknirPRgn63o+v3dMORbneH6YbcgUehu45BSPa7HYf9nsvxwOvxwPPzC6+n3Lps1GnQ6GyZ7Vw3c8nuSCtQymJnzaSnSiQyWqGyR+CsxXUW6yxGm+qhytwX39kZ09Cui+cgcxGMEIEYc0drsRihajRy8VpNPzdpvcRbECgZK2uuTsai4EykGgARETIQQ8laFDqzAEoWjDz9x6+ITVFpZFQ1m1AUgRACHxKJKrglNV9iGKUUujP1fAsHJZXzR2wu8iLCbLOB9jGnSlMtizYdfdehV228Sm2/89nau+reO5vAvpoyrBqr0VwNUXIrvVvhF9WcL6a99mVrs6RiEfwS+0kh0FKiVJp1Vi2+1hnRT0J/OOzY7XfsdgP7/Y5h6PNsuJzmzHhHEn6DUB2UtFceuBlReQO42u7a+Im9vXARH8FYzDAkJmD0hPkVjwJ9REjFt7/+Ff/X//6v+J//l3/Lf/7//YEpxFTzPV/4b78/8D/8yz/nX/z2V3RacP70e3Zv3nL38JZeC2Y5M55e0bs7BtUlZpj3nC8XiPDut/+C/b7j/PyeYb9HG4W3Z5AOH9KwDT+ecedX3HhO7bWVRPW71EXYB6YpNdUUbkKJiOn3yG4gSINDMJ1fALDThd2DQ9w9YvrUCl1rRd93HPZ7jscjz88Hnp+feX19ZZ4mxnFEjQkItCKBhqnVeIToamigrcUZjTMGbVya49clarG2DlsG0OQOOrNKeJXO2JVsKOk6dy9SUi0dg/KIrdr7XywZrGpZct1BqMVwZUR4mSCVgLpk+ZeWYVet9KsiSVV+ztrMXxC5J2RJuQu0DmmGYAipa5CzOYxI4YS1LpdwlywBuR5CIucpzzSMq/OQiFROnD2w2c6Zgp4Gq3iXyvSVSvMMdCiDPZ3HeYe3GcxxM86WnmaO8r7a/mjFi249GNH817rqC4mike2GAVZovMVTK1V9S2hS6g86o+m1xGQQr+86hj5ptN5ohj6lnvaHHfvDnmHY0Q8DXeE15BSkzGyo1Kg+KYAoNR6FQKXoLwrKFGQpNbLfIcU7NBNSaTrnMft7dscHsK9Mp+cEHurAaBMY85f/zX/Dwzdv+fHvfs+nz8/EGHi83/H28YGHt98QLs9cPn6g63cMh3vu3nxDh+MyfsaYHn13R0QTYkwxdQhM48jh8Q1v3t5x/vwGJTwyjrgR5tMLNhqm6NHBwSSxMWBjREaPcydcPDNZx2xTf4But0NoDVLhhQEUzjrG01PtI5BqGBzx7g26H5BCMnR9CqV0Cql2+z3752dOr6+cz2emfmSeZ8ZpTD0VrM0dcQt3PTXtxHuQERc9MxLjDH3XYaXN9QaJ+FOJaLn/QMERZCGsGb2qUSn3uhQc1WrFyrrLcX0omYHMSSg0XVLWwDmH86kNV/E0Sj/BlrnoQ8iKhpQhmW39nlhd8lQWr7TFGIN3jvEyps5ZpWZCkNOmLjMmU+WkycNJyZY+pFiwov9KZAVASnVO07h8Z8Y/pjymbBj26GkaE1Kb0zXF1S/gQ+FtLwy8lq6wsLQWx70It7hpya8fS8BQSL8J3FjcfJVzw9qkmP4wFLc+zWLfDT3D0Ke+asbUqS99P9D3A11fAKdF8AvtKAqd23WZVLZJqqqLQWQAJWltgkMFiwhTSivu7zBdj48CpEZqk5pIzBeim5hfP/P69Mx5EgR1oOsH/uyv/it+bVOra9N13L39FiUE5z9M6Mc7Hn/zVzz89q/od3t0DIjHH5CmQ/UHnPeM5xHhLJd4rtWM3tk8+gqG/VuInvH1men0jLmMvHz6BE6Chfn1E3iXvC2hEGZgf+izJXaZSJJgJhXGNHPAzamK0V7ATQQ74y+v6K6j29/T7Q6JUioVx8Oezhh6JVMI1mumqWOaZuxkiG7iPE4EHxit5zI5ehU59lkA48TkNNZHZHTY6BCqq8JbqbxR4COZb5DT13W2QJoCLKTOIeJSG1JSjGUQiChsVag5/VTlKmsHXxC55362pMRFiWQsgSxYZbafzDwS5x12St2ai8JwuUkoMpU320klBTAlRWmtq5iEtXMiIok0Mch0PV77DMbnQr0YcTkVqbIXHHP873JPgpS29yl8j6l3hfeO2Vr0+eVlEXxfNruvpIMiopUD0DxKBVVuFLSK4Vn9FAxgTdJZwoLCsmy1skRLidEilehqzW4YOB53PNzdcX9/5HC8Sx1U+z6nlkydm1a46YXCrLVsur2mmmsfyLXaS+22L89nDKQoQWLR3HlGneqSMOWSS2995lcb7OtnLp8/MT0/MV9mbNA42YMymGFg2A0chh3GnyHM3N8NmG++4fEv/zvMwzeVI7F7/DZvQIcIqWnHdDrhxgt+uiC14vz8kTA+MfSS3rxD9Xt013E5SYLskIdH5OmVbn9AioQIowxRSOw8M41jsjBC4ZH4GIhzmmkflCDaC3E2xGEHbsSfP2O1Rg9H5v09qt/T372hv3uL7gZCnNibiNprBjrsTqdSWGuYxxHnBqwLvH+5cLebeejTNOXZej48px72j8cdgwyM7sLkLDFCbyRGGmRQjDaitSI6iXVpMlLMHqJWZW5eHj4TQp1OJOUy51JnElIdZJLbaqcMWm5jnwfcOJc6RrcVfYLUdcfnmL7E8EUJKSlq6Ay5j1/u/BNDrPMIiDG9z9kqyD5PhrIuuflSKrquR01jDkdDxRO89wln8T5du0l4SYwRZ1O2qlxjUSgxJtbqNE7o88trHhnkqF0IKkUqW8mtcDeUzMpMKnBA83oh8iyZyiUEWND7VC1YRo0jUqWeVqk2fzCKXddx2PXc3+95fHzk4fGBu4d7DscHht2+ITCtR0HXUss84TWdiajUyhhSTt3nCqqYc82pDLqUQMcMGqWYV4kdIlqk14ToEdOpNpJw04UQJUHoBAQqRXQT08tHfFR0hzu0PKDiiehf4LBPTSGHA4dvf8vw8A7ZdYnsAwSlCd4yvb6kgSHTzHh6YXx5xo4XyAoudqmvvx2fubx+xmbsZp4uuPHE7v6R/viQPvv6nObwOYePpGwH4L1gmh3TeIYYMDLiZSRaBb5P1n98Tmm7rkd3O9z5gN7dE+0I3mJzh2HVDewkGBMJnYZBEuaI26XBpU8XhzKaoxzpVAoFfvp8Zj9o3vaGQYvUIguBdDO7XnG3kwgN58miNYxuxsaEBXifXWwR8drgLHlSj2CabaYSpxFsuqQdlVqCz+Kap0k2S2twKRExzUac5zm3uk8NQ1IJrq81DMVQpExGylJYO+Odr2n2EFJD1QgYlXr9e++yYDbIfR4n5r1LpCodFnDe+Yrye+8q/wKRy4QzhyfGgLXpnEvIUID92p8xePQ0XqqVFtVPF6l+vkzQjW2OvonXGxwwYQLxpkJY+AALxJfKHpNbp6O
q/QGlTI05h06x6wy73nDc9TzcH3nz5oGHN2+4f3jkeHfP7nCk71MjTqUWFHadelzOOkSBKwUTIpVXOpLbmxqphFxP7euGEMWlk7nhpFQYodAo0IJZRKbTcwqZ7MTl5SkNAeoPhNMJEGnDIdBhIr7OhGGH6B8xhwfMcKDbP2CObzH9DqENfkrTZIPp8PPE808/ZrZdZHz6zOnzJ2YfGN6+YTg+oOix04Xz6xO4kfF8Rg5HhuMbzHDH5fTM+PlTmhtnLTYEZpfAXqEMPipm74myo993ScFFh3AjwQfsdEH6iSgFXimGeECKgMcT7AX8TJhP9Ps7dN+jtSAEi8BR6vdDH4nygIsCZWZUP6Nih1CS8xT55lHz3TcSomOeJtJE8Ig2iseHI1JKnk8TmsjL2WKEwsjU4SjEzPbUmtmnDImKitMpF1MpyXlK19obzSXH+KF13WuzWlHnXybLGbAZFws+1HYB7dAPkb20SERJnQU7AZxpik+p9iuYQRpdTozJ9Z+mWqQkRYrnrUtdmYxO7cpsnJf2ahWPo4aoUikcVNJQ8KmDcu3dmUObAn56nzA/HeoYI7EMPxApjRFX1rtN0S+u/2o+XGxpimVxfW4kumiLUoWllaDTOp2YTuCHyTX5uyFZ/bv9wN1xz+PjA4+Pj9w93HO8u2d/ONLvDpiutLfO9dkNEkvVTTEJfUiDp10EV1z9mLoXJQ55HtPsQ25emh7O+9wdSSCNJOacszIdsd/jxhfs6YXx+YnTp4/4KOl2A7o/0O+XG5I6A0voOoTZ4VHsj99g9veY45s83HNkPr0iQkSGyOX5mfl8wc4TPsTUZfb5CSsk5jDgdI9zE/YyEYLEOoPcf8P+8R2m33N6/sz59ZVpnjifL0zjlLkbgdkH7GiZpjRCXMSQySEJIe6HOzotUTiivWDthNzt8mzQQJhPSG3AaoTXSHqidQSZ+g5G51KDVAlR9Qgt0d7RmYkYHEL1CGUwXeTNQ9p3iVGqUhiKyEM9ey7jzMMRorecRWDoNEaFVLEpFcfDjtmmDskCwe8/PKFR7DrFh6cX5iC4P8B8ueCjwIWILZNzw5INkHliiKAg98ltLwpYZ2wgNfeMlTZfhHPlUZQGOtmwSpGb4giWEvzMnC3ju5TShBiY5wmtE5ZV5Sl3Uy6dt5MCyt5NZgzO85y9A18pxi7Xq8jShDR7DyE4dIhp7ku6pWuBLy5zbcdQXf82d7poxNSCaGkuEjIbsHL6ySO5pECLROwYuuRbSCkS/bNLQxJ2u4HDbuDuMHB3PHB3PHK8u+NwPLI/HuiGQ5rsmiekUpDc8hNDroxK46V8jNgQsQFcSJ7AgkaH1U8hghQiVFoK2QAWZUpyxAxHvJuZzy+E+USwIz5KrAiY4cCgO9BP2Dm7eWqHunuDN4bxckG+viD6e/bSEIJnevnA+PKC0QewlvH5ienpKRdYpXDAv77y4cNPvP/d/8G7337P8eGA1PvUg6CLPH7/A0Jq7DRVNp/1AhsF0eyIwmCD5eImxsuMcx4lc3+/mDbyPJ4QXoFOjVKM7hnuHuh6TfAz7nKmHwb6oacfOoxR4E6pEKX0mbJjarjpJGrQKCZEGIEZMfQI3eVOwinFFULARse+P2Qaq0B3Hd4nt3ueInYwDMPA0CmMjEQkw37HONk8Vdnyh4+vHA4Db7Tkpw+f6RR8ez8wTxemaeZiPaPNsXtowlUES8aKKkgt4FbaoBfrr00yYD5T40PJ6Yelp3+x1JA6ASOoHJtSl5CQ+/TdSWgdSqoK4pXPE1Muvwz30LkUXYjUTdjlJj5l6EqIvjn34hFnrkIM6DQ1lMrRX8f8dVlYZrqVWHpx92Pt1BqJvpCIFusvIGXThUCb0kWmq2isymOKun5g2O0YdgNDP9D3HcPQc9gN7HZDmpq629EPu5Qey33UEocgFygBnmThS7drF5I7ad0i/OW1WLXTUhiSYqjk/peimJSGAq2h6xSd7tJQTe8TeSgCekDfvWe6XJK1nh3WjfiYwMagUo14VAaETkrSJWF7+fQj5vLK+dNPPP/0nt3wls4HLp8+cP70iXmccfOEe3nh9PyJP/z4d9z/2fdcXnr8fOLw5h33736N0DvGcWY8faaMypbaEJDM88Tl9cT5cklzAy8pJCCreSklQ2ZPmv0up5wy2UTEPJcupNRrn2onzO6I7gZS4VJIIYAEP19Sr083JVZe1GgMhBnVaczQE3xAG0P0yWrZySZFYUzySJUhCoGzjs5IOiPZ7VP6se90pSBbm8Zuu3EiTJ5vHw+EGDldRr5/6NFK8vJ64eIsmoDBYzpB8IHz5Jh8JGb32LmQC92W/HpoYufaAj3vEzmLOgwn5lqFVNgja7YgcQLsSghDTiEiYhXYKlM5FHXRIeYpn0+uESiZuaygyiCUIgMle5dCYEntGBzjKhwudT16heiJQnVsxD8rhti+j7Zxo891/wvPOtYpppHSPVgKSWc0Sps69ngYBrqhTzn6PnXWSX/vFlTfGDqT2Xtdh+lN6raiNaIRfh/ABoHz6cdXC08ecyVwHpyPdYrRAnOmm6JkjRzynPfUA0HnkVJKwc4IdkbmuneViIGmQ+/v6e6+xRx/x8uH3/H66SNBRBgeMbu3dHnx+92B4XhES4XwM1Lv6HZvEEJw+fwH5tePzKePvPzdT9w7mD69Zzqfsc4T7MTT54/87uNHxP2Buzf3CFJ/PG8d55dnxstHQoRu6NFacXp64tP794yTRe0eMNFgosJ6gXu91Cq0vkvDQI0SGOFR9kJwZ4RSqP0xdQEWqbpNxDTBaPfwJnkYeVNrozB9n2ZA6pRKC96hu47dPo8o2/eZsGMIKnUqjsQ8aUgjTV8BuNzgm07n7kJDcoe1SgBssXKmU3SdYBIB03c5zpYce8npfOE8TTze7zgeek4Xi42C82Xiw9OFNBfCJTZngHn2uNx8I1ljqgCGsMT9Ka1HnnCUOxJRrO7SK0Fmmm7hGMS4MGC9L1Te7IG3Q1RKCBtjQ1mmjkdbKMxL5kwKmSy+y6xCsQh7ee+CXSTugm5j5epLRPI0nvLxtqyo1XQhFy2EWvwRq2nNlNgYqhAZreh7w+6wY3c4sDscGfYHhv2ebkj0U9P1mK5PhI7ceqyMJ9OlQ5EsFVkxtUrK4J71EefA+dSi3IfSTCG5eonNVQojliCnlF/mzlO5739WCqL0FIipk5DKr+eadyElIiQacUSC1AhzgO4PmPGCHS+Mry/Y8UwMKae7vz9yfHhg2D+ATHUH0+tHcGdimBiOPZfnn/iPf/d7wufPDJdX7JSarP7d7/6Wy9DzzeM3hPnEhOPy+kI37LiPCrM7IoTCTRem88zz0xMfP3zg04f3vD59ZLpc0F3P/Zt3HO/eMI5n7HhKlYd+RuIy2zJTrGMgzGesu6CGgf7xLcPxyHDc0/d9ivG1ous6RPSYoUNKDfSIYAk4+t2OftgTQ+kkTEqh6uK+doQQ6Mw9yvTEWNzYpIjLOEJE6aSTkHJ0br4aQUSFvr9jh6gsPJNeZn93xHnP+fWCQPD5NBNj5HGneLIXpumSZjPKhD
0oUm79MvslMZar7GoH4zz4MOShOBVARFTZSNcms7UNjfAlNp/LHaRqor0YWpbvjLnJSPFU6zSggrW1nnrzXcVjKES6trlqpe9EUggQRUREmbN/uTCnBffqh5o+baEAZrlZQaErUpCDRFSRRDqlGLqertMMQ8fh0LO/37O/u2N3uKffHeiHXHXW9bVFsmzSGm1DB3LZpg8eF0SKb33EelJzxLBo7nLjljbTIbU2b6COhoJQy0KXmoPUVkrEgAwevEi93OMSIAkpUVpidoIo0jRddM94eub89BPRjQg6vPWJXJPdy8v5RKDD9An8Oj195vz8idSkxSIPgtMrzM5y/vzCFAXq/p5vf/MD3SFZQyVAdx39bo+dplr0IaTk6fNH/tN/+s+8vF5QyqQOwwO4ywuf/uYzw/7I/Tffou4G5vGcQFkF0U7JhQ9znhqcCDR9pxh6me7h/TdolRpbDrtd6rKsDd0+IfbRTcR5RB2O9HcPKN2lcd0qD1Mpg4MznpKgFZOrSJP1D7j8vEweWS6oCT6kYaVx8UITsUelWQbBY5RA7nu0Ucwu8HqaGIaeiMAYxbf3HZ8/P6G8ojMHbEzc/qfXM+fLyGWamGbX7KNQ73VNKccFP1o/Ys7fLzyDRSGQU4IK55a8fN1Q2/Llpj9Bsfp1KtYqPAeBT41X6/lCmnKyFC+VvV66beklri8nEGopbjb4Vduk7iRL95/a3aXJmas6KUbUEuDk9nfsesN+P3A87NjtEz9/d9jT7/aZTz+gzYDUOW1StW1yrspUlpjBGxdg9hHrArPPMT9QZhsUTSdoqrjaBa0LlyvJ2uYkmbAQokfGSCAQcKl8M6T6bVxMg5VzcY3I7bG73T51fFVm6Z77+oxzCS8wwxGpu+TSWct4Sm3DX5+ecHNquDlPE4GIuT/wep44yRHnIsd3P6DMMXXaFQIxRfqDwbuIj2dgwM4jp9HxH//Tf+bTpyfCdEYbyf54x+U1sBu+JQbLeHrm+f3fcbx74OHuiB+fiQH0sGO/38F8QYk0Wbgfdtw9PjAc9vSHOyQO3R0ww45OZxB3OKaKyhhQw4DYDZjhgJQ6FW0xZFfX56lKub49r7lQhuATXhO9S7hC8Kn7EjGPorMpLTZd0tpKVXslAogYEMER3Aw+oBDsutQezXWSaWdwzvH6cmHY7blXPQcfuIwT0zjCHDm/Wi7TyGVOczNDLnSrzTzEMo46ZIJWMTJL7K6ZrPwAAGwLSURBVB1z74Ilbl9Cm0VJlIxBVTCNta79DZqQYOUliNKTIE8JaqpjUwjbWDhExS/KsWMkzwVoC+9jSKWKhQlY3ZzSCKS0D7cVHS3khHTCGmlyNxchMEouaPGuYzjs6HcJ7Ov7nq7r6fo+DUcwJhWrVEtfZj4vlQShCL9PLv/swLqYiyJa8nFJabIUGdWGJBnhLwUUsrRRytcLeVpt6kATRURKUvrIWjrhQaTZcmnnmuqloDUmpIGUdtbobmD/8B16eMA7ixASHzyXcUYRU/1BhCgN/d0DnE6Mry+8Pr0yvo7Yi8ULiVeSy/MJ9eklWbH9Dmk0Lgj86YKcHUIr/OuJ59cT7z+/cj6deP38gYeHR4Qfcedn7u/vmccLquvZ778HN4E9g4XD/SPj6zNieoboEd2e49tv6bSk7w37w4DMGI4Q5KYeCqWg29/lOv1Mmup2GSXPMxFDajJa5k0WQUGqJYaWGjeXEBQUIvPgFTE4Yh6x7uYxF9QohE7NTEJOwQXvkvfhEwdB6y653MJjfWr4OvuIUophd2B3gOhG5gs8q8DLSRGEYt93DMYzWcfoQga8K3G4pvaUSs01o2syXmRZqG3TwgqAKwBi13U5zTiT63dWHYdEY+brMI+cQixpxSSiBQdY2p4X40lr5NRSNVkUjY6xJPrEwnAqcX5p7Ol96tya8+MlheHcnHLkPt1QpXTO5WfQTqdCncPhwOG4Zz90DPsk/N0wpMk7WjZTdFsjHPP0FZkFVSYwLyYL4XzMYJ/I6b7FLVppzLroLTmoAfvI8dC6GUHhNqWFzG4meIwMiS+fvYjSWqkWLclYrYQxhuAHQoj0UjOPF6ydsXbm9HLieDgktlzuw+e94HIemS8XlDGYXQIv/WWGfuDwrUFIz/j6wjhdkGOfPBSjEEoQdM+PP30As+P54wfefv8rXsJEsOdUDdlrRJgwQ6Khdn3P0D2iROD89IHL82fuHh5hDLjxjMDR68D+7pH9wwMSm6rRCCjTY4xCy8Cwv08ZHSkz7bqrE48kSdjUbsj0WpMrQmV1VUsWyds0Zs0MqQOydzZ15QHcPObwoUOL3Hgkz+uqFXZ5qGYlnuXxbK7GyKnfvlIRpTV3QTJHxemyYwpnhB94fNdjDvdM1vL5+ZWnlzOddcxuaS1Wc2Fi6fDr/TK6vHXVvXeLMFJanumFtx8j03TButDsx5JJEBXMKy3YS1VhUSLVzaXp65mrIVOYFHMtQuJ3dF2fRrip1I9QF5ZfSQvESO7o4nNs72sHlRJHL5ODSr4zsbD63AByNwz0nWHoeobdjv3dXeLt7/pctZesiGn6wpUmDsU1TzPeZCbqiNwdZsnd11ReDl0KOLQ0c2wAG/Jwx9gK9+IVbHsSxEbDkxFYKdM8gU6nTkOieEs+IKIjZopY6pFY6JaJLNL1PeMlp3z8zHx5QeueabJEkYZoCCkRSmGGXeKT9wf8pyf8ySL7gf23uviciZRCgGjTqOjoOJ9mPry85/nlhFaa42FHmF759Z//C55//Gv6/o79YeB4OKSS0PMz+30aOOrniePDN4zPHwiXJ45390xEzC7hNLpTKGFRskMPO2R09LsBLTxdf2A43GNUmpaUQMEyN1BWBaB3RwrKWl1k74i5AKd4lVJlZp29pNdzXr0oWGLKhhTjZMdLEkylUd2A1D0yRiKOMJ2x85jCBucS70D32aMMRC9AaIb9HQwPqHFmP028vp749HpGDfccHmfG2WFtqcmPq/HmRdhCLvRpcQGfqyhFAdbJ3YwymN11XWUDep/B88xnSczCpFgEZMVqKjux9jFsZgqkJji5uU2uhkyeRHpPIXgZY+pcD532cUyzTkNmGwW/4sP7UPjHstZCK6mQXRIEozWm63KTiB27PjXX7LsO0/fs9kf2x/vUaKLrUgpNa6Lu8bIDFERVZwAEDx6ZptT4iMsofik6qq3H84KJqnXLOGry9JXSiYXcL7llCZZRU00XJLFUMbZgSwEGtRQYndpUi+hr2qa0YvZ5NFNpwZRISgPj+SWBdVJhiTy8/Q7hHfN4ws9n3HRJxTghoPcHpHOY/RHV9ejOYMcL56enVHAkQHqV2WKW0VpeLhPvP3zm5XTmeDziL0/0j0dUGHl4/DO0e0HYE7v+gaHvMXf3uH3i99uXnxKvX0C/ewB/huBS/UCnU2q2N4jgIc6IaDDDPqVkO8NweEwzASQp7VUq8rohdxUylQcf3UzwaUahnye8T8NVIyJRsL2tysDZMfHVvUutyRGpHXsTiqbWczGxEYXKSuScm2MmgdX9nhAcbpoz0a2kfCVDJ+lFwMtILzseD
j3eDkz3O37wktGl2oPZemxJJ+ecfymxTR2nVU0V1r4aWY5itehtXE/tZRADlUZcqgbJgl9a65dSYCV1BQbL6HSVuy+Xfoqdyc1tpEip1rDMZCy09NIzQQqJdm7pkOoL4SAmpVAaOpb2SUIIjNZ03ZAHQAiMSVZ/6HsOhwP7/T5tnFztJHWXOsvs71HdADq1oA5SpjFXQSOdREWBzLXQkVTu6bzI/P2knFKr48ZHZ6EixxS7JPdMpsaMRfNSPJci/MW1byqRS5VChgpTfrYipxElAkb4BDARKXPhC3gT8ow/QRqJFpVKMxWtTR6D1oQ5sNvtEERme0Gr5G1FJTm/fMJONoc4AaE6+sOBN33P6+cPSBWZLmdOpwuzTUr5w9Mzv//wxMvpQrATh13Hm7sdwgXujx0Kj/Ynfvj1r7HnzzC/4pQEZzFGcXr6kO7xPOJjasnV7+5QKmbMpsf0JgGkIeYGpZLheIcWgW53xPS7aumlVOh+oNsdUsGQ6ZCCZOUvL9jz59RbwNvM1swhnUsl0t7NudpSLX37kEjZEYVC7Xq0MrVwRipD8A47XdJU5bCU0qpul4adktNtYoZ5xNkZYnLL0X0uMbb0XY+jw5kBfTTskQlrihIbZAqBISsqXxUBTb69knwaVunSfCTv09JwNIcPNTtAyjaoPH8j5GxOCQFUdue98/Xv0rnbGFMNmDamhsJKqXwOMjMtIyArzgagd8dH5mlkGs94N9cTksjqqhVCMDGilWA3dOx2e3bDwLDPQj/s2O3Sj84TSFEGoXeofp+mnUqDTyUi2dUX4CTCF2DFZw2YO8CGdTy+cLUX4W/7rKWe9Ko2gSjpwkiJowq5KaX4lnHoMRM2luxAhhxz38KIDjP4E96mopiKBAsJQoPsUxlmJmOIEIluRgaLwmOnJ/AjMoKIDi0gqhTaoCRapCyDHc/4zA8/qx0xKs7nV6TSnGb3/2/szZYcSZJssWObLwBiyarqqp7lyiUp94HCV/7/j1CEIqQM753p7solIgD4YovyQdU2R+SQKVKVkQjA4W5muh89ir9/+4Gv396EzWeHURonB8xPT3h9vmAYDLQbYcnjfDlDwWOcf4Har7h+/4aP7z/w9PorhmFECPzcw+gQ7ndWDE9nXJ6eYazCeD7zoocV2jm4ccb89CKMSg7WTTBSMlZGww4jhpkxHdaN3H68vmO//uBYfF+YTs2c+NllLB2MBSkujyolmX0VgBi5Gy8EKKMkYUhdiMYDSh3cfJYwjOnbSBpxUgrw9w/4dQVUHk82oCLcAihpkF+gDDHwzBqerKOEHUoK2lCSh5IwlECl+pXEKzbWlDxZ9lBNM3A0Bp4jUIF1wuknBiorgJwH425D1XQTcoegE5xMpmCXUU3sDUkOD0pxWVRxlyZDj/NMREZC2v/6v/3v2JYbrt//xNv3r9jWBTF4gBJcHOHswIc+7TBK4TyNOJ8mPD0/4+nlCzflnE5MvCFan6BBhhtelOUON6Wlbhtzbb6SMmY+8ygaUyvpSycUKqeKq65EijXZInBjSSZWiuesObiWrNnHkliUBAueiiLJJZpMgAIQkkrQiAhph04blA7MHKCJm3ugpV4doN0MpXnEM6UdSCtCWJG2OzQChnEChR0UFfQ0IWwLhtMT9m1FnCekfYXVAFSE9xwa+KiQds/riYTnpxknx8+qQFhuC5R+xXQ+I/pdyCwVTqcB56cTKAacJoPz9E84Xc7Y1xWkHOww4fJywfbxJ1OFTVyNOY0K1hBOr79hfnrC/eMN3i+wbsR4fsI4jaCwwJ3+gnGaudVWOBCH+cwdgc5BpYBwf4e/foVfPpDIIKkRyozFnV9vb/B+ZzbhwANpKUZBbnoEie+5XMhlPuQ9Ag/VVELnBjuWFnYWSNlBiqzIQdCWB24qqTxQIsAwJ19Yr0g6MM7DEfRwgXa6cE4qY6CV5bAUKkeUoBgQUgSRkoG5hoFKuckgh5CSNwp+h9J7ySPwgBlV8fraigywAkiK38NRbAKEws66QfIl7Hlw61ROdPMsi2I10c8XIOK8hQ877B//8t+QEveOL9d3LPcr/LYi7Bv8cgNFj+32ju3jO7RKOM0jzpczTk8veHr5gvl8wTCOsAMz6JJy0G6GthPz6sliocETQIQ4EXHLYm7JLCU5gV0SwUvCwxjLY9gbHkHVZmVFE+a565mooaKcFUOHwa3OKQXpX0gypw4yXJIK76FSBAPmKUhKw9oTSK2guHBiKiaewwYFO16Y295agIJo2wiKO498HiaE4KGHAZQMQkwwwwA3zoj+zvV2QzhdLlg+AvTlwmXHqJHEtf3991fEfcPH928IgXD7eMPwfMLLl1+gjcE0T5Lw8jg/P8FNJ3jvYccJdphAry+4Xz/w8f07jOG6vTMKzrLLb5FAYce23WD0X+DGGYMPiNuCYTphmmf2huZnuHESwTKw8xnj5ZUtv3VQlBCWd+zvfyLtC6AcNyohIfoFfl/5jC1XhI1/JsGwM2IzImwb/HpD3FcQVJm/SEpDDxOUHVgIHBO0+vud4103wEwn2PnCPI1uhjMjEt6x394Q/A5tHZRxDDsmkvKiR6QN5HdgSjDQSNEDiicpW2U5iZtklmFJmkvOB5o7E7NVF9uTZxloYnxElKoZJwiJDQkUj1AHQLr2ZRABIXq2/lS9DQUmG9GUGY2oKTE3TT+ST4HShdU7xogoFYIUA6xxA6waMU1nXJ5/EYpvLqls6w1+Yaiov/4AkudhGuPA7aKnC+w4sYYyA/R4BrQDwSLm3uOClGpADiKYJVNO3LNtRV8xRjpzFTIONKaERBYDsSZXghNQHK+UgRJa50EL4j7JKGat8pyBXLNNFXJKme5MyagrjvE0CFGDZyUODqQVgMBswZkJGQnGzVz3zsLvV27Z9Huhc1JK8UFMHikQEDZoZ+CcwW4BGgcYvGC93bApBTtN0gSjoOAAbTDOJ+y3iN0qJj8dX/H05S+YTice6ZVLkhK6cdeYBgX2Ktww43ROcIqw3a4wtGN+eYFzDP3db1dsy8oWeL+B4g5jB0znM06XJ7hxktn1T5wEDQTteE0ZxzEAKSLsd+wf3xD3O8z0wk09cWeEZjT8Xr9BIbF1TR7+fmU8iTIc+ysetGKHCXaa4aYTMsZUuYnRllA84zB4buJZF8RthV9XuH2FGZvP2AH29ITw9hX+fuNZiJSghhEEplJXiRDDHVj4bGD+AmUNyO9cprMj3x9UyVFEIgHgSM5C5itoLZ5AZhnKcb0k1LlzMA/3RIEZR9RcVvFmpK01RB4xnqsL1tR5iCEGKeUHwdFoYexmhGZOavvdAyrw8ypiVuBCCq41rMRHSikM8wXxsiP4FYj/xLx4Os9+cwLC4ASJ0ppLWrmsGGItaUgpxAg6KZVsPSdDePQTC3nwPHCDW4S1oA8J8B5eG0THQzzyXDkoTvhplWMnVTyMioCS/AGLdCkVFvco1bFWGeKcIdKJWLnEyNRSMBbKZBx4gFYWxjGQSSEhBabk9sutNOpYa6AQEb1HTB5JRygVpa+BcHl6wTyfsHy8IfkFp5mbnaA0
zDBiXxcATJSCTeH1t19YYWlGWVLcQfvKTVSnEytkIkQaoYcZADG8WBskmpg6y1pobTGez6D9ju36gfv1g91wZbGvO25v30HK4vnLb5gnfr5hujC4hwTVOF9gBe1HMSKGFXFhJmF7+gI9nkXZj0CMMGZiPsPpC9TphvX2BjU/Q03vXA1JUv5VDOmG1qwAxhnacgsxpP3W7zsnrc2I88xAq+X9G/z9jhAS9u0H919YDhOI2JOIRqz2uiIFL9e1YhAIKnmQoCLVcALZGTATJ9SGE599pXnQag5BM+RXvNokicrcBUhCLFqwA4ppyjK5h7GugIxiCYcFOSheRUarqozKVaa0UTM/AHsOSApRciQpdwESFcg+FKASsy/bzARQE+RteUzDyuGupbM80UeXBy6utgBjcvmiwvIyviAUDH/WbDmzTxIfLcsd27bxeCjrMtiZ6Y2krTLEEUOMpZ0YGjzNV+WQgBprKP+hAjgKTkRahnP+gUpbZx7kKNNXwXDNqAjRWiZOIQ+lBgECRaS4AXHlxB8SnCEBOFnJkGsZ2WyRAsET12iNijDWIKgIfblAKyC8RtjxhLBtiDHg8vyK7fYBpYBp+iuS0Jou79+xLwsrJvLQm0fyV5Bx0PMzx+OD4x0eLCuNE1chKHnEbQOgsW03rPcbkHhNv72vwLhgj4RxmjH9y7/CiJflxhmgCG0nmHGGGUboYeRSV4igfQMAuMuvgBkYVBYCgt/gt7VAeUlpKD3Czl9AegJph0jfELcVxjog7EielZbfFuwpwYwTzHSGmS58EiV/w70BjqnIXn+HnRds1zek2zvitgK7gptOsOOEKAAhImDfd+g9wY4JhlCGiyTxgkEJ0a/A8AQ9BERopH3jkGCYZVw8Jwa1oO6YD8BWgTT8epDefyWluDziK/fSaJNJRvNgHeJyc25M49PC/5dcWJsMz0ls9jSo9FqkGBFz5yGYizBLgzGKkYAle1CaDiDF8woxVEYV4al1C+ljpgxLpMIi3I4Fk+o+QmwGKUpt1BiDECoDqzEO01R7qVvqJRJa6n2vaD9yDjDc0WfISMIw1/NrmyQE+ptEuVHRvlSgxlxf9rKQBgoWCkzQSIY9DQZQRJjcNQfwoEdK3NqqeeMGEXxo4s46bYBxBKUJyXsMw4awLQAiD4gAT+dV88Qlr2EGnU7w+walDKxKcMMMN05IBPjtjri8Y5y+MMvuDw9/+wA59uDSekVUhOQz3RRj5o11BSKaYgD0IFyAK15eX7BtO273DWraYZYVf/3XMyhssJdXDINDho0lOY46czKIIQEIejgD2sLvHMeHbcG+fCCsV3aL/c5zI0kDbpauQA07nWQ+g4FJE4zbpAcgIuwrgIh4f0Pa79BuRPKBYeBKY7tfoe3A1tmwZ6JA8ADnG4LnHJDmseXRM2owwoAaRh6O27m7FDbCnkZmJwq7dJXekLYRcXrGcALcOEnZjklLxmHg7LsXyy/edIo8QBcirJzEhQx6CUVJIATAcE9NnnlQJxDpUo6vPQRsaIwVCFuS/hTxkki8cJDQisskMAA8FDeLaIW05tgcVTEUb7rhSuGuAwbvpBpfK8WAIc5QSgtlaW7Igw1jEeAUiTc2cqmDa5quNlIQT3hxboAxUUpFGTAUwRVEHpSpDMHAllq+0gq2QH+FBSXVJiFOQMauT0AryPQZnh5EFEGRpBWZcw7WGkx2gDWsKBUSjGpYhylAJXApSmrOFBKSYgqrlHkKhPUVZkKMHmm7QRN4JiEStHMYrANRgjO/YBjP3HacEjYD6L/+K0gp3H98g06vUF9+hbIjDwQJXppFeMBlDDv/OwYuy613Ts7aGcZYTNOE6Dco4/D0+oIYEk7nCfPoeHAIWDmGfYcdZrjxDDudYcczK4C081oaC9IWKbDQru9/Yr99g79f4e9XKddweMMU2D+khXqSsDDB32/QjhOk1g6AIk4GCilmhGHPw0XE9zfst4XPmZNxbkpDGS7dhRAR9h06cYnZjhMPbBUkqdKcOPM+ijvPwCKtNCisiPdvUHGHMhNIj0iksccAhAQfIwZ/gTIWu/cAES6Xi5CU7MhjbrTRMEphnKbSkWeNYSiujbCSAHWCvoUq4PyS9E4yRixjC0JMcEZDS39M9n4BhuQrKCEbDRW8V5SJLl6yLUi4nF3PtfWGGktJRrLpTODXKfd0Nt1JpKVRQUogOR6nxEQRYHbWMmEIfVkPQBnAkBlNMokCKQ3rdBmFVMgRCrqvCUMAaDJIiskhahsHyXOQxEwZO6AAsEufIc2Ds1BIQPSwBQFIcDrCmQQNDyQPJY0qRJGFXhY+j1FL4qZCWyShc1JmQPRA9BugI7vI9x+gfYWdLhie/wI1nHjdrMXpdJGOQ+n0mi4wWiNsN5iXV6SXX6Asc/ynGBD8ju3+wYM+EzPvhJCwvn/H8v1P7PcPzvNMM4bnv2D3EW7kBq0XGPz4WDGfJgzTBK01gr9zAs8YQAmjLiWEfWPFqBJPtDGOZwf4gLCtQnDyA8nvjJ0yDnZ+grITLABSit+/76B9R/I70npD+PCg0wnROphhZGAVJUTveVgJKbjzBfb8hNN4wn6/Yr3fQfvO49y0ZhCb0ohKwy8rwu4xPSm4mT0p7Yi7Br0vKFNtjFQduGkmBo+w3qBchMLCxDI0IpGBT1fsewS0RpAsbJREHYex3AlqjBFCG0FGKoWYPU4Bu1jnADgpd5tiIDPbT5BJ2yJmsJlHQHoDAJR5BtnjG8YBFPn+Mr8FVwWqB2H7waAFrl0gjCCeyK45nS6AIPCI4VxDR+XQK/iK4kBUuJ1CbRhKlN3GGsdkUI/WpvCe50VIgbumyDpYpyuqT1XFQUqBTCUdBcBTdlPL/55DCoVZhonM04RxdHKQdXGVLJt4ILK7bzTKdFZKgTvp4oYUVqTlDWFbGeIaNoR9KxOVoue5gQRwA9W+QY9PABTCcuWqAgB/u2P/+A7j3jH6iPH5VyBxDdtNZ0HVRWg3QWmHlMBU5PsKUlwBGYYZRNyNOIyjWG4+lKMzOI0viM8nLLcP7PcbjBthL88gJDhn8P5xwz/+8R3z5Qmn04QUA7PchgA9Omh3AvQgXZns3aXoOZGrNcK+I6w8VCTsC8LOMwzMMMOcX6DsyEk3bRFShFIOOGkOR0Cc0d/u8Ms7K1PZ18LDOBk4pbFdP3D79ndAW7jTE/RwgiWD7f07YloBYwSaHeF9REyARQKWKxIxdXlOPhnF3mOQUrQSmq4YGPQT/Q1hWREVl7mTJiadMRugFyRwa7p1oyT9gG1dGeoLKvMMh2HEfJqbWYY5bAaGcSghb4ipUx45Ud82IqUYROkwUWkO4XmvuIpmjEVscgWJUgEv5T8Wxz8yt48huRkdlxtbckKtutHlZ4DbPkm1F0MfM8hIpVZJNGEF1eSCWDKhUUqEDSvSFrGuHjby2Ki8SCorq5SQxBUyWsvhjIwoEyAJySAPoxVOpxOenp4wjdwvrgFpyrFlagylKEgwBdIyoRbgMEYNIDUgkePMsk6IxiDsEdv2gbgvjE5TBgkGfucaeNhWYE+w45kP53Y
DJWD7uGH78cZw22TgYWHcgLjeoN6+Y3r+ApUi/Nd/h7YThssXkLuAYBGWD2xf/x329MI048MArRKss/CrtNDuK4xzGJ+fMT+/wO877u9viClgGAfc3t/wf/4f/xe2CPyv//xXPD1fGMYLcJnTWIR9K4k+I6gziIAmAvzuRQm8I27vXG56/gVmeoKyI2LY4dcb5xESgcCoO0AxvbubMU4XjM+/lZJZAWtRxL4uIMOTis39hvuPb7h//wojpUJ3vpS5eVnxuXFAvO5Y1w3bneDGDfPrK4bpxK5qBNK+gWIUHL3lYZ8EyZyLS24HRoEiwETGLmwhIWiLLQIpXbnL1Q7Y9w3ayEBVywSwIf7A5XxhFJ8MsbXDwOAhMX45vNWG439yDkYzlsYYXQhE8mhwo7nKFGKE0Tzm3drsGYi/WzwHJZWuVIy+bcEy1YXWgrJJqOKpipedX6oU4DnRl9P+khvQNS5hCw2AeLSTKi5/Dj/a+YHSYGFrInEYR8ynE0IelyTdTbnfP0lJMZMkaul2ouzS5EqEzLgbxxGXyxNO8wgddqT7wt88jFBukNnq0tAhJU07jtzTrmVmICyXCrUCTQbKzMB2A0WA1gVRyWRX2YzIvXHQ44kTqESAHhDWNxBpRD1iD4S4fiDqEcnNcPMFcd/gr+/wIWE4XbB+vIP8V0z7Bnd55WqGcvDxhv3b3+H3DW4+I253xH2RHv4B0TDHXkwkyTYuNVECPt7e8Lf//u8IKeH55RUvzxckvwPGYt9WBt1oAzNeGEAksXRxJ5MCCaOwcH6B/AI7naHsUIg1YwiclNs2rv37HaS1QIA1tNTrzXASbMEoiEvJ9cwOygUujdoIc3qGD9+xXj8Q9o1xKXaAigFWa5BM5jk9PWG7a+z7jt17pO/f4ecddj4BCZwr2BYW2oE5BoL33G6uLQAWUu0mrrv7iC1oeLKw8wucHvC23hBixDDwIBGihMHx7AMrGJVluWP3uQJkMJ1mnOYTAGD3ewNmY6U6TyPjWAznoLZ1FUZqXnc3DEJvnqBVwLIsDM0fHfZtZzyA4tKklf1XmgeqpJRgmXdNoHBisQnsHSnKcbNIb040qLYJpzYslPdLIrB4A/lzknHvrT3EvStahefy5d9rVkg8020AjTPyUAS+VGZWyVdVZVgDJJwoPkhOdJIWF1+D9gXLt79hf/uB4fIK9/orZ2S95w6yEKAAJrs0FkqmtBQK8sjdiykoULJIagKmV+ikoPUb9vsPhP1eEG1Mg62gnMRq0rACBQyXV4T1juudSUHM6QmkHShEbMsdMSacjYO9/ILtxz9w/fYPzERw51+gxguwrQyfXW+AHaDtiLQuSN7DnS6g5LHdPpBigHYjYCy8D9j2gI/vP5Bg8Nvvf+D1119AJKQlwwzsPE9QGW7zzfVlJMmvEHND1n0FoAzUcGJXWyns13f4bWEvzW8IfpNGqggVPTh+Jmw+Aj++QVnHnAOXZ+hhRkwKsFbajS3MMMJ4z5n/8QRFwL5vQoXO49E0eF5EDB5REaxzDL6xFnHfsN8/EP0GM11gzy8greGXOyCUY0prqBgR9xUhEhJWYHwC2Yn7WOLG8/eiBg1nWONABFxvV2653neG5YrFfXl5YSKcjN5TEfHGk4Fzw10eWQZBES7DAG144jVAWJY7d1s6h2kcsa0blnXlLlvD1Go+BGitZQgosG3ZaMoMBG0wTULYQmAXW/KVJemAQnZQefWyNJeR4hl7DXH9VYVAFn9AdaeiWP3sQZQGnUahoPUOSjLBSIMDQcE0HViAUfWz5f4P98saMyEFIUyUvvL19gP3v/0/iMsKGk4gHxC3DcvHB/bbDSolDOME9fIKO3kozzmRGGJpCU2R59NxpjpIEOMQ1ICoBkTsSGYQ8FCCStnycruosg5xvcPMI06//RUUPZbvf2L7eAMMA40SAfu2we07hvkCNT/Df/0b4p//wIkMhjNPGlIKfGC3BcPpGe7pF/jbG2KIsKdnBvnc3rDfb4gyGFUZjdPrr9jpB/aYMDqDj+/fsKwe8+UJf/nnM5QeQDCcOU9iM4RJKRInY7Vx0E4hRs+z+sxQejeMHUqvhPUbxiQeZNwBigwiigFx99jud+zLDf72hrAtrAihmNEZCto52HHkPndjWTFrDe0GThLe74IYdWXAi9EKwzwzpj5GmGFA2lbE9c4VgfmCpCyS5lFjbh4x2hOi3+A2j0iKB4SGK0LcmX/SE5AU9s3j9vaOaEaQcSCwtb7dblBK4fnpGbvf8I9//A3WDTifLxjHAeMwwdiAm7/i/eMdIOB0PgvaL/KMjGlC7uzjwpvM+QsB8zTJ0BrCNAlKc5qxS8PQvvKsxXXlCopzlmcVKmCQRLdtKYeV4mYDRZXphFS2xtKD35QLs3C1gxQKhBGMolMlPKgWun5WxLxJVGThz5jnXE6oJIeq+X2uIbTeSb5+hR/nZiKlFLuV7PZg23ekbYNPGnAn7MSWar1+YHl7Q9w8hnFgR+T+jhRWmGHiEEGhlJhSCCDirHcKG4xRQPLMWpQUQtIgspLw2Tk2TZKUDB6kLUhpxBBhxhOGX/5AUhpxXRG9hzs9Y3z9DX5dkIgR+DAOepiwXd9A+BuUcRjOL7BnjRT/hL99sIAMXF7zb19hvYc9PcPSM9bbDfv9B/bIBj54z623Brh/vOH69gOwM15+mzA4xQNPhpnRlJTLuxk7kfc88VBRyixHCpoAaAMznAQmrmDNxA08ACjKXALVtFYnwna/Ynn/ivXjDfv9XhK968LTjbQbML18wXDmngduKV6h3QAvcxR15Kw+BJIOSpimAcFzR14i7p4zFkh+RQKTaGwf7/DrHdPpBDdwcpi8hzN5wu+GdYn4fk+478xOdQ+EjbRAl89IBKzrinmeEWPkEV+RkELE/XbFjx/sHUzjBOccrtcP3O43seSWm+zGUaC7CsM4II8x37cdy7oghIDT6YTz5QJrDZbbDevf/y5ONYfIUQaaphhlGCkwDA7L/Y53pWCJ6pABIsVtrIpHDSWtikS32GQI6EGJmJXunMMfElwzmuxlrhLU0IL6cKALDarvnkuFjS+fwYng3jC0mqa09EZo6KQAzYfMZA45KGw+IekT6PkPKCIEbbBer7h9+4b9euOuq3lESAHXr39DXG5wT18wvv4GCNnCtizY7+9QiExvhoScZE3gQaGBiMk8RINS0pzd1wYxAhSJQTGBkCwQEuPg1aSx3RfA3uFOZxgSDj1wKdBME+y+Yvv4zk6bsbDjCXZ+Qgo7YvQAjYzI0xb7egfcDDNdMP3yB3z4H8D9zk0mMlN+0ApvX78iJIUvLzNefvsDz3/5q8BSeceJUPolsoeYMectXYNSHNMzkvsOv3xDWO9y/8zg46aZQVfaFEVthgmn8Yzh+TfMyxX3t6+4v31H9Cts2LHddly/f8Xt+3c8//HPmL/8iuF0YS9sXYSQQ8Eq9rD8tmG/X2GMwjzPTHCimXh12TxGpeFGJi7dfUQC4O83hOAxnZ941gCYo0JbjVEDF2JMPmLE19XjvkZsEYj3FdDM22gd5z6WZSlO6T
RyyzzAFaGv1w+M44Tz+SI5LYVxHPH6+gX3+x1fv/6JYZrwxX2BcwOutxvWdWHOCKVwv98KviYPCjVa43b/wLatiCEyMYsx2HePKHDkp6cnKA1YagRLHCwRbIKKurwuNpspxFXW2FQErZfY1pq3CqEmC1WrVcS7aN9VqbrqNTOHPydAhblYAEN85/laVPABShEP6CAUeLPSqpQDlR6gZscIteiRtAfGE7SUDiMpbOuC7dvfsHz9O8Zf/sBMfEgJGuv9A8u3/4DVCS+//go3jgBk+KIP8CExd2FMJSsbPQOCrAPnDzx7BLCGaar0iGQmxAhs+x3b17/jjL9yrT0m0ak81lwNI5TfcPvzPwAQnv76P0G5EXq6sLDtnoE2wwkpBmz3K7B7BO+ZEGPS0PGOsPIgzbTzQFA7znj58guenp+FISrCziNiivDLldlw3CiNJpxojYnLVxqs9622UHZEuH3H8uNvWN++Yr/foIcJxnFsG07PsNOT/Fs47zb2dJRjuPHpy+/Q44ztfoWbueFsWzd8fPuK9X7D8/2Op7/8jmG+IPgALRWqGBlZ5/cNmw/AxjMex5nDMTs4hBARI8EkBqTt64b7skIrhdk6Jq9RBmbQQLamCZiRoJXDYABNHiom/Cljx3wkkLawbsQV6FrZh2HC+fyEceIejX1ngNbpfEEiwr4yPPvt/QPL/cZK2Vq8v73jdr9jX1dM84ynpwsGgWB///4N//Zv7xiGiUlf9x236xXv728Fy5Mo4X67cpLSDXh5ecEvv/zKOIBiMYm729gF0OgmABXaYhQOsoIYKI0ADdqvUQl51kAryNmQs+A3hcA2pMjC33kJzdTi/FqTP1CdI1E/X5vlDiVMCFSYCNAOZnrCbEaEC4N4EHfs2x3rHvH+/QPaa8zawc1PMHbAdnvH/v6Oy8szzHCGmSZQ2JDSylhyYU/2+y7xl4LfNoRthR0G7hRklglGFGoChEthvy8cQqx32POC6fLMFQVSUMrBk8G27EiBeLjI7Yrh9oH5y19hpicePLneECMxEAcEO00MzhLijRh3wDiM5yckumG5b7i8vOD5t9/w+vtfoe2A9XbjRq/hBAoLt93aEUpozDI5RyICBRkMCh6lFKOHv79j//jB8FvtkGmdUgzYPt4QI2E4v8AZx9dN3FNAYZeqDvekxClBGYPzMCGQwrLtuH4VDgu/4fLr77DjzJ12ced9FzCWNhYJjP/PJ0dbg8vra62LU8IwDvBhgvc79pCgE4c6PMuSuL5uFStgIkBz49ZpXHAZCH9+RHxfCG/bhsXvrKxl4GhKCc4tWJZbAewYYzCfLvD7jk2mN5cJQlrjdDrjx9tUfg8FvL5+wTgM2NYN27bidrvx/W4blvUORRrBc6PU/X5johGR2dN8wul0Kr0uts/Gt8a79gKUoZ9o6pTCvgI0BJxZdDOiSDWCneP07EmU19EplwobUEd/orTzUIYn5/Jer0K6z3avUw5JqqbIZcT8bqUtzGCg7ShgEC6Fqee/IJ3fcbtecfv3/8D4vGCYz6B9gzYD9PwEuBNgZx5wESJSWrhEBx5csq2rkDUQ1m2HDpHRYcZwcnK/Q/tK7KCHGbTckJTBdrty/7qbQIE5pCMsonJYb19hrEPSwP3HV66RT2fuZPMB6/UN2+0KlQIuv/6G0+kJemB+/P3bd7x//Qo9jBhOZzhnMYwW8zQibTfcf3DvxvTyuzSZOCgrrLzbCjuMUmkh7odPJIlZA1KEuLNLbqYzMExwcui1cTBukoYhBQKPK0taAcqBEDi3ogKXBkfuL9gVd6K+/tN/4alPMeH24xu+//f/G2FbML9+YRozowqpKElbth0HaQRigxC2HVpzZSHKKDujgdNphk+TnDiNlAAPRr6mECCYQU6ABs/dqlrDaILTmpOauWMvkRCchEIBFkKQJjMwb19MDMwKbCxKVYuAb1//URrvhnHA5fyE+/2K5X5lcJYwDCul2CNIhG1fsG5r0wK8IWgN50ZOCm4rnB3wt7//Rx0N1sbmEB486oQYJeimpJF0ahpuMvxWqMNKW+5RgFX16gk1hs9AAm6fYYe+AQo1Ilx/J2CI1NyaKokA9MqkS0Pm6kKTltQNKagMFRFaBq592wGkLYIyUF//jm1dsa4bvGcX04Bg3j4AGJyfXzGdZiRYhBix7x67D1jXDet9lSlClufQeR75bZSBVgxy0QPJ3EE5YElh33aQNlDLgpgUhtNZUGcR+x4RlQOSgk4aPhKW2zsGZZmFRzu4mfET+23Her/DPQWYcRDcvmG+/HXF0y+/YXx5hdERRnPXplYGyg3MyyhhQgxeGm5OUJEEIapAFITYk6Acd8Qlv0O7CdOXE1LwjBq0A8xwgnUnKDtwMtXviIHXQCmFsF7hr99YQAXpB2tghwkp8DW+/PN/YbKNf7NY3n/g+u0rUgwYn18Ydi2erHIWcQ+gEGGMDHAxhsdpbwtMdNDWFGou5xymaebKgw8IiWCgmAtAWUS/I4UNUArruuPPtw13n+CDQoTGaRygHXBdPW4bh1tBMPkxEzeIiIzDCLUxCCmLTB0C0iS1FFdX/L7jdrvCCi5gGpjleA8egJbBPbHMTszU5JQI+7ZxOCKcgDHKdODGu+6FTQL1kvhXjVA3/na1/e1k4SrlhJ6As5PJWudD5UnLJUXJ85c2hXplFL+jViao++aK/mc4QTv4vGqHjEQsnkvObGqUp2Jc+AuMG3H68iu25Y71fsV2fcf28Y7l7R3r9+94Hwc8//4HXv76zxhGCx8SlvsV94831trXG4zRGOcTYAz8usH7BUqx9UgxAHGBG5isMvkN274zeCUEwDIhJmnDHAykkJTFFggIO2Y3QCUF4wnK72wNlgVpXeHmJ4R9w3a/wd2ucEJi+vTlN1zfb3j//h0xRGwqYJpHJG0wnn/lOvd6hdmCzLZncg03GWgfobA3RyKUYaOJNGJiNiGlNMw0wZgBOnmolBD3FSlEnpAEDa0s1GCgvGch0QPIjNjWO7AJn990hru8QMvMgWG+4OX3fwFB4/v/+Dcs7z+wXK+IiYQ1yCFPzeFZfBF+9YyGPPMg2hQDT3HePayzQrOeYLXGIBOsb9cb1pVRlJAEKBHzFU6DwsvJIFwTlo2whIgtEDafsG4MOgqSMAUSVMxeM4RmjNl6eG6kLhU5Y3itjRZgkCT59m2TxJ8qfSyZPag58YyVaAaAAELDDkAnDdIKu/dVAWTrTI1QKrRmVJckXLHkWYCEILO+P8ODi4nn2j16p4Cte59orN7AEUYsykhaNrOLkKmeS+6gvJVwdEEeIcflqoDiXvDWk1Ci5DS4F946i2k+Mb58X7Hdr1g/3nH9+idu3/6B9fYO/Pl3YBhwenlG2Lkm/fH2htvtin3boJVCCBF2GJCUQvQBMW7M50dM36QAjKczVAzwe0SAhooJy21B8Ay+GaGhhIwlBIJfVkSK3HMvjDp2HDlReXuHMQ7QDikErLd3BGGIjSlhfn7Gsu6IyiDu3Gzz8utfQB/vXDmAhhUyjZQISnvpYbdcay+gHl5NzmbrUpkAlAxW4
ffwEJANSu2FAzAlgh5O3DMw6ELQocwgfQUbwv2OmBTs+YnHrIPLbucvv4GI8xvb7QMhBuht5VmOhumvzOBgU8L9GuH3DTFFOKE2H89npBixLUtJDpIMHTHaYBgnJOx1OrBSSHJetGKCzvMkhDbRYNsD1hARMtuV1nCqHsnUmKsQpMmHmCczFjZgroowbVjDpJU9Y5FBn8vbWTmD70N7ZshSmQ2rs+8JSvHvCh9AtbAZ1NO434IMZtqs4pEUwcsKpI/5qbr3rH8AyhNhcs0+P0qWWUkCZmEm1J+RmU1EXWTapCO6kFrR7gW/u1f05clMJJmVGr9d9+pCKdgyYWXAPJ/hn15xev0F6+9/YH1/Q/I7zHgqJUBSA8gMSGQR4wbShHXdgHUTfLdB2D38tjHNloCKhmXlgSLvV8T9jnEakbZdWnhXFnA3wscEsgNIG86AY8E23qHuVwwyWYm0Q1JcC9ZuBBF3re3bAu89ti3Azk/cWOMAdznDuEn630XF+h0kZB7GTdDmDEoRYV9kSjQz79hxkj0EMyXpV2TiV4oe0a/M7x+jKKXIVQqlGeAzn6HMCJsShvNLwfSHfcO+3LnZJiT4tIuC1rDjjPn1VxkVNmIXFmBFgGGoG8gYGGcxThPWG2fTQ4xcPbLMael3j+V+B5kBOhH2PQgC1WE2Fsttwf22iFdDGEZXjBwLpoGxgB00jE/ybMyboJQt6FENCOkt81HqZuxXbvgxeeIQJUbGFkOtBUbPwy90bo3PoDKlpGuQpBdCpm4V466gk0IUHEc3HjxDZQtitspM0eit652FopQH5QYp5wFa1hIkgDhZxMM05SokXgWpTikc8wf1OihVgez2dBWHo58hmOr+taoWul6FroygUNVTLWDmtzCmgBs23Djh9PQM/+vviPsGJmSNCCMz3aj5Be70A8vHG3fH+Q3bcsO+3gvbEhOnMpusDxH+doOPPP7cb4ETgj4ipR1kViQouJERiVy7djBOA6MVMtEItXto8BDRGALMMDP5ZUxCaEr4eP/Ajx/vuC0RUVv8L//tf0bcVsTksd6uAMB1cwVQCnygpAegcNxJi6lKnCQjY6GUZZIKxfTfJCzQYb1jv9+40mFtET7jJpjpAu1ObCSg4PRFlLAM5QgMttqWG9brD/jtDoAHYGA6I+weuyhWaIO4bUxAkyLC7qGUgpsnKGehgkfYNuxClBIjhyNuBudEfISOBK8CrDWVxReJ3fp9R6QZozOwmnAaNZSZMMwabgw8JPa+YfExZ8YQvIcPfB/esxdFup4ypXjqVK5sJWLup9bAaYl5Y0ZYyjDeOqYclSovg+CacDaD6KIMOrDUyItqzXtrOctNsNtYo/2joBRV0iiRXhHk6FzV+dCs0XROQuoaDvRggVK6a3KVNXZvEpL98ORq7VtLfvz5+My9F5HLiShKoHlUaKUxuAmDG5Enw8QYEKYL3PyE6fk3XH69Yb+z0G/LFdvthuX6xkwzkSmz4r7AbytiArz3wB6gwHDdPSSQNdj9iqg0QkhwnpuiYCzsyUGphJg4ManWBX5dhabdwO93GCip9xNsAie0SOPjuuAfX99hhhl//ac/oOKO+8cHAMJ8OaNwQkQpC0sYFv0G45xYfW7pDYFHgIfomQ8/evjlA+S5hJVSAukBdnqBPT9zGdRYJvPQjq9TINOKSV1l/JUZubZuB+7LWD6+I/oFgBBfZlDSm+KuRZWPvEKCht937D5gmCeugmyEdeVpQ+PpLBOtNbRKzMQk3Z+RmNzz9v4GSgFWE8jyHjATsGJ+xcEhRQuvZpzNCTTsGL2HF4pwL0AdgLCuTBzLmBRmy8oDQ6AYp6ISoIwMuBEGYqW1xPF5IhbvjTEGFAT23niwPDuAS/qAFOeaCcW2t265xt6U4PqgvQpz4+7Xun0jnrkUVzAAjWhlF0b+nS1qFfLWUB+AQK2Oaq6tuu/IoQOVTx2v1Ql9Aziqz031d63akQpEISFp8hQc8xpYcGxs3YhhnDGfnnAOHt5vQnrKfAH7xm26Yd/g1zu8eAXL7QPr7Yrgd1DwUioSfP3oQNDwISDGG5NW7DuzFJ1OUNbxIPPAoJz7x5sw1Br41WO/r9jvV4yXDWqYsS4LIvH4bwXCer8zLDbsmOYJ1ihYZ7GvNxApGDeV0Ijbd0eAEo/yipnPQcN7A6McVBkdz5N83NMFg2FB5646BtGQ0iDtoYVmPeNDCJwgo+gBivXEKcBOF5CySGEHxQVGGQ7NphFxG5gZyQdpxiG4wUn+Zod1TCvOU5w9Ii2YzzOMAawz8J7zFARgvUX21JTCum5wlv1cdi6ZXlspBetGzGRh5wETOVw84bZtuK8bl4NzR2QM2LZFKgKsXEKMhSove7eZpixP8cmAq9SG16oZEWYN8xMWo4cmByC4A5enA7NCsTmzX8pvJSnXp8nKX8Q0yNWo1pCglvsbYc9muZHqJofXCSanICqwqGT5mw93Dn++VnO35VMZPHQQ0Kqf+qcrkUfnrRxXohkdhnKb9Qctmrr5Ti1z24wdMAyzTDzitliev8gzBDJppt9XjnV3CRNuHwjbguX9K3fTIU+GhdBhB6ap9gF0u8FaDTcMCJHYNU4MOVVSDvKJsO0B6b7A6QGRgHXdQNFDq4TbxxtM2nF+umCcmQNgub4jBo9hvmA4nWAHBr9opYHE8NIkKDkl7MBl0Itw/XP+x8AOM9fd9wXb+sE9CMRkGm4+M/UWpC3YOUYhbgsPC/UbKxMoJhEF5z32dUFYPhDWG8K+QsXA3HyXM7ZFY99WZhCm7AIzICgP6rQD9yHsu4cbBhjL7igTyAQstztACvPlDGMH7NHDuUnieSXYjgSrCdM4IGLAbM541gNuu8cSEiLxJB7vd+EB5PJgDFw6DSkihQgvhCIEThBWTks+T3lyN4TRh6vnRs77UKYQJUj5T/JoRjyrcZphrRXkZsg5gF5A21xAqZ4f39da46xBijL5JI7PFxOtlZpafzG41ApWTiTqRn+oKviCA2gtf1YmHbCnEVw+hO09HZ6nuYd6y0cMATql1CmMVqlRu0IomlopA0sONFQvIhG7kiHKoRAUVwg7M+Leb1g+3rDc3rDfPrDdPphsM/FEGAI4ybbvCB6FUWZLdwbAWAc3cvJPmwQ7MRHn7bbg/f0OBeDL6zOmacI4jJjnC5NQCLVYjB7D6YLx9CSYfV0TS7FOj0buYxc3NcTI3ZXBg8IKk2RM1TCCwgqEO9J2R9w9JzK3a8HcWzfy3EE7cKyrAOVcqUTExACfsG4I21Yn8gbPNGt+w6AJ+jTCDQbRTwgxYV1XbgIjQtg5welGHmJbGHV9HlJC2FaPbfOi4BIzARN4HgAl+JDELHF+I0bCMJ4wKoU4TDDTC54ETkzEoV2MBO837H7Dvu9c5SgDQ6JQ00eZKKSx775iZySjsG3c6eeck+eOCClI8pBpxbxnZGE7Wpy9UsaQGA1YUqoCgDotkI0xVWtaDneuldXwIQteX8Fri/9U3l9bfuWBSmytKtSAaikxYwiqsOdQRTf3XKnJ+TsytVNv+fNt5e/oS4PN26i1
8z0qkY6hBFU/pFUKx2pECYt0yYAA+SkswaYBaWhZlRlKnGc37usCv96x3q7Ybu9Yr2+4v33HvlyhKSHsNygK0NYieKYeUxSZ+cZwIouZhCy2bce63aGMxZdff8HpdMIvv/2Gy2XCdnsDGcsDPgcHO4ysqGLghJ1QSwOQeJ1KZyN8wJ74mDrnYIiZdLTRUCki+hWMjRQyVhLWZESkcEdYNyhtgTgi7Feo6YLh9MIISGUAbUHKMG/k/QN631lBjDPcNMHeByzvzFXIkODAcXpkXv1hZPq1XYhCvPeIC5NsjHYoJbcUI2Lw8D7ARx7CEUKENhFusDKnT55DKlspBGhSUO6EcZh5eO7ooKcLt0ZLbJ9SwrotDETyHt5Lb4ZUwEJgGLNzExIlaSturAyA++2KmFJhH/J+5/KfdAwSAOdcOe/B72z93VAGjYyjk7kAKtfzM+9YA6NpBLFNpuUmHDGsvYdQQoFsZRv10WQdS4+Bqi47US82SnEpq4YYnSr6tJ/gSFXWKaEcvuTSjcLxHaUsmB8uty3/9I863vNjCpEaBZDLNUoEqLxPM3sswRSlYt2IYThhnp8QgocPO8K+Yl8XbMsN2+0D+/0DYV0Q9hsz/vqVR3DH71BwMrGGy2nbfgcpjX0PuN8WeB/w6x9/xetvf8Hzl1fE9Z3ZltwAO4xQ0oduB4dhOokC4BZpHmuthbtey3ithOQDEtjSDVZhHEY4zfyEnNBilJodL7DTC5OySuccs/qcYOzItOlKw12+wLi5Vzbawg4Bdlyxryv8cuda/zBiurxAKYVVvKVtW+F3D5lEydDjRDztRzHTz7bvTDLiBmSGKUo8uNM6x3yQSrGQETMCkZLEaGKkXYKGtVMpzWmtZHqPgrIKkOEkAFNyJ+ErTCkVNiCITDCfIM8MPJ/PnEPRwolBCTdhEUpE2KQ1OBEE4Rck6bhzkpgIy3rH+fzMXZD7xpwEg6nNQIUZuLViVP+u+qci9LJ7XOv0Vb6Zybd6CdnmUfH7qVq63pwehE2sfRbWVrmUZNHRFa/Wu+Y2RLlQzR0URXFIAKbuO6iWB9v8Z3mM7I30iqBdS3V4TTeaSzXP1HoOlCsb0kmmNQvJkGak+YJ48Wzl/caz9taVSTjXm2D/3zCcv+D+9g0gmYOouYS37x63G7e7np+e8PzlC/7yL/+KwRCWuApWn6m7lVLCazfIBKSREXakYMwEI+XAFD2C9aAQkCInsBIRUgAoWdBgMTjHs1kpCX/BK6bzM6yMFRM8DNOIGcu5kG3jsAEAggdP8w0AMax3PM1cVr1/YP36DcYy1NoOA4Y4c5mSCEqv2Ladm7Iij9JkoRWjF5ksJlPcZXJOLc+fYgCMZljvvmPbOPHK8OYoHP5Mu5605YnYwwg7ToxcNA5aOBCMdQC4E9BaZu/JhiCEwJabCNu+AxgwTXPptMxzAfi1iH3fMI4jowETw7fZvZfR48Sh0VPiuQ5awjNrLQzT5jcKoDWBaIS5jYQl1leH+DlLVes2p+ZAK2EGKQa81ytVgLJ1bq9NqiYOdfudVeCbv1BIRdqQQ9SQKrXCQ1hT1ESXzuiFWBKcVAS2yz5UIS/Y5aoU9EEhZC/gmCuo+Y+swKqLohUz7xriBhY3TEKvzuU3JhzdcP6yYl9u2O9X3N/+xO37PxDWK5Ak2UgLzi8W0yngy+9/4I9/+RfM8wi/XGHGCc7wBF2QDKgYJgyni/AsWijrAOnhJyIg7JLMjIg+yXQbQCEialZg1jGxpc7VgGHi2ZLDKHGrUNAaJ0pmhIOFshtS2BGkYpKi5xyEhmS+GdZ78i/wyxXLj69w4wA7DtCKGL05cWNPlBg8j/jmlLoktRVb0pyriTHivmwYBwfnLO7LAljuBEwhQlFEDKnkPGIiKE2wtCG+/w1xusBNT2wcRfFaw1x8xtoC1rHGwjpb5IHAo/L8vvNIMzknUUqEeRiItczVCBDm+QRjDBOyRl+wONyc5eGGWQhnBO6sGCUYghdW4LYW3prZ4jThQViKSD0EukcBVcW61XJbyzAgCqUrJaIVxU4BdTyFjZ1XVC1omTFwiMWpxiXI9TwOeZpwouqc+owQi9wmESlbakieoa5bRkpkbfcQXny2Wvl2KVOwVZVUYLWFpQlQQq6qlWaufTthHE+Ip1Cox/2+4vTlNzz95Z+xvn+HX67w2x32+gGtNZ6/fMHrr7/AGiDsK8x4wXD+pUzdNQL1zTMX8zpozYqIErHS8Rwrx6SFWlIAU1rBOAs7uJo0BKC0dAICQjwqM+5iBGEDlIG2oTS2kAzd9OsH/HLjuZBCyJKk029+ekbYN0S/IfgFSrGrrmKQCVZJcAA8SDQkAcMo5mmEAmjbkZTGMAy431e8v73j+ekkrFDi/gsdnbIWMTElt5tYIYqmB2jH9vE3KOvgUoCJHvr8CmWHkoyDnBStef4FrymfxRg8AJ5PYa0VlCpX6Ha/c77AMFT4dDpjHCf2WFwokrXvKxP36DP3lgjYKVGAgoZ1A9Z1kcEgcjsVCJcTZ6q4+0ehL3z/2ZUVV1o1FrtXEE2loAh8ksmqD3YUnRqgprRGn4tR67ohW38RqMoFoB6kr04typZdvANCEerq6iuUxGNep+L8SCgkOlQ316RGA+TfcSxaFUNhWE4kY8tFmSnFsTPx+LEy+qwmb1i5agUNI57BiDTOPBL6dMF+fsb89AXb/Yqwr3gVfrhxZOrwuC2AAQiRueYNwcFw8s5Ju2+KLEjg+Dj6TcqXO1vVBHHVs+dFYqG53h/3hGBlsKxmHD/3kMjea8OlqpiQdk6QBZkuxKHOirBekTzzLuroeUy3VB/sMOL8+gtS2LG8fYWiIPo3SRfdhmVZsG47NyoR6hxII5yXSkGFAG0MN2BtG9bBlP58KCVVgWqdA3Em3hgtsxISklJQ0SNud+jhBD0Q1vdv3Ep8eYUZ6gDePA0paZRBOzxRmPc/JSYotYIFGOB4IIyQ1ZYGLcWZf0YBAs5ZQQjy2cgswEQcQjg3wGgN24rasTaeL9ySZxTUnarWqncL2PKVJj/koQb5eu1b9ScViFzeU7U3QQSflIgZPTjNIvyN4OR7axQE8jM1Atwm/crzU61qdCpJnrnkAMrz5XimKpgi9O178n0eMrr1qUV4Uipgj+w1MeKLcdyFRVnVey29GbJHWmkM2knsPsGNM4bTE/y+FfShT5EJSg0AA4TtA369Q93vsMMNbpwwzhdO0smB58TVxg01MYCoTpnJ7inA4BUtceieAOUsjBrhxhOmyzPm0wlOBq0qEM/tA5B8LontSPuG/fod6+2duwxltJeyjvkWg2dyFIi3MYxMmhI2GdEekLYde4i43Rbc7huX7TRxE1UUOvmYABVghwGOmEU3hAjnLPfPLzydSSdgWVZ8XFcACsPAMFzvA09P1lzqgzYgvcFsN5jpjBRmwDiEsIFuP+DSE5ygRnM9P/kAlKnN3KQVQ8AWA1twa0SONIaRWYG9Max8JU+ltIZx/Hkn07dCjHCO+QRX4aOwzsqEaAN7qPZXWaZ6mCrYVzXluCJPn1rkUrtr3G5VFEB
VElSkKV+rEn1WDuEG/tPmGZqfW2qyOq/gUQEoATK1BcpWFDv9dNBrWQmVZqnmsy0UumsxbtemsBH1ydYasqDFQbH3Qu2kIw0SpiaZgNLc56HjKys7beDsCKMt3DAjRp5SxGzGQcpkAcZvUO4EM69Akik9IOzbziPRFMG5kd17ma1AkRNOIsIy85BZpnkiLfPWDyMnw4bTBefXX3B5fsE4jTBaEHWIUmb0CApIm8y7J76PjGU31sI4h6QMECNIcU8/BRnsCc4/pAQeD75H3NeI6xJw3wKTnloORcIe4DfPnd+aFZbfOBOfkvDoDw7eR+whQmluuFnWDdsWeWpUZO+M1h0xAdM0wBotSUo+j8lvSH6FsQPsMDGjUwwg61hg8zSgsEJBwgOtYJRBSFFa0XUx0Fp4E7UxUEIUGmWAioIqMyrzGLFBWKeMZiVuDGMCUkwYhxEWqj28NadfjrYqot8ctb52xudQlwP9IDwEzv7IIW6VQNPfWEKDLPi1YpgVRnX/i6v/UPJrvIMS6rdJwvb7clTwmMt4EKTDqzVhWhUC0FZTWqXYhzStoirXkkSUVkBSmZREnieBm60IAKkG/dDsA9UqTFvBKR2WWnNbs9KwdijeUpIOxBQ9xssLC1QW8BRBYQeQOLtNTAKqrYJVzJpkIyP+UiJYwfCXeXd2wHQ64/L8jMvljPl8YbbbaZRBGdIAkjzP6Mvj48OOuC1IwbPVH3gYBzQjCJIk7rLnEQIz/6QY4JcFe0zwAfAJTMduR+mIDIzr3yO2LeC+7CAQW28A27rDx4RxdKAE+Bjx42PF4nkNY4zSjMODayIYZhugmYdBhuRmMheluVksBg8jMGalOQmYEX4p0/KXjkAFo3leA6ALOUzmFGSFwF65ldkFXhqdIOPHldawErrk0IATjfw9KSWQ4uahZjRYb0dKwuYo9FkeOyWgSka7TYYdr6yOIUCRwtpLTo0b3WkBajLmuTc6f6Jk0hvPoTgWfRKzhBW186nJe/TuTc1zNCvQfa79LOd/+iCqry+0j/XpH5Xde1anhRqqTSZ24U9GodVooPzuobyqylBVjjR5+ISGAxlCZvtFLs/mMe9JkHwximVLQAoybopKCFIfmzMu2hiehTfPmE4nzKcTc9c7K+sk+LnEM+6ijI3PI+RD5H9HAmewQcyejMQwopjgPYOkgrAah33jVuPE7b8KCm4yGHyCvi4I9w3LuiF6z5ZdcgBm9/Ba4b7sGCNn25UxWLeIt9uO68ZezuA0NBGmkSsDRjNfQB7Zdb1v0lCkcMaA6UXKezHw9J5tg3YEO4yiqFNRmCkluU5AhEIQZiXKrbuOOzC5FFiHhlIiDAMPS+EqjChNa6VLE0wqouqYP04scvhmO+HOGe0mK96WsupBbn7f+sLlbY17Wi7dlON6Q8hqpxT0U/O7KuhZmIurr+pfvZpB6VVoTmUVwDaZ2Fr2nJzsXpO1abLgaJ6nxt7oZg903lIn+X1Y0gl+zkFoAEmVjq2Wm/EIMMrPVrEQtW7Sv6NRAt3itwJsyrMA4M7tfGCGJOXGWIAoqt1bld1TtojGGgzOwQ0DxnFk4g3nxGLxjSoJIVJMCDEh7IF5EXxCIIMIBw8mt4gRCD5Kz7vmBiBKpXkGFDneT7GEPQqAVgSjLOwYYcYZatgAHxE9K5YgScT7FhinH3g24O4TkIC324b3G7f0umHAPFowjzuvJcfrBqAcenD23rkBy54wBcCoAeM488BbKMmZQNz+PAVblZH0RMTdgzkn1ISzSinsu+c11sy45L3HMI7sgclUoawcNDhkyJOyQggl5+DcCOfycNCHmL5RC5+cuUYcmt83Fq9YrKNmSALGaS2VhAIlBm5S4+VQV6XQ+SmNnLfQWkmZlew/Fbhgze6X/3ROatae7M8YhtohpkUYi4Og+mu2SrHTAqokHPOqUHu9nFDN2AFjANKfKw1Zj9Z3YmXZVG06R6y9hzoCulWuZdm7hIgClNBOKdurE12Txy1FlTEa2hoY56DsUOC7EVraWiHPBSQYBFh4GIQEhAgE0ogkjMIxMqU6MYIPhNJDr4Xkg13ABOUcjHD9sQstU3Kdw3w+4TlGTKcTlnXDj+9v8OsbYuAyIc+W5MrMsgesHvj2vmEPCc4onJzGaXAY3MDJS0qI+w44JvUMXvgGxgHDYDA4ztRHPSJqB614aAqU4pJpSFIa5Q2y1vIgjxgxCWMwDwWRcyDnIyXOTfDsg9gdsyiEotoYGDnbPIyGkJublFISVjCjs23d3EdZb4KAVht051EhA2+qi6yqkBYBroxlLeAG+TA2STA0QgLQJ9/ZfXsnZ7VkJ+CbDArKjkPxQoQwoQh7Ff5Cc5bPf/l967Qc2YMa66p+dqe5ktIIX/tgisuKqqGAytd+SGoe94mATDxR9qT0OjR5F9RcSPWoHhOnJeHb7Fp5vXxFez2m9eIW1gRjErxPGALBWk6a1ZCJitLhmQMaEZYVQVIyUSnPfJQ8lDECheWnSALTNdaVtU9as5cg1aW0E3fZEYOGpnnEAM1jvsIbrgt3U2owhHZ0GoGA691j9YQ9EsbB4jQPeDrPmAYLZw2GwcKvC/y2wzkLZRSIGKWXXzOasH3/d5CyePnrf4WeZpm3y8nXPBOSqA7gjYFp4wnSYwECEkFbw4KtuJyXUZZlOrZSSFn4tYLfA9aYyvm0hnMIIURY5+A9Jx2N0bDoBOAYOv4E6NNiYakzF3XqT5kb2BwgOmqZehCyEugEnxTaSuCncnXQXG1UkhVS1oLUxgyN/5rfx4AMTlTW0L/nK1Dd19aEn2oURV/caynQawBVPRgRVnGE2hnwZZWOigCqWctDQrax5jU/U++mJVGvSUY5bBlLIe9kfEe/vtT98IlWbkI2LjXt1SswHCK0ejIn71IIiHtE8IQY+flIWyg9QkFyD6VBijsnU5SxdkqDtEFSCRGEqAiRNPaQsK4C/01MVHq9L/jzzzd8/faB71fOH5wGi9EpGLJYvMJ1CyAojNOI02Tx8nTC89MMZ+t8PsYzcHJNGQ3jOX/BUOiEfV15zNiP/8CHTEean15AYYVNADmG/Q7DwLIS69qxIAt+IgQJqQbOjSQe9GmtZQwBJaQAxMgeSAyA90KHDgYq7akqjmVduA3aaCg1wmYSyqKZP/UEGi+gPWC9GOJBW1C9ZolMqVcLVfA/UQBZaaAqE9Xd0SGp0GAS6sGV+Fo3FQO0Ap0xChmY0sfarWC30t9a/C42b5RAK4ztdzMisa9WPKZFVBUmeQNJCbO+86BuCN0a8/Kp/n2HrcuJ1EwhXVefmlxpDbyOeYaf6QF2V7klOFsi3eZJcmolH+LoubGGDKAsYAQcTAYED0q5dJngfRRKLe7oS9KpGEPiVttA8BEIUeYxePZGPm4r/vH3b/j7377hx/WO+xaEHDUhKULSAEJCSArzOOJ8HvDyNOPl6YzLZYYzlYPvfl+KARhGh017pJ1HnYMSot/ZrdcKtC/Yrz+QYsT09AytleQLtPQDGEQKMJbzJEGGg2hjSjJvSxsnLA1DerWuHmqSPFH2crVhwlgj5K
wh8DppbWB0hNLZdhPsMSvfhYxZfrsDngk3IDyAB1bf4uJSlr3ulLTOQ3NWQMdQoFytZpmLyLbZp4c4txUoPqaFFVXx91S3uLluPqDtbIEiiz0EuCQLP1MA3Z18kolXNVbvkpyfCBAvXV2w7D90Yv+wljnX0SZVC/awUQJt3qVNsPbXIrmX2gCWP/ZJovGzXEVzo1zsqUFFixLlyqEkIo3hxB4sCJb5+IMBkUdMGhEJkaKAj1LDtJOkOhBkHkPE5gm3u8f9esePtyvePm5Y9h2RiMlAwPiCRBp7YAU7jSMuTyf8/tsLfvvyjDln/QfH4YeQbcYQWYCd4xZmpXC73rHtHm5wcCfLsGqRB+t4iEfKZKCC2lvXDdpoJiyRUiqXAzn3su8M/3XOIUUtbMGxeAmpC+Ok9CcTk1OMsJkZ2RhEa7DvG6MWQ8hVAPx841S3i82hKw7kJwcBjxIu1+oMvEKnHNrv7RJkrQhV357/3VICdGF2/1SV6sx0oXgXw2v16Orn9+RrtknBT+OjT85+c290uHqVD+qWok+G1rxJ84j9+rc5hfazikAUO+Fu7zo1IKTu3uV+WqhzQVl2MQF1z/CzP6rkDNr3t5fh0EspmeCgLEjGl0M7QHmQDoAOUHqAVhsCFsS0IqSAmLh11vsIvxNCUPAB2PeEbQ/4uC54/1hBsLg8v8LNES8xiTtsAGgEEdKXywm//fKMP/7yBed5KGxHOYTSKuH05KCMxcfbD1DiMV7WshLYQoKPHnZK0KNhvsjBwVkDKA4doABjLIhS4SJk4Q9IFOHMgBQjluUOgGSK8FBAY5mPgIQ2XGktiD9OKHrPLcpaUKPaKOZrVArDOEpiUFiBy8hvqpJRSkmkDgfuJ6dcyDceT1I6bHi9Whnx17rArft9OEGdAOb/ZcF8yFwfFVvr3dSAuSiBg1Uv0bvqwx/g4BF89od6OjPqlNwxwkdJ2PVCfLSqUkHJbnMjczn3z/8W8WyUAH3iZZSqw3FNHu+uW8NWt1NJqvadlR2sunnvMfzq9qbzMiX0wgDSiQXfBCBFKK4JgswOZ2fArsAuICLvoc0ON0ToGAG3IZkZZGaQPWF4+hUAJwRrKVtq9YrZd4bB4jKP+PLyhNM0IKUg2XUSBuSAbdugAJwdz0dcblekFBiLbx2U3rHtEbc1wJ0BFwKcYBU0JRhr5Xyp0oPA7npeCyYJ1TKoIs+Q3PeVSUCc41p+SggQQlTiVmCjecw8e9qJQwky2DceR6atk1ZhTs3bciZLeujggqv8ahvLdlsrfzU2idr24U9seSucqpfolvOvEzzVvVrLcPm1Jhqg7n2PEQLaa+DRmlfXv8XcEXqpPxzwh8VpLXoVzk6VFovaC1X+oYKiivPddT3Wr0r1GvTopj+49k1Z9LgO5RxQX7KstiFzK7S7X936vOiPGYqqNI+eXbsqhYRVFL7WGmQMNI2SAAzQNkC7ADsEuImptkPwSIEJN5OAifzucfIe3u/4RXgAcp9Em/wFIBbcwFiD0Vo4pwV5muHN0lG4rVjvd87ERw9SA2Am+PWGhAXDEDHNCUoz088WIvS6snuvNUYCj3Z3Y0HpZUSj0YZDmyBswaQknodgBhh7YKyRcWOJeR8Bzo/4CJJSqNFcnTAKfO/yvFw61OVs2ZqAy5svkWbJq9V/9+e7JtyoFdzW+4RqCEBUcyJaRdBr/uYoQHJlxdKXg9T9Xe+9RTGWaLlLlh+szEGZHMOGPsmoijDVcUsP7/zUDe4FMzXvb7J2jZtUlUBCi4ykzz6HPlY/9j985rkpoGAkSl6hYVoqDiFL4kPc1oUTjf5uw5ReQdJhr9rr19NTy6xNl2VzXpQyMJaHg1oSi5wiY+FjlNo4g5aYVy8VngIOg+TajaLLT5wBNFq8j4y2y++klJCk+3E4czNU2HeZZDRjvd+hlyuSGUHKQbsdgWTsl9LC1qyhpCKQ9h3jNBUB3jZO8lnLCdAYAyjnARTH/EZza/C+7WVikDE5v8MwYUnaSVggsqANVJKRZwIWSom4LJgoJ99k2q90Z9WGldRvZpsAolb3twLXbDeh6357qJW3VplQhLgxCd1bHoS01St95N6e2fqK/I+6txG6Nt+DEgPlQen4T+PcXjib1xtcfxfjl7xAXcP+eiWIKN5ZF4s3gp8Vx2dNUMdeB8odjVAZYY2M/2RB7HmWc9tqueHUrnD1DotnoFrVUv26Y/6i3ZS+QNmuKGuBPjRUPPRTcWuxJgJc21TVIuhSLQF3e1J3DM3+ZK8nE3aU3wsIJ8YAO3Np0W8rzDTDTGfY04JhucFO73DTBff7ggADZQamT5/OMCPTmq33G4yQc4IIu+cKxzDwGLRcS88MQPvOg0iVQ4EMW8ut1s4xviIlac0mYIKCTxxq5O5CJ5RrrAM1SOjZLGX+K9nVev57a/rpuSeUen8+wOoAA6ZGsCu7TbURNa4/hgmP9rj22FGJE7MlfMw+1Di8F9Dsxh6URYbEUv9mUq2Q4acK4NNEWpego96QdlnBdn15L2rjUj28fV6QGsHP//HrqXwWZW2LvmnWtQh+drsP68TbwGuTKylFV2bPgB7Ftu2XAD5Ty43Vf/QNuktUB5D6F/JiaMVnjpqqQpu3ObhAnUptFWYHhGrWE70yiTHCugA7eNhhgp1OcPMZw7pgW85w0xnudMF4X7FuHhHcvKTsAKUdYuTZg5zRj1jjymPa9o3pvnKpVDr6tPBEJuIORVCSgaD8nw8RQer+RhtM0n2ZIo8O41Hkpq6wQqkyOOfycNBmy9vONtT9rdq+1fnIslh2jHKceCgdthvc1parVZcvyVOBP0kktTt51BmH8Lu5uYpIzO4IG6nm4LYly+aQtNfqAUrHO2qTdv2he1QM9ZmKUB6UQFUcjWvffrp5PR0QfPXgQtzrRnFk3akktFPHfeq9gPb1csNlTcQqK1ROhyYsPOqEOoum/vKoJto15+1qgM2EXlU0wn1IaZRnPXqb+ZzmNWwCVZSiq6qBcPb8FFXvQmkLEn4/Ywe4EBDGmecdjDPsdIKdz3DTDW5dsYfI04qFZi3TtAMy/UkpZirad+H2M6UEyIlpLtcl4kqDsQPcwN2UTHQSAADWOgzDAG0MQkgS0oiJ1UCQlmljdPV2tG3Ggx8Oc46T8kXyh/qMtmyc1LbVcUc+seL45JXOtc8gEarb8/CHDj/mA5k9kPKbfuR4sXOqsUEd2pAOl6dHeW8scmc5M4qOPrnJwx215o36ry6Jwtbpr1/dK4Qs+CmjczpP4hFoVPeQX9BNXqMAS5CTop/vVYUlp/7+P3nin+MjUBX8sUJxfI5P4M+dV3e8RlcC7b+4Og9tUqWx8Ie77AIDdp+gdG7a0izQhunJMs/hLsrAjScM6x2bF5YlQISSQ+0cZqREMMbi5fUEawVnkFuHJRdBAKZpLtN/jDHwfi/PMY4DjHWVbkyS2pxTYLainhCnGllLKZVf/PTPIYBrXWL18DbRnw9sP1SUSpN6kmv0YUMbF/Ynqz8Cn1nXvtxUExDUJyMaYpDGTW8UT73tz+rWVN+PXohbnoJHaLW44
M1mdAf8wVPoXZz+u34CIEKvNtrI5pgUfcAySH9Em/Ssz5v5E/NvdLGUGdGX0IcXnz1J/aqDsei2+ieKoXVnuvc1m5bxC5C9OJQeuquWKkxVY7y2rWJvE6X9rWhteD+1FtQeTzOyboAbJrhxxCCDQICKtgQY+BRTLLRdTrL3hAQj55iZkrRQejFMPcUk1QoL8syOrJTG4Byss6W+z/enpauUaeHzemc2bkopewDtQawbnR+2c98eTl0jWCJkRTzbXB64rNF7ZZ3KqIciu3sPwijvVc1paU9XU7KqskX1+7ufG0Fp3PdPH+/44J+FBc0ClVdTY0HQrHMWsC7mOHgfxUU9eiWf/Cnfqw7vbbywPpPal1EVK2zdWI/mxo6L/Lg++V2tgDSC+Oktf2LZuz9FkzRqjx4/21ywu071yg5hTpN87e+lJho7X6BVeg2+A/l5FfcgFC9WK/EMmPhj8L40NbWcfGWaElTh8dcmy4gWglSuChhhBUopIrdd5zOktcbpxNUErRWUFaEnrhKUpKiuXgDTrxNIEWy1WqhSl//Sqsvif6qAmy1UYkKr5szewHGTDw0vDTQ2x2k/9aQ/s6qNEurc3CbibOPamoFvNNRBeAFVsPxdbE5VebXCegykPrU4+dlLiKW7r++8kf4ktxLRpGxLdFPc2ZzifBT+RvlmIW8FXpTAA8gpE7x23grVAw+U+zm2OX9m+fN69T8eEqI/WctPr/PgRT1m9LthMwpQmY31QRPgEC4dsojZQ2z0oyISzEReS6bcBph9KdoBKSMxiWG7DABKxSpnxVCrFoAaRKBVRiryPWhdmbeGgRmbnRtKzN96Vko1ho/y2pDMD0itB1A3uxop6lx+lWPlNobtNqep+Ze3NCtWTHrOGuXknLjiuebdbgo9bs6jmq//qIqA30jNPpfGnMaycM6gORndZdsscB9uFJfxP7NiP3XRe6+nirZqADo4XLtPl3XKU/VXBsQ5pyZ2ze4+UA5Fi3zskZBo76iGSYTm/vrkWs7A/8RR+gS41PxFAPeB1Ocu26ZUt+7HqKQtHZYkKDIQjQrrcrlAjuObE1OXsQ8VHt+RD1JzqcMDt+/VPLhAXHDbCSBIgDupuuLl++Q5OMsv+6A1dJHrXK6uZXdrbMnV1fORuS7qzWUFk8DzDBJUpQQrIkndv8rx6wB99MkuZ5JP+WcFmtTA+ii/fF3VKAE0sfpPlEBjder29weFFUuzieWUqE/E/OFB6u86l5O6DaqHoF7nIf79iRKo9xAlJhMhPLIh9T8+HMRaBs0Y9ZzMqt6UboS/IFtU5T2olr9pjjqgIonaPVRVh3deEBojXg96cUzQwsUO7jx97k0dah9F6PPv+lp+ebXuXefZFkvWhUpV1R3VwuH8icdWV1vutbi71SiUu8hK9ZOzoCmPAcvnqk/DtV2TZahMp5zbfapuC3XvyHuN4q0krg5CCXeApcNGFlhtTmkr2XDV5wYOKare2rb/bqKmVlQ6ZaAq8qut5+R3tpb68yakZiPygnY7nDPfCm0isFV0HSV4e7cEcF2+VUQSS6H/Uy1a/myvLPL/ciTZK430GGLRAYTVHI66sgQonhvwEN6KtT8Kdu8B6OL6V7e+n4lcY7g2dOoVbvmvFWiqnkhV2HkJKzLyqOQKKWynXVRjiOoZKevdKvz8euN4lkRobkh6SHo3OJWfYD1aI1Dc6Xw2UlV4KN+pege4OZKP5dcmfKJ2r/p8TXsrfUIza4Cjsqj/zkqpEMRowFZkGWvBsphFkOtEH8rjvdATdVSRaTa6bFSu6bdIu4MV7ZYm3ySqJUH1KNrDUkFKB6XwmYfy6Hu0W97gHRTa1uTWynQ9DvlKVLV91fJUzignLfvN6Cwymr1t3Nps4ktappj7Sn6WCzrt85VVbKxFvk4XH+YrHO+hWY/WehM1WYfG0uTPdfuT39Yo5R56LKclX+eQAK1OZbO2D3FDp04a4WvWj/q9yu+tMqK67+t/6s+LagxLBQ/VPc4Kr+abwHulmgmalCdWV0PbhhXl25rz8ZmayhtDh9+0f6tOllUTAtRnArR0AwKNtWk1q2plURKPqos/8oVlx9DVxlQeGd6LTe++12PbGOxeURyEK7+3Ammahz/seGsd+u89ehLVnWrhtp9mlz95PVtHVSweNa5y41J/Um7t77Eq0bL9GgDpEteUXAnl78yJunY9srIpX1w8nTYc6Faj/O8RBXcUui4ifHiez84lyfepx/epTy6ShQ2NIni8KEAoQ1Q6I1IfW+DO1I04q+tYf8pC/HCOFOcTqqLOyuAzQ1MtNjX5qBKF0CdnoAsz0P1cPanGA6fH1ej8WqpekJIu3XwuMvQ/ezC2W7wu8ZQPYONCUM4TKGHNaqbz5hVS+Yubw1Ve67VyOy2nIADzwSUIzFgB6dHZbr2N1qI8/GkZhVpF1QZN5RC0XY+NxSlK8Ji4a3+u66TagK4BRZW9bc9We53WRW+VRva4jhvdKcn6WJ8AnRv1dvheyot9cM9LbPqoCB/ErFjE1hvrFcTD1KlO6nUj7vVwZgVfLeFBFbfCT9RMbuoXS6HxeJoVUK1U5mVoPOJiLqg9Gc1DtJ5Ac58VyfrZs7bXzWt5dAEOhuaQGP6cJLaG7i2fZNvq33ov+T9bH6A+yM/y1yVhkg9ePnCNsFTwRW+9Pwllu9NU15FZhqr1fIwRP73LLj/xuFDdoaX+81UYDs5g81w5KdMe8J/+yYKca+FH69sclvy/R/VQVrv5Te/u53e1FQVAlYxx1w3Ylsy60llq9r1sYrU8zd9dONSEDg9rUt78eJL667TpuF74Ky7i6JVWkWpd7sMRrlDvov0UlHA99uuLhwR09zR0fIpHb5Pr/4AmVbkVk+pOWA4ru6VvCTHQqInifeW1p/J1HZauuytq3PwmFG3yytmbSqmudY8DaC3ycevUQVMRmnzaMZKvv+xtdU0yFTe+2nwUK6wS+mTd8Qi17gXVv8uePX72Uy+zuyx12vJnf3pLdrS1j1/a2vSHdSwHs6be6rEn5JFb3beVe2yEPyumh0SmPPOnsWT2GFrUYOvy500+2N0206j61+tBbRRnex8HC8v7X5vQ6nceRK4JI3rb3G3Mwc7kOrhufq7eUTkPOYQqjFF9MrB91DaayZ/VxTdQTQjYKKD8vJ8kHVVzjlV77qiRw4ZQh5We6s5db7Rk/1KTBJff1EG0kpgVL8Ee3a1jpjZT5GUcQMnI5zN8FNTWgDU/txvXo/I4gdpqthZQ9Fk/X04ytptdrKCq73sMCY42H/1hEyugGmn9T8T74bcd0OjwPcVSdWehjfn4Bgo+H6oIKEqo1SYI+WGr8KNJzNe4s7kEDovWWZt838fGou42qb1eVcStwJNCsTpoPaZWp7TJTaiqPMr18p6ic+n7EKQ/re0jdvtTwDHVoyJ5/cGTovypJow56NTPxsiV76ac98mlUmq+sV/7/EFVPOgcBDWHltKjd9V4d/1Z459jVjit4u6UOjoPwcamqaPEQkUZyVInDa2ZXDLX9cuUXtVbxZpgeqR5KhdHFZ6HDURvWQ8YwSaK
q4fz0WFpPQ407+7TkV0VAL3O+nnM8vA18ljNJ7sqRvv9n18CAFpfot/sHFXW5+zBb8cnxKEidFSgjwo1rwWVw3EwCFlQ+0duvkteLXkI1e0JNWHcsaLVPsdRcVal2SiiLlzJD/r5qvZQ9Mc1r1doMhD08MuyDX10q+rvFTqrW3yaY07i6EHzXN9DaCq70TxWEdiSVO+FG837ivA3ivno1dUUB8GmmBVALov1yZGoeDorJY4tGV/clynKAEr0CgD55/bP4Qy2ntLPZK6PwHremS551LyrASU+Wr3mB+r+r6oVVUfF0x+MlsXm8fAVrXAQouP7G0RAthrdNQ4KtJRV+ys8SEBjUQ/R5YMP9Bm+IZ/kTnl1ic2D99N4g7kdvD5yFVWo7onL66n97u4xjs/18Ia6359Y2OqplJtGq+a7/AHEVquq/DJwLO+EVjksa06Fap61dZgUAelxzVvjk39JJfxs9+AxodrmdPJbWwNZcn9tHqAo9MZ7bjbWptS6fXJoSmyanzHK8AUCkbCMlrlQ6IAKPQ6ht2qfCswhOM/KJJ839bBN7cFSP7/Owxfj8z8K3U23KYXu29o1od66/OSiD//K60rN/4tIKABo7UEbFafmFB00+bEkdLCi9bvyS/xzRxpSr1gVZpPD6U3jQZs24QzVtHZ9A/Vvzy5xDR3KqT088wGRV77lWAprFZ28o1XQ+a3FO2nX6uBJNHH4555pfz8/8+7y/ITj/vf3ffh8I7h5H+mTPW5XKreBkzpY+aIAqFMGXZ+E7K+NefxSrjU2fk1FkEHqiawEcpZea86sKiioVNFVqskVqEaRlAP1WTBUtheNFLafOS4yOkmtcdhBMv8TwW/pCtvXO0OYf2o3tKlsPKYB6dMvU6pd/M7n4JBKusQqvrAqwW4dZE2PyZ9esR+F4nDQ8iFp77ez2KoCVsoaHbsKs8Acn/vgkrde+sNe9TkH1ZwRajLVrcdVLV4W3dbdP/Z5yv0pKutK1K9Mrz87FfXpTva7SsXq5ntD8+82F5OfpZzvsu7UrUtq1i8Dd7ppT/JXOiiHzsXP21AajKhcq3q3vFK2TIShA9wV2bJXFBmTF2hoAqAJBC1KgArs8SH6bpJYrX5nI5MPkCgOopIgKrz0ZR0Pli5funf8Ufn//783sEXalQP1M6UjX5hHeT2g+A5vfsB2NDrtCEYSfftpfZfK+mRrWe/tYb/ybT6oJDl4batBucZP7v+4Co3XU8K84z20teefLP4xLm33mQ731AJacuK3348aUuRr8zZV7onWCyjXVW0cfWwOqgqkOjcHhaGo7EsVzNZdV48/NfJb/t2V7NBhCko2Pz97+/ki9I9Jwvb8MpMxISKDf+pTZm/Ishsh/coHbZQFn4kiUOC8vNC6LKzWBC3lFioY83wYZJFUtiqyMUqVLGm7/NV9qt2HqjtjVbLqYMXGx6P23/j//ads1MPhrYrswXVvkl/twexINxsJqkqQ2qNZk3pd6N+43MdEz/EWP3O1u/f1Lmmvfqh7sav10+ETrSVqv/2ziz+sY+tpNK5p40llaW/r3wffsCii/EOrOB42s/m5i58J3Yi1Bxc+x/VtOJvvoQh/40FJDM/59M6xfwgYWOBz/0Aq76pc0bVMV6w2HWWDvzMPdWlzKnldC1VczJODqFOs+QksydDF2sRxeMDcqaYUtE7cTaS0cKUlQKPcpFK1J7lCTmuCJmUXoVn4rtLdxODVM/jsyKH4WeW3TTdiDmHVZ/C55ox2r+SuRDQXOHxzFwvmbKxCG5a3HjY6s384ENlS1Sw6+osUHZYVTfvo/bFqXczuIKC7me4hjp57sSx0/Hz/3VXHUlPq6reltWLlOq2JaoTzWM9uY//yueqm8Z+cEmmJYboNUnIesqJuLK+8rUNrth9XfZKVFQH129iuEUkuPz/4J2eh3bdSqz+47vnsEcm0psSAnVJ2B3UjwKpAtyonzzDgzyZhRO7OQr5XkR1beOqbG2hdk6wNk9IgUigzCaWtkBJB6YSUVOE8K/Pi2z7znCFuclwtKKPyDzRJMnoU/k54u9P/eeLtZ3+OnvXDrpcNfxQeVaxWVjrq08+2hpwOJ6I9jPkTXZKqvEE9CHwnteWtR8XdnuD/bBEO16XjUjSWro5+Lgm/Q7qm/0qiw/HsPYxsmfNv+/t/fMbjnnel6qL8m3v7z571QCSCw2ttUv64JdVK95eoeqH1oNrnPxrY+uxFiZAIfpKwHFVxdAav8QyKF5KSEH3wZ8t49Yd9r0lB2wt83tRKS01ywJUWdztBGEqJoY8K0MReARH3pGdqY1I8l7xq0WNiK69u+3qLoUbZjEfxrP/okIYlzEBnbVRzJX6fana0xkVHgtBjrI7Dr0VGH272M2+euv/VMVsH8GnzTCgucv/99NN/P8KZ0d5Jc2I/S4RR/++yfHyniiC8f60n13s13edar7KRyHrOjqUueX+vwaq2zrF9qzgkQV1ClCazW/gtDrL+GOXVvFNph+/utxqrR4Gu990qz2MPRfl/nzVs3HMR+pRDcWYJOlZrWsXayS7Va5RJRo0C7s4/UUk1/7/Kl8z5QK3cLwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAyNC0wNy0wOFQxMjo1MjowMSswMDowMEeWwgAAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMjQtMDctMDhUMTI6NTI6MDErMDA6MDA2y3q8AAAAKHRFWHRkYXRlOnRpbWVzdGFtcAAyMDI0LTA3LTA4VDEyOjUyOjE2KzAwOjAwaNNlcwAAAABJRU5ErkJggg==";
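
// Index three cat documents that all reuse the base64-encoded kitten image above; only their names differ.
// A semantic search for "istanbul" against the image embedding should rank the matching name first.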
nlohmann::json doc_json;
doc_json["image"] = kitten_image;
doc_json["name"] = "istanbul cat";
auto add_op = coll->add(doc_json.dump());
ASSERT_TRUE(add_op.ok());
doc_json["name"] = "british shorthair";
add_op = coll->add(doc_json.dump());
doc_json["name"] = "persian cat";
add_op = coll->add(doc_json.dump());
auto results = coll->search("istanbul", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>(), spp::sparse_hash_set<std::string>(), 10).get();
ASSERT_EQ(results["hits"].size(), 3);
ASSERT_EQ(results["hits"][0]["document"]["name"], "istanbul cat");
}
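
// A document whose image field does not contain valid image data should be rejected at indexing time.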
TEST_F(CollectionVectorTest, TestInvalidImage) {
auto schema_json =
R"({
"name": "Images",
"fields": [
{"name": "name", "type": "string"},
{"name": "image", "type": "image", "store": false},
{"name": "embedding", "type":"float[]", "embed":{"from": ["image"], "model_config": {"model_name": "ts/clip-vit-b-p32"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "teddy bear",
"image": "invalid"
})"_json.dump());
ASSERT_FALSE(add_op.ok());
ASSERT_EQ(add_op.error(), "Error while processing image");
}
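
// The CLIP tokenizer should handle queries in a variety of scripts (Latin, CJK, Cyrillic, Arabic, Turkish)
// without crashing; the results themselves are not asserted here.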
TEST_F(CollectionVectorTest, TestCLIPTokenizerUnicode) {
auto schema_json =
R"({
"name": "Images",
"fields": [
{"name": "name", "type": "string"},
{"name": "image", "type": "image", "store": false},
{"name": "embedding", "type":"float[]", "embed":{"from": ["image"], "model_config": {"model_name": "ts/clip-vit-b-p32"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
// test english
auto results = coll->search("dog", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
// test chinese
results = coll->search("狗", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
// test japanese
results = coll->search("犬", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
// test korean
results = coll->search("개", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
// test russian
results = coll->search("собака", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
// test arabic
results = coll->search("كلب", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
// test turkish
results = coll->search("kö", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
results = coll->search("öğ", {"embedding"},
"", {}, {}, {2}, 10,
1, FREQUENCY, {true},
0, spp::sparse_hash_set<std::string>()).get();
}
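
// A plain float[] field with num_dim can be queried directly with an explicit vector query.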
TEST_F(CollectionVectorTest, Test0VectorDistance) {
auto schema_json =
R"({
"name": "colors",
"fields": [
{"name": "rgb", "type":"float[]", "num_dim": 3}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"rgb": [0.9, 0.9, 0.9]
})"_json.dump());
ASSERT_TRUE(add_op.ok());
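// The query vector [0.5, 0.5, 0.5] points in the same direction as the stored [0.9, 0.9, 0.9],
// so the reported vector_distance should be exactly 0.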
auto results = coll->search("*", {}, "", {}, {}, {0}, 10, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7, fallback,
4, {off}, 32767, 32767, 2,
false, true, "rgb:([0.5, 0.5, 0.5])").get();
ASSERT_EQ(results["hits"].size(), 1);
ASSERT_EQ(results["hits"][0].count("vector_distance"), 1);
ASSERT_EQ(results["hits"][0]["vector_distance"], 0);
}
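
// The normalized embedding produced for a document should match known reference values element-wise.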
TEST_F(CollectionVectorTest, TestEmbeddingValues) {
auto schema_json =
R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/all-MiniLM-L12-v2"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "Elskovsbarnet"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
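// Normalize the stored embedding before comparing it against the reference values below.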
std::vector<float> embeddings = add_op.get()["embedding"];
std::vector<float> normalized_embeddings(embeddings.size());
hnsw_index_t::normalize_vector(embeddings, normalized_embeddings);
ASSERT_EQ(embeddings.size(), 384);
std::vector<float> actual_values{-0.07409533113241196, -0.02963513322174549, -0.018120333552360535, 0.012058400548994541, -0.07219868153333664, -0.09295058250427246, 0.018390782177448273, 0.007814675569534302, 0.026419874280691147, 0.037965331226587296, 0.020393727347254753, -0.04090584069490433, 0.03194206580519676, 0.025205004960298538, 0.02059922367334366, 0.026202859356999397, 0.009739107452332973, 0.07967381179332733, -0.006712059490382671, -0.045936256647109985, -0.0280868299305439, -0.028282660990953445, 0.00617704214528203, -0.0756121575832367, -0.009177971631288528, -0.0016412553377449512, -0.040854115039110184, -0.007597113959491253, -0.03225032240152359, -0.015282290056347847, -0.013507066294550896, -0.11270778626203537, 0.12383124977350235, 0.09607065469026566, -0.106889508664608, 0.02146402932703495, 0.061281926929950714, -0.04245373234152794, -0.05668728053569794, 0.02623145468533039, 0.016187654808163643, 0.05603780969977379, 0.0119243822991848, -0.004412775859236717, 0.040246933698654175, 0.07487507909536362, -0.05067175254225731, 0.030055716633796692, 0.014153759926557541, -0.04411328583955765, -0.010018891654908657, -0.08593358099460602, 0.037568483501672745, -0.10012772679328918, 0.029019853100180626, 0.019645709544420242, -0.0639389306306839, 0.02652929536998272, 0.015299974009394646, 0.07286490499973297, 0.029529787600040436, -0.044351380318403244, -0.041604846715927124, 0.06385225802659988, -0.007908550091087818, -0.003856210969388485, -0.03855051472783089, -0.0023078585509210825, -0.04141264036297798, -0.05051504448056221, -0.018076501786708832, -0.017384130507707596, 0.024294942617416382, 0.12094006687402725, 0.01351782027631998, 0.08950492739677429, 0.027889391407370567, -0.03165547922253609, -0.017131352797150612, -0.022714827209711075, 0.048935145139694214, -0.012115311808884144, -0.0575471930205822, -0.019780246540904045, 0.052039679139852524, 0.00199871021322906, -0.010556189343333244, -0.0176922008395195, -0.01899656467139721, -0.005256693810224533, -0.06929342448711395, -0.01906348578631878, 0.10669232159852982, -0.0058551388792693615, 0.011760520748794079, 0.0066625443287193775, 0.0019288291223347187, -0.08495593070983887, 0.03902851417660713, 0.1967391073703766, 0.007772537413984537, -0.04112537205219269, 0.08704622834920883, 0.007129311095923185, -0.07165598124265671, -0.06986088305711746, -0.028463803231716156, -0.02357759326696396, 0.015329649671912193, -0.01065903902053833, -0.09958454966545105, 0.020069725811481476, -0.04014518857002258, -0.0660862997174263, -0.055922750383615494, -0.032036129385232925, 0.01381504163146019, -0.0673903375864029, -0.025027597323060036, 0.021608922630548477, -0.0620601624250412, 0.03505481034517288, -0.054973628371953964, -0.0021920157596468925, -0.01736101694405079, -0.1220683753490448, -0.07779566198587418, 0.0008724227664060891, -0.046745795756578445, 0.06985874474048615, -0.06745105981826782, 0.052744727581739426, 0.03683020919561386, -0.03435657545924187, -0.06987597048282623, 0.00887364149093628, -0.04392600059509277, -0.03942466899752617, -0.057737983763217926, -0.00721937557682395, 0.010713488794863224, 0.03875933587551117, 0.15718387067317963, 0.008935746736824512, -0.06421459466218948, 0.02290276437997818, 0.034633539617061615, -0.06684417277574539, 0.0005746493698097765, -0.028561286628246307, 0.07741032540798187, -0.016047099605202675, 0.07573956996202469, -0.07167335599660873, -0.0015375938965007663, -0.019324950873851776, -0.033263999968767166, 0.014723926782608032, -0.0691518783569336, -0.06772343814373016, 
0.0042124162428081036, 0.07307381927967072, 0.03486260399222374, 0.04603007435798645, 0.07130003720521927, -0.02456359565258026, -0.006673890631645918, -0.02338244579732418, 0.011230859905481339, 0.019877653568983078, -0.03518665209412575, 0.0206899493932724, 0.05910487845540047, 0.019732976332306862, 0.04096956551074982, 0.07400382310152054, -0.03024907223880291, -0.015541939064860344, -0.008652037009596825, 0.0935826525092125, -0.049539074301719666, -0.04189642146229744, -0.07915540784597397, 0.030161747708916664, 0.05217037349939346, 0.008498051203787327, -0.02225595712661743, 0.041023027151823044, -0.008676717057824135, 0.03920895606279373, 0.042901333421468735, -0.0509256087243557, 0.03418148308992386, 0.10294827818870544, -0.007491919212043285, -0.04547177255153656, -0.0013863483909517527, -0.016816288232803345, 0.0057535297237336636, 0.04133246839046478, -0.014831697568297386, 0.1096695065498352, -0.02640458010137081, 0.05342832952737808, -0.10505645722150803, -0.069507896900177, -0.04607844352722168, 0.030713962391018867, -0.047581497579813004, 0.07578378170728683, 0.02707124687731266, 0.05470479652285576, 0.01324087381362915, 0.005669544450938702, 0.07757364213466644, -0.027681969106197357, 0.015634633600711823, 0.011706131510436535, -0.11028207093477249, -0.03370887413620949, 0.0342826321721077, 0.052396781742572784, -0.03439828380942345, -9.332131367059089e-33, -0.003496044548228383, -0.0012644683010876179, 0.007245716638863087, 0.08308663219213486, -0.12923602759838104, 0.01113795768469572, -0.015030942857265472, 0.01813196949660778, -0.08993704617023468, 0.056248947978019714, 0.10432837903499603, 0.008380789309740067, 0.08054981380701065, -0.0016472548013553023, 0.0940462201833725, -0.002078677760437131, -0.040112320333719254, -0.022219669073820114, -0.08358576893806458, -0.022520577535033226, 0.026831910014152527, 0.020184528082609177, -0.019914891570806503, 0.11616221070289612, -0.08901996910572052, -0.016575688496232033, 0.027953164651989937, 0.07949092239141464, -0.03504502400755882, -0.04410504922270775, -0.012492713518440723, -0.06611645221710205, -0.020088162273168564, -0.019216760993003845, 0.08393155038356781, 0.11951949447393417, 0.06375068426132202, -0.061182133853435516, -0.09066124260425568, -0.046286359429359436, 0.02162717469036579, -0.02759421616792679, -0.09041713923215866, 0.008177299052476883, -0.006156154442578554, -0.0033287708647549152, -0.004311972297728062, -0.01960325799882412, -0.08414454013109207, -0.0034149065613746643, 0.015856321901082993, -0.0005123159498907626, -0.027074772864580154, 0.03869790956377983, 0.050786130130290985, -0.028933823108673096, -0.07446572184562683, 0.022279445081949234, 0.012226884253323078, -0.01748575083911419, -0.055989284068346024, -0.011646092869341373, -0.0002180236770072952, 0.10100196301937103, 0.02999500371515751, -0.021314362064003944, -0.04096762463450432, 0.05568964406847954, -0.004973178263753653, 0.013144302181899548, 0.022288570180535316, 0.09443598240613937, 0.0018029726343229413, -0.09654559940099716, -0.01457826979458332, 0.04508035257458687, 0.06526371091604233, -0.03033633343875408, 0.009471519850194454, -0.11114948242902756, -0.046912480145692825, -0.10612039268016815, 0.11780810356140137, -0.026177652180194855, 0.0320870615541935, -0.015745604410767555, 0.06458097696304321, 0.048562128096818924, -0.034073326736688614, -0.03065350651741028, 0.06918460875749588, 0.06126512959599495, 0.0058005815371870995, -0.03808598220348358, 0.03678971901535988, 4.168464892362657e-32, -0.0452132411301136, 
0.051136620342731476, -0.09363184124231339, -0.032540980726480484, 0.08147275447845459, 0.03507697954773903, 0.04584404081106186, -0.00924444105476141, -0.012075415812432766, 0.0541100800037384, -0.015797585248947144, 0.05510234460234642, -0.04699498042464256, -0.018956895917654037, -0.04772498831152916, 0.05756324902176857, -0.0827300101518631, 0.004980154801160097, 0.024522915482521057, -0.019712436944246292, 0.009034484624862671, -0.012837578542530537, 0.026660654693841934, 0.06716003268957138, -0.05956435948610306, 0.0010818272130563855, -0.018492311239242554, 0.034606318920850754, 0.04679758474230766, -0.020694732666015625, 0.06055215373635292, -0.04266247898340225, 0.008420216850936413, -0.02698715589940548, -0.028203830122947693, 0.029279250651597977, -0.010966592468321323, -0.03348863869905472, -0.07982659339904785, -0.03935334458947182, -0.02174490876495838, -0.04081539437174797, 0.049022793769836426, -0.01604332961142063, -0.0032012134324759245, 0.0893029123544693, -0.0230527613312006, 0.01536057610064745, 0.027288464829325676, -0.01401998195797205, -0.057258568704128265, -0.07299835979938507, 0.032278336584568024, 0.040280167013406754, 0.060383908450603485, -0.0012196602765470743, 0.02501964196562767, -0.03808143362402916, -0.08765897154808044, 0.047424230724573135, -0.04527046158909798, -0.015525433234870434, -0.02020418457686901, -0.06228169426321983};
for (int i = 0; i < 384; i++) {
EXPECT_NEAR(normalized_embeddings[i], actual_values[i], 0.00001);
}
}
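
// Conversation parameters (`q`, `conversation_model_id`, `conversation_id`, `conversation`) must be passed
// as query parameters; placing any of them in the multi-search POST body should return HTTP 400.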
TEST_F(CollectionVectorTest, InvalidMultiSearchConversation) {
auto schema_json =
R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/all-MiniLM-L12-v2"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
auto api_key = std::string(std::getenv("api_key"));
auto conversation_model_config = R"({
"model_name": "openai/gpt-3.5-turbo",
"max_bytes": 1000,
"history_collection": "conversation_store"
})"_json;
conversation_model_config["api_key"] = api_key;
auto model_add_op = ConversationModelManager::add_model(conversation_model_config, "", true);
ASSERT_TRUE(model_add_op.ok());
std::string model_id = conversation_model_config["id"];
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
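// Build a multi-search body that (incorrectly) carries conversation parameters, then verify each misuse is rejected.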
nlohmann::json search_body;
search_body["searches"] = nlohmann::json::array();
nlohmann::json search1;
search1["collection"] = "test";
search1["q"] = "dog";
search1["query_by"] = "embedding";
search_body["searches"].push_back(search1);
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req->params["conversation"] = "true";
req->params["conversation_model_id"] = model_id;
req->params["q"] = "cat";
req->body = search_body.dump();
nlohmann::json embedded_params;
req->embedded_params_vec.push_back(embedded_params);
post_multi_search(req, res);
auto res_json = nlohmann::json::parse(res->body);
ASSERT_EQ(res->status_code, 400);
ASSERT_EQ(res_json["message"], "`q` parameter cannot be used in POST body if `conversation` is enabled. Please set `q` as a query parameter in the request, instead of inside the POST body");
search_body["searches"][0].erase("q");
search_body["searches"][0]["conversation_model_id"] = model_id;
req->body = search_body.dump();
post_multi_search(req, res);
res_json = nlohmann::json::parse(res->body);
ASSERT_EQ(res->status_code, 400);
ASSERT_EQ(res_json["message"], "`conversation_model_id` cannot be used in POST body. Please set `conversation_model_id` as a query parameter in the request, instead of inside the POST body");
search_body["searches"][0].erase("conversation_model_id");
search_body["searches"][0]["conversation_id"] = "123";
req->body = search_body.dump();
post_multi_search(req, res);
res_json = nlohmann::json::parse(res->body);
ASSERT_EQ(res->status_code, 400);
ASSERT_EQ(res_json["message"], "`conversation_id` cannot be used in POST body. Please set `conversation_id` as a query parameter in the request, instead of inside the POST body");
search_body["searches"][0].erase("conversation_id");
search_body["searches"][0]["conversation"] = true;
req->body = search_body.dump();
post_multi_search(req, res);
res_json = nlohmann::json::parse(res->body);
ASSERT_EQ(res->status_code, 400);
ASSERT_EQ(res_json["message"], "`conversation` cannot be used in POST body. Please set `conversation` as a query parameter in the request, instead of inside the POST body");
}
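
// Migrating a legacy conversation model config should add a `history_collection` entry and
// create the backing `conversation_store` collection.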
TEST_F(CollectionVectorTest, TestMigratingConversationModel) {
auto conversation_model_config = R"({
"id": "0",
"model_name": "openai/gpt-3.5-turbo",
"max_bytes": 1000
})"_json;
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
auto api_key = std::string(std::getenv("api_key"));
conversation_model_config["api_key"] = api_key;
auto migrate_res = ConversationModelManager::migrate_model(conversation_model_config);
ASSERT_TRUE(migrate_res.ok());
ASSERT_TRUE(conversation_model_config.count("history_collection") == 1);
auto collection = CollectionManager::get_instance().get_collection("conversation_store").get();
ASSERT_TRUE(collection != nullptr);
}
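
// A partial update (only `max_bytes`) should change just that field while `history_collection`
// and `model_name` remain untouched.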
TEST_F(CollectionVectorTest, TestPartiallyUpdateConversationModel) {
auto schema_json =
R"({
"name": "Products",
"fields": [
{"name": "product_name", "type": "string", "infix": true},
{"name": "category", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["product_name", "category"], "model_config": {"model_name": "ts/e5-small"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
if (std::getenv("api_key") == nullptr) {
LOG(INFO) << "Skipping test as api_key is not set.";
return;
}
auto api_key = std::string(std::getenv("api_key"));
auto conversation_model_config = R"({
"model_name": "openai/gpt-3.5-turbo",
"max_bytes": 1000,
"history_collection": "conversation_store"
})"_json;
conversation_model_config["api_key"] = api_key;
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto model_add_op = ConversationModelManager::add_model(conversation_model_config, "", true);
ASSERT_TRUE(model_add_op.ok());
std::string model_id = conversation_model_config["id"];
auto update_op = ConversationModelManager::update_model(model_id, R"({"max_bytes": 2000})"_json);
ASSERT_TRUE(update_op.ok());
auto updated_model = update_op.get();
ASSERT_EQ(updated_model["max_bytes"], 2000);
ASSERT_EQ(updated_model["history_collection"], "conversation_store");
ASSERT_EQ(updated_model["model_name"], "openai/gpt-3.5-turbo");
}
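
// The `queries:[...]` clause of a vector query lets extra text be embedded and used for a wildcard search.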
TEST_F(CollectionVectorTest, TestVectorQueryQs) {
auto schema_json =
R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/all-MiniLM-L12-v2"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "Stark Industries"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:[superhero, company])");
ASSERT_TRUE(results.ok());
ASSERT_EQ(results.get()["hits"].size(), 1);
}
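
// Malformed `queries` values (a quoted string, a number, an unterminated or unbalanced list)
// must be rejected with a descriptive error.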
TEST_F(CollectionVectorTest, TestVectorQueryInvalidQs) {
auto schema_json =
R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/all-MiniLM-L12-v2"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "Stark Industries"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:\"test\")");
ASSERT_FALSE(results.ok());
ASSERT_EQ(results.error(), "Malformed vector query string: "
"`queries` parameter must be a list of strings.");
results = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:11)");
ASSERT_FALSE(results.ok());
results = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:[superhero, company");
ASSERT_FALSE(results.ok());
results = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:[superhero, company)");
ASSERT_FALSE(results.ok());
ASSERT_EQ(results.error(), "Malformed vector query string: "
"`queries` parameter must be a list of strings.");
}
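
// The `queries` clause should also be accepted when combined with a keyword query (hybrid search).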
TEST_F(CollectionVectorTest, TestVectorQueryQsWithHybridSearch) {
auto schema_json =
R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/all-MiniLM-L12-v2"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "Stark Industries"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll->search("stark", {"name"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:[superhero, company])");
ASSERT_TRUE(results.ok());
ASSERT_EQ(results.get()["hits"].size(), 1);
}
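
// With the default alpha the keyword match ranks first; raising `alpha` to 0.9 weights the ranking
// towards the semantic `queries` and reorders the hits.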
TEST_F(CollectionVectorTest, TestVectorQueryQsHybridSearchAlpha) {
auto schema_json =
R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/all-MiniLM-L12-v2"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "Apple iPhone"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "Samsung Galaxy"
})"_json.dump());
auto results = coll->search("apple", {"name"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:[samsung, phone])");
ASSERT_TRUE(results.ok());
ASSERT_EQ(results.get()["hits"].size(), 2);
ASSERT_EQ(results.get()["hits"][0]["document"]["name"], "Apple iPhone");
results = coll->search("apple", {"name"}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:[samsung, phone], alpha:0.9)");
ASSERT_TRUE(results.ok());
ASSERT_EQ(results.get()["hits"].size(), 2);
ASSERT_EQ(results.get()["hits"][0]["document"]["name"], "Samsung Galaxy");
}
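
// `query_weights` biases the blended embedding towards the heavier query term and changes which document ranks first.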
TEST_F(CollectionVectorTest, TestVectorQueryQsWeight) {
auto schema_json =
R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/all-MiniLM-L12-v2"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "Apple iPhone"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "Samsung Galaxy"
})"_json.dump());
auto results = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:[samsung, apple], query_weights:[0.1, 0.9])");
ASSERT_TRUE(results.ok());
ASSERT_EQ(results.get()["hits"].size(), 2);
ASSERT_EQ(results.get()["hits"][0]["document"]["name"], "Apple iPhone");
results = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:[samsung, apple], query_weights:[0.9, 0.1])");
ASSERT_TRUE(results.ok());
ASSERT_EQ(results.get()["hits"].size(), 2);
ASSERT_EQ(results.get()["hits"][0]["document"]["name"], "Samsung Galaxy");
}
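
// `query_weights` must have the same length as `queries` and sum to 1.0; anything else is rejected.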
TEST_F(CollectionVectorTest, TestVectorQueryQsWeightInvalid) {
auto schema_json =
R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{"name": "embedding", "type":"float[]", "embed":{"from": ["name"], "model_config": {"model_name": "ts/all-MiniLM-L12-v2"}}}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "Apple iPhone"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "Samsung Galaxy"
})"_json.dump());
auto results = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:[samsung, apple], query_weights:[0.1, 0.9, 0.1])");
ASSERT_FALSE(results.ok());
ASSERT_EQ(results.error(), "Malformed vector query string: `queries` and `query_weights` must be of the same length.");
results = coll->search("*", {}, "", {}, {}, {0}, 20, 1, FREQUENCY, {true}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 5,
"", 10, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 6000 * 1000, 4, 7,
fallback,
4, {off}, 32767, 32767, 2,
false, true, "embedding:([], queries:[samsung, apple], query_weights:[0.4, 0.9])");
ASSERT_FALSE(results.ok());
ASSERT_EQ(results.error(), "Malformed vector query string: `query_weights` must sum to 1.0.");
}
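
// Schema validation for `voice_query_model`: unknown model namespaces, non-object values and
// empty or non-string model names are all rejected at collection creation time.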
TEST_F(CollectionVectorTest, TestInvalidVoiceQueryModel) {
nlohmann::json schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"}
],
"voice_query_model": {
"model_name": "invalid-model"
}
})"_json;
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Unknown model namespace", collection_create_op.error());
schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"}
],
"voice_query_model": {
"model_name": "base.en"
}
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Unknown model namespace", collection_create_op.error());
schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"}
],
"voice_query_model": "invalid"
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ(collection_create_op.error(), "Parameter `voice_query_model` must be an object.");
schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"}
],
"voice_query_model": {
"model_name": 1
}
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Parameter `voice_query_model.model_name` must be a non-empty string.", collection_create_op.error());
schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"}
],
"voice_query_model": {
"model_name": ""
}
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Parameter `voice_query_model.model_name` must be a non-empty string.", collection_create_op.error());
}
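
// An embedded base64 WAV clip is transcribed by the whisper model ("Smartphone") and used as the
// search query, matching the "Samsung Galaxy smartphone" document (id 1).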
TEST_F(CollectionVectorTest, TestVoiceQuery) {
nlohmann::json schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"}
],
"voice_query_model": {
"model_name": "ts/whisper/base.en"
}
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "Zara shirt"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "Samsung Galaxy smartphone"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
auto results = coll->search("", {"name"}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "", "UklGRrSFAABXQVZFZm10IBAAAAABAAEAgD4AAAB9AAACABAATElTVDIAAABJTkZPSU5BTRAAAABDYXlpcmJhc2kgU2suIDQASVNGVA0AAABMYXZmNjAuMy4xMDAAAGRhdGFWhQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD//wAAAAAAAAAAAQAAAAAAAAAAAP///////wEAAQAAAAAA//8AAAEAAAD///////8AAAAAAQAAAAAAAQABAP///////wAAAAD//wAAAQABAAAAAQD//////v///wAA//8AAAAAAAACAAEAAAAAAP7///8BAAEAAgAAAAAAAAABAAEA///+////AQACAAEA/////wEAAAABAAAAAAAAAAEAAQD/////AQAAAAAAAQAAAAEAAAD/////AAAAAP////////3/AAAAAP//AAD+/////v///wAAAAD///////////////8AAAAAAAABAAEAAQABAAEAAQABAAIAAQABAAAAAAABAAEAAQABAAEAAQABAAEAAAAAAAAAAAABAAEAAgACAAIAAQABAAAAAAABAAIAAwADAAIAAgACAAEAAAAAAAEAAQAAAAAA///+//7//v/+//7//v/+//7//v/+//7///////////////////8AAAAAAQABAAMABAAGAAgACQAJAAUAAQD//////v8AAAAA/f/3/+3/7P/q/+3/9f/7////AQD///r/9v/1//P/8P/v/+3/9P8BAAgAEAAUABAACQD+//j/9P/z//b/9P/z//j/AgAGAAEA9v/t/+z/7v/2//n/AAABAAMACgAFAAMAAAD2//j//P///wwAEgAWABQABgD3/+n/5//s//L//P8CAP//9//2//b//f///wUAEgAYABUAEQAMAAgABgADAAAAAAALABYAGgAWAA8ABAD///n/9P/y//L/+P/+//j/9//5//b//f8EAAQABAAIAA4AEgASAA8AFQAZABsAHQAaABkAFgASAAUA/P8AAAYAEwAcABsAFQANAAYABQABAPr/7f/q//D/9/8KABYADwANAAIA+P/7/wAABwARABgAHQAbABcAGgAbABcAFgAPAAkAAAAEAAwAFgAoACwAJwAbAAwAAAD2//n/AwD///v/+f/9/wYACwALAAcAAQD//wAAAAAFAAkACwAFAPz/+f/1//H/7P/i/+L/4f/g/+b/7//8/wAA8f/r/+D/4P/b/9H/xv/I/9P/6f/0//r/9//s//X/9v/y/+7/3P/U/9P/1P/f/+7/+P/8//f/7v/m/97/4P/i/+f/7//2//T/6f/i/+L/4v/q/+7/6f/n/+f/7v8AAA4AEwANAAEA9P/m/9z/2//a/+f/AAAHAAkABQD+/+3/5v/k/93/5v/s//j/DAAVACAAHwAbABoAFwAgACYALQA2ADoAOQA5ADoAOwA/AEMAQgA4ADUAMQAwADAAJgAlACUAJgAkACQAHgAfABoAHAAfAB0AIwAnACUAGwAXABQAFgAfABwAIwAbAAsABQDs/93/1f/Y/+P/4//c/93/2P/V/9b/0f/V/9r/4P/d/9//4v/i/+v/9//9//////8BAPr/9v/0//L/+P/8/wMACAADAAIA+P/2//3//f8EAAkABAAAAPv/+f/2//P/+f8CAAkACwAOAAwABAA
JABgAJQAsACgAIAAXABAADwAcAC0ANgAwAC4AIgAaAA4ABQD5//H/7P/u//P/9/8AAP3//f8BAAoAGAAmACoAKAAeABkAEQAGAAQADAARAB8AKAAlADQAMQAmACAAFgAWABMADgAAAPf/8P/1/wEACAAVACAAHgAeAB0AGgAZABAADQD8//v/DQAlADQANQAdAP7/7f/c/97/5v/y/wAABgALAAIA4/++/6P/jf+U/6T/tv/I/87/2f/j/9n/0f/H/7j/uv+5/7f/wf+8/8D/yv/R/93/3v/b/9H/0P/Q/8//2f/g/+v/9v/3/+n/0P+7/7P/sf+z/8j/1P/d/93/zP+5/7H/uv/P/+L/8f/0/+n/2v/K/8r/x//T/9b/3v/p/9//2P/Y/9j/3v/i/93/1//a/9//8v////7/+f/7/wQAFQApAC4ANwAwACsANABCAE0ATQBGADIAGAALAAYABwATACIAJwAmABUA/f/w//H/AQAYADAAQgA5ACUAFgAVAB4AMABCAD0AOgAzACwAMgA9ADkAOAA+ADcANAA8ADwAOAA7AD8ANgA3ADMAMAAxACsALAAnAB0AGwAdABkADwALAAoABgAIAAoAFAAgACEAHgAcABUAFQAaABcAGQAgAB0AEgAOAAQAAQD///r//v/5//z/AwAKAAUABAD7/9n/xf+1/7P/vf/M/+z/9f/4//j/AAALABEAGAAWAAQA+v/u/+r/7v/1/wsAEgAXABIABgD2//L/+f8JAB8ALwA7ADkANAA1ACoAHgAZABMAHAAiACQAJQAuAD4ASgBSAFUARwA8ADIADQAAAAcABwAEAAMADQAUABMAGwAVAA4AEgANABUAEwAUABwAFgATABcAEAAKAAUABgAOABgAJQAuAD8APAA3ADEAHAAEAPH/5P/T/9j/2v/f/9v/zv/R/83/zv/H/8T/zv/E/6v/lv+J/4H/f/+Q/5v/sv+y/6D/mv+N/4z/kP+U/53/rP+x/7D/q/+j/5n/jP+A/3v/ev+B/4z/kf+W/5T/jP+Q/6D/pf+s/7v/yP/K/9D/0f/T/9X/2P/d/9//4v/m/+///v8PAA8ABwD///r/8f/w//f/9P/9/wUABwAQAAUA/P////f/8f/0//b/CwAlAEAAXQBcAE8ATAA4ADsAPgA7AEcATABHAFUAXQBNADoAJAAKAAEA+//w//j/AQAJAAkACQAGAP7/9P/w/+7/8P/w/+3/+f8EAAwAEwAIAPn/8P/y//P/9P/t//H/9f/z/wIAEQAIAAcABwD///n/8//6/wEABgD8//n/BQAMABQAGgAgAC0AOwA4AD0AOQBBAEwATgBVAEYATABVAFcAWABjAGYAZwBiAFIAQAAuACgAMAA4AEYATwBOAEkAOAAnABgAHAAaACUAJQA1AEYASABFAD0ANQAiAC0APgA/AEoASwBDADkAMAAnACsANgBIAFIARgAyAB4AEAAWACsAPAA6ADEAJQAXABEAHAAwAD0AQQA1ADIAKQAgABsAHQA1ADwAMQAwACkAJwAeAB0AEwARACIAKQArAB4AGwAlABwADwAYAB0AIAApACwAJAARAAAA+P/0/wEADwALAAMA8//d/8b/vv+5/6//u//A/8z/0v/Q/8f/sv+b/5T/lf+m/7T/qf+e/4//kv+b/5L/iv+K/4L/fP94/3j/bv9w/4H/iP+b/6X/tf+8/7//uv+d/5f/lP+a/53/qP+6/7b/yv/T/83/wP+v/6v/tv/A/8T/vv+7/8D/zf/V/87/3P/T/87/0//U/93/9P8DAAgAEgADAO7/4P/c/+T/8v/1//7/DwAXAB4ADQD5/+b/yv+//7b/s/+2/7T/vP/B/7T/p/+l/6T/tf/I/9T/1v/c/+b/4//6/wcADgAYABwAHgAQAAQA+//+/wMACgAbACAAHwALAAEA+v/w/+v/6//j/+7/BQD6/wIA/f/w/+//3P/c/+H/6//5/wEA/P////n//v8AAPX/8//v/+r/+P8FAAgACwAPAAcA+P/s/+//+P/5//z/AQAUABIACAARACIAJAApADIALwA9AEUASQBHAE0AWwBcAGIAaABpAGAAYgBtAIQAjQCaAKMAjgBxAFMAPAA3AD4ATQBoAHYAcgBjAFYAQQA6ADQAJgApADoATQBOAE4AVQBvAG8AZQBbAEYAOgAsADYAPgBIAFcAUgBmAGoAZABVAD4AMAAeABIADAAAAAkABwAFAP3/AAAKABAAEAALAAwADAAGAAgAFgAUABgAHAAfAB8AJwAvABwAGgAWABkAGwAkACEAEAAKAP3/5P/d/9v/1P/L/9X/6f/t//P/7//p/+//5f/c/+f/AAATAB4AFgAWABwAIgAkAC8APwBQAE8AOAAdAAgA///4//T/7f/n/+H/2//R/8v/xP/V/8v/wf+7/8L/zf/B/8b/0P/Y/9X/1v/P/8T/uv+u/6b/rv+p/57/qf+v/7f/wf+0/6n/l/+X/6H/nv+h/5v/qf+w/6r/vP/B/8X/x//D/87/x//H/8f/1v/l//D/8P/k/9b/0//M/9X/9/8JABYAFAANAAwAGAAhABkADQAeACgAKQAxACsAIAAoACMAHwAdABcACwAJAAYACwAYACYAIAAaABAACQALAAsAEQAQAAcA/v8CAAsAGgAZAAcA8//m/+f/7v///wIAAQD6/+r/3v/P/9H/1//B/7D/sf+x/6//sf+r/7P/tv+r/7X/tf+x/6v/pv+h/5j/pP+t/7P/tP+9/83/3P/g/9L/0//Q/9z/4v/2/wkADAAgAC8AMAAlACcAJgAZACIAMAAsADoATgBXAF8AVABMAEUAKwAmACQAJAAaAAwABQAFAAcA+P/l/+j/4v/e/9z/xv/Z/+H/8f8PAB0AJQAnAEQASwBZAGQAdgCDAIUAmwCTAJkAoAChAKkApQC+ANIA0wDZAM8AqACKAH8AZgBlAFMAOgA0AEUAUgBVAFoAPgBBAEAAJgAgABIAGAAjAB0AFQASAPf/6f/r/+D/2f/e/9T/y//L/8X/1f/c/9P/2P/R/7r/yv/P/9T/6f/w//j/BwAUABsAPABUAFgAWQBWAFYAUABHAEEANQAzAB4AEwAYABwAKAAzACcAIgAQAOz/0P+0/6n/rf+m/6r/rf+t/6//rP+r/7L/u/+x/77/xP+z/63/q/+j/6H/nf+Y/5v/l/+A/3j/ev9r/1z/Zf97/3v/ev91/27/cv9g/17/X/9r/27/ff+Y/6T/sv+v/8D/3v/n////EAASACcALQAqAC0ALgAnABMAFwAYABYABwAAAPP/6v/n/9H/xP+0/6r/pf+X/4X/hv+T/5f/jf+W/53/nv+g/6b/t//O/9n/4v/Z/+H/+P8CAPz/CwAJAAEAEAAMABYAKwAnADAANAA5ACwACgAGAAYAAQANAAUA9v/m/+L/3//i/+z/7f/r/+X/8f/2//n/4//i/+//6/8LABsAKQA/AFUAXQBfAF8AVQA+ADUAOwA6AEMAUwBYAE4ASwA/ADYAMAAlACkAIwAnADcALgA6AE
AAQQBCAE0AWABWAG8AaQBbAFIASwBKAEMARABJAEgAMgAtACoAKgAvACwAGgAIAAQA/P/q////DAAcADAASABbAFEAWABGAEwAYABhAGoAdAB2AHYAewBxAG0AYABWAFcAQQAsACEAGQANAA0AEwAfACwAGgAHAAUAAwADAA4AEQAQAA8ADQAQAA8ACAAJAAAA9//q/+P/9//9//b/7/8GABcAIAArAC0AJAAcACcAMAA8AE4AWwBhAG8AagBkAGkAZQBXAEUAQgBBAEQASABJAEUANAAkAA8ACAAJAP3/8P/m/+P/5v/4//f/8f/r/+3/6//c/9v/5v/2//H/7v/v/9L/0//T/9H/5f/v//P/5//S/8j/uf+9/7T/sv+x/6f/rv+o/6D/n/+V/4P/gv+N/5n/qf+3/8T/wv+5/7j/pP+X/5P/n/+e/5P/mf+c/5n/lP+T/4v/hf9n/2H/V/9Q/1D/PP88/z//S/9V/1v/Z/9s/3X/df90/4D/iP+O/5H/mP+f/6//uP+4/8b/yv/J/8b/sv+w/7n/u//B/7//yP/N/9j/4v/q//H/9f/3//3/BQAHAAQA+f8EAAsABwACAAMA8v/e/8//xf/M/7v/tv+v/7L/n/+U/4r/ev9+/4D/ev97/5D/of+y/8L/1f/T/9T/0v/Y/+b/7/8JABUALQAxACoAQAAzADQAMwAqAC8AQwBSAFMAWgBXAFkAWgBZAGIAZABtAHEAfgCEAH8AfwB7AHsAbQB3AHAAYABoAGQAXwBfAFsAUABRAFIAQwBDADoARQBKAEoATgBTAFsAXQBxAHAAbgCFAIUAiACNAJAAmwCcAJ4AlgCKAJYAlACeAJoAmwCeAJwAnQCLAJYAhwB3AGgATwBTAFYAQQAgABoABwAFAAAA+/8KAAEAAAAAAPz/AAAMAA8ADwAZACEAKQAlACoAKgAvADMAOQA/ADQAJgAZAPr/6f/V/8z/yP/B/8L/xP+7/8D/y//J/8//3P/o//D/+v/5/wcADwAVABQAFwAkACIALAAtADAANQAtACoALQApADIANAAxADEAOwBJAFIAWgBcAFEARwBGAE0ATABYAFoAWgBcAE0AQwA/AEYARQBKAEoATgBDADYAKgAZABQADQD+/+//4v/b/9H/yf/J/8T/wf/N/7r/tv+m/5H/m/+d/6n/sf+6/7b/qv+b/4z/hv+N/5f/n/+h/6r/rv+Y/5L/mv+a/5T/kv+O/4b/gv+B/4L/eP9w/3D/Z/9d/2b/a/9u/3b/iP+C/5D/qP+x/8D/uv/A/83/0f/L/9X/3f/d/93/5P/r//H/8P/U/8v/zP/Q/8v/0v/Y/8z/xv/C/7z/r/+3/7b/sf+1/7b/v//E/8z/wf/I/9T/2v/k/+n/6f/5/wUAEgAiADQAOQAsACwAIAAjAC0AKQAeACMAIAAhAC4ALQAkABUAFAABAPX//v/+//r/9f/5/wIAFQAiACEAHAAcABYAFAAWACQAHwAWACMAIgAoAD8AQQBGAEQAPgAyAB8AJQAYABsAFAAVABcA+//9//j/7P/t//L/8P/n/9b/w/+3/6//ov+Y/5z/lP+X/6D/qv+o/5//kv+K/5j/nf+r/7r/0v/b/+n/5f/p//T/7//6//v/7P/q/+//7f/x//P/8v/w//L/+P/2//b///8JAA0AHQA0AEQAWgBnAG8AgACCAIUAhACQAKUAnwCqALoAqwCcAJ8AnQCVAJQAgQBsAHEAYwBaAFgAVQBUAE8ATgBCAC0AJwAkADAAQQA4ADMAOQA/ADkAOAAzADMAOgA1AD4ARQBEAEoAQAA7AC4AIQAmAB4AHQAvAEEAQABAAEsASAA8ADYALgAQAPf/9f/1/+7/6//h/9z/7f/s/+b/8//t/+f/7//5/w0AEQAZABAAGgAoACYAHwAgAB4AGwARABoAJAAlADEALgAbAAgA/f/2/+v/4//a/9X/2f/H/73/w//L/8j/xP/I/87/1v/X/9L/xf++/8L/tP+y/63/r/+x/7b/xv/P/8//yf/K/8z/2P/r/wEABQADABcAGgAnACsAKAAoACYAKwAuADAAKwAoACMAGgAOAAsA/P/l/+X/7P/y//L/9f/s/93/4f/m/93/4//j/9P/xf++/7v/tf+1/77/t/+2/7b/sf+0/7D/pv+b/5f/lf+R/5n/o/+b/5n/mv+j/6b/ov+d/6r/s/++/8r/4P/6//v//v8IAAAACQASAA8AAwARABMADQAZACYANAA7AC4AJwAoACcAJQAiABgAEAATAAsADAARABsAKQAYAA4AAwD4//X/5//p//L/7P/g/93/4//o//D/AAD9//f/+v/y/+3/4f/l/+7/5//+//z/9/8IAPj/9/8FAAkAFAAoACEAKAAnAB4AIwAhABcAAQABAP7///8GAA0AFgAdABsAHwAZABQAHAAfADQAMQAzAEIANQAtADUAMwA4ADQAKAAfABMADgAJABwAIAAaABoAHAAeACUAJwA3AC0AGQAWABEAEQAUADMANwAtACUAFwAYACAAHgAXAB8AFwAcACgAJwAvACwAKQAuADUAOwBAAE8ATABHAEgARQBCAD0ASgBTAFgAVABUAFMATABVAFIAQgAqACUAKgAVABAACQAQAAIA///2/+//8v/m//L//v8FAAkACQAHAPv/+P/2//z/AAAQACsAOQBGAEwATAA8ACwAFgAGAAUA/P/z/+T/2P/X/8T/u/+1/53/iv96/3T/d/9q/3L/ZP9Z/23/Zv9n/2//Yv9e/1n/Vf9k/2n/ev+F/4v/nf+j/67/sv+7/8D/1v/b/9j/4//k/+j/5P/s/+L/3v/f/9f/3//X/9r/4P/O/8r/y//I/83/0f/Y/9D/zf/M/8b/2//h/+b/8f/9/w8ADgAVACEAJAAvAC4AMQBBAEsASwBEADYAKQAmACkAFQARABcAGQAVABoAEQAWABwAEQAQAPX/7//y//j/+P/z/+v/6//v/+b/5//s/+f/2//R/9v/6f/p/+f/8//y/9//2P/F/7b/pP+h/6D/nf+f/5L/of+q/6f/qP+r/6n/r//H/9L/5v/z//f/+f8GAB4AIwAxAEAARgBEAEAAOwBEAEUASwBdAGwAYABeAGMAYgBjAG0AaQBhAHAAdwCBAH4AhwCCAIIAggB4AGwAZgBXAFAARQBJAFIAVQBeAE4ASQBEAEIAQQBKAEoAQgBLAD4APgBTAF8AVwBdAGAASwBJAEQALQAwADcAMwAxACwAOgA5ADAAHQAUABMABgDv/8f/tP+q/5r/lP+M/4r/g/92/3H/Y/9R/1T/Wv9g/3f/jf+d/6j/vv/b/+b/6P/o//P//f8KABQADAAPABMABQAMAAsAAwANABkAIQAVAB0AFAAWAB4AGwAfABQADQAOAB8AIgAiACYAHgAYABwAGAAZACkAJAAvAEUAUQBfAGsAewCOAIgAgQB7AHUAcwBrAHUAfgB5AHEAbQBsAFYARQA4ACwALgAnACoAHwARABAAAwAGAAYA//8KABUAHAAmADMAOQBCAEwAS
wBNAEMAOwBFAEgAPwA0ADcAIwAUAA4A/f/r/+j/4v/W/9f/0v/Z/+H/2//a/9b/zf+//6//uf+w/6H/rP+3/67/ov+N/3v/eP97/3f/d/9+/37/f/93/3n/gf+G/4z/hv+B/4//kP+j/6X/pv+m/6z/uP+7/77/t/+x/6j/pP+r/6H/mf+i/6H/oP+h/5z/m/+Q/5H/hv+H/4n/gP+H/4P/cP9z/4H/iP+c/6P/pv+2/8L/0P/a/+j/6f/h/+3/8f/+/wcABAAKAAgADAAbABkAJwAzACoALQBIAEQASAA+ACYALAApACoAHgAYAB8AGAAaABgAGAAXABgADQAEAAUACwAGAAkABwD8/+P/2//k/+n/9v/2//f////8//b//v8FABAAIAAvADIAOwA5AEMAWABhAGMAWgBhAGYAYwBuAG8AbQBkAGUAdgBqAG4AZABaAFcANQAmACoAIgAjACIAEgAKAAQA+//6/+7/6v/m/+L/5P/Y/97/3v/d/+f/5v/1//z/8v/4/wEA+v/2/wMADgAWABcAFQAgADYAQwBCAD4ARwBPAFMAUABWAFAATABMAE8ASgBqAIcAgwCMAIYAhQCBAI4AjQCBAHMAewB9AGUAcgB5AGsAZABpAHUAeABpAFkAVgBUAFIASABCAEIAOwBJAEEAPQBFAEAAPwBIAEwAQwBHAEEAQQBCACoAJAAeABsAFAD///3/8v/g/8//wf+//7r/q/+h/5T/iv+I/4n/hf+C/4r/hf+Q/43/ff98/2v/W/9g/2L/Yv9q/3v/gv+I/4T/iv+U/5f/mf+f/6P/n/+g/6P/rP+k/6X/rP+1/7r/rv+s/67/v//R/9X/4v/o/+v/8f/n/+f/5v/j/9r/y//Q/8H/xf/Q/8z/zv/N/8P/t/+9/73/v//F/8L/uf+o/6n/pv+b/6T/pv+j/5v/of+3/8T/zv/U/9//4f/b/9H/1f/l/+L/5f/o/+r/9v8LAAkAEgAwACwAKgAxADgAOQArACsAJAAkADIAQABFAEMASQBHAEIAPgA7ADEAOQBNAFQAVgBWAFcAXQBXAE8ARgBGAEIAMwA0ACoALQA4AEMASgBIADkAIwAXAC4AHQALAAoADgAYABsAHwAoAC4ANwA3ACwAKgAuACkAGQAkABkAFwAQAAwADgAJABoAIwAoACwAMAA6AEkATABJAF0AWgBiAGsAcQB6AHQAcwB3AHgAcgBuAGMAYABeAFsAVQA5AC4AFQADAOz/3//R/8X/vv+3/8L/v/+2/8P/xf/A/8T/sv+w/7X/uf+4/7z/u//C/8v/2P/W/8f/2P/X/9j/2v/U/9H/wf+s/6v/mP+Y/6X/r/+//7z/xP/F/8X/x//D/9P/6f8CAB0AHwApADcAVABpAGMAbQCBAJcAmACcAJ0AoACoAKoAoACXAJAAfQBwAGsAbQBjAGwAeQBxAG0AZwBlAFgAQgA6ABkAFgAcABgAFAAHAPr/7//v/+P/6P/k/+r/6v/i/+j/5f/h/9T/zv/I/8H/wf+0/6z/ov+U/5X/jP+R/5H/h/9+/4z/l/+P/5H/jf+M/4j/gf+G/4b/ff92/2b/c/+F/4//lv+T/5j/nP+g/6H/pP+b/5//pP+h/5v/n/+c/6H/ov+X/5n/k/+a/6L/mv+o/8T/1v/X/+r/6f/s/+z/AwAFAPv/BAD6/wgAGAAfAC0AMAA1ADkAJwAsADwAPwA7ADAAKwA3ADcANwA6ADkAMAAzAC4AKQAuAC4AHgAPAAUA6v/y//X/6//k/9j/vv+w/6z/o/+5/7f/tf+3/7j/x//I/87/4P/f/9n/3P/s//3/DgAVABoAJAApACUAIgAlACsANgAwACkAJgAmAB8AEgD+//b/+v/7//P/3f/i/+H/5f/w//L/7P/4//L/7/8AAAsAEgASABYAGgASAAQA9P/z//z/6P/j/9//4P/e/9X/0P/P/8//4f/o//T/CQAbABUAGwAnAC0AOQBQAGAAagB3AIMAmQCiAJ0AogCfAKUAoACbAKQAoQCnALAApQCdAIkAdQBxAHMAZQBZAFkAWABbAFgASQBUAFUARwBGAEAARwBRAE8AUABRAGEAaABoAGsAdwBqAFsAXABkAF4AUwBTAFcAWgBiAFoAWABsAGoAbABoAGEAYgBkAFwAXABUAFAARAA9ADYAKwAnACQAIQAXABIAEgAMAAUABQAEAAEA//8BAAkAEAARABoADwAIAPb/2v/O/8b/x/+2/7n/s/+j/6b/pP+f/5X/h/9+/3z/d/95/3f/bv9m/2H/Wv9O/1D/Pv85/0L/Pf87/z7/Sv9Y/2X/cv96/4b/jP+a/5r/nP+i/6P/nv+Z/43/n/+w/6n/qf+i/6P/rP+0/77/uv/B/8P/yf/b/+P/3f/a/+L/8P/9/wsAGAAlAC0APABMAGIAcQBcAFYAUQBUAFUAVQBdAFAATQBTAFUAWgBcAGgATQBDADoARABPADgANgAmACIAEAD7//X/2//Q/8T/vf+//7H/s/+s/6b/sv+t/6r/r/+d/5r/n/+Y/57/m/+U/5//p/+z/7z/v//G/8D/sf+2/6n/nf+k/6D/pP+r/6P/nP+d/5v/mv+Y/5n/l/+R/4X/kv+X/6j/wP/L/9D/2v/3/wMADgAZABcAJwAxADgAOgBDAEQATwBOAEIAQgA+AEIAOQA4AEUARgBKAEsARABFAEUASABeAGsAewCVAJEApACrALUAwgC3ALkAuwC3AKkAogCbAI8AmQCSAI4AjACAAH8AeAB3AG8AagBqAGAAWwBVAE4ARgBDACgAKgAdABUAIAAVACAAIgAjACkAJgAeABwADwD8//f/6P/i/+3/5f/h/97/5v/o/+X/3v/p/+X/4v/n/9//0v/R/9D/0v/P/9j/3v/R/9v/0//V/97/2f/g/+z/9/8EAP//CwAOAAYABgAHAAgADQAVABgAFQAfAB4AIgAfABIADwATABUACwAHAAUABgAEAAYA/v/7//3///////7//P8AAAAAAQDt//L/+f/6/woAGQAXACkAHwAdADMAJAAcAAwADAALAAEAFgAWABoAHgAhABkAEAAOAB4AJQAtADQAJQAZABsAGwAgACoALQAwADsAPwA6ACwAKQAxACAA+P/t/93/2f/Y/9L/vv+w/6j/q/+b/5f/pP+t/67/of+g/5//n/+W/4//if+H/37/dv96/4v/kf+T/4//g/+L/3//ev95/3X/bf9v/1//Xv9v/3T/gf+K/5T/nv+g/5//l/+m/6//xP/B/7r/wv+w/7v/w/+4/63/pv+j/5z/mv+U/5X/mf+x/7//uv+x/67/uP/B/8r/1f/Z/+r/9f/2/wQACwAEAPX/7P/v/+z/8//y//X/BwARABQAFAAZABwAHwAtACkALwA7AD8AQQA7ADsARABJAEQAWABaAFcAWwBWAEwASwBMAEUATABGADAAJAAfABEAFwAUAAcACgAGAAkADAAMAAcADgAJAAQAEwAVABAAGAAaACMAKgAmACcANAA1ADkASQBaAG0AaABhAFkAVABV
AEwAVABcAFkAUgBLAEIAQAA5ADYANgAtABoAEgAVABEAIgAlACIAIAAnABwAGwAuADMANQA+ADoAMgAvACkANQAvACsAPQBFAFMAVgBRAFQAXwBhAGUAdAB5AIQAigCLAIgAlQCJAIMAfABoAF8AWgBWAFsAVQBZAGUAXgBqAGEAXgBnAFoAWABPAFAAYQBfAF8AYABlAGAAUgBDADkAJQAXABgAGgApACwANAA7ADYAIQATAP3/5f/b/8b/w//A/7n/uv+t/6P/mf+B/3L/W/9M/07/Qf8u/y3/Nv86/0H/Sv9T/2H/d/9+/33/hv99/4f/ff97/43/ff98/3j/e/+L/33/hP96/2j/Zf9V/0L/Sf9M/0r/W/9g/1r/cP97/3b/b/9r/27/gf+O/53/sv++/9L/5f/z//P/9v/0//j/AwAHABQAIQAcAAsAAwD9//P/8v/u/+P/5f/s/+v/7v/v/+r/6P/Z/8r/vv+1/7n/xP/P/87/0f/q/wIACQAEAP7/6f/o//L//v8QAB8ALgAvACoAMgAuACIAFwAKAAYADgAOAA0AFQAeABMAHAAdAAgABgD1/93/2v/Z/9n/5f/o/wEADwAYACAAGwAYABEAGgAeABgAKQAlADAAQQA9AFEAVwBgAGUAZQBtAFkAPwAqAB4AGwANAAsAFgAUABAABAD0/+3/6f/d/8//zP/K/8v/2v/h/+j/8//2/wQABgAFAPn/9v/v/+3/8v/5//3/FgAhACYAOgA0ADUAOAAxACcAFAASAA0ABwANAA0AHwAmAC4AOgA5ADIAMgA0ADYAOQBIAEkASABJAEUAXABRAEYATwBYAG0AdAB1AHoAhACFAH0AdgB3AIAAdgBuAGcAYgBZAFIATwBHAFUAYwBmAGMAZQBuAGMAWwBZAGQAaABSAD0AQwBCAEAAOgA2ADEANgAwACcAEgALAAIABAAMABIAHAApACcAHQAIAA4AEAAQAAkACQANACMAIwAfACcAMQA2ACQAGQASAPb/6f/o/+b/5P/f/8T/tf+u/7D/sf+o/6P/hv+C/3D/X/9U/0X/Sf9O/1D/Xf9p/3D/c/9+/5D/n/+q/7X/uP+0/77/v//A/8r/2P/d/9z/5f/l/+X/5//o/+r/8v/n//D/+f8IAA4ACQANAAkA8//x//j/AQAFAAcAEAAYABAABgD8//v/8//z/w4AEwAXABsAHwArACMAGQATAB8AHwAXABUAHgAhACcAHQAaAAYA/f/3//H/9P/t//b/5f/a/8j/xf++/7P/sv+k/4//gf+D/4T/j/+N/5D/iv+S/5D/iv+S/6D/nf+e/6H/n/+c/5v/rP+v/8T/1P/Z/+b/6f/m/+3/9f/4/wgACQARABQAHAAXABwAGQAKAAMACAAFAAcAAAD+//3/9//7/+P/1f/R/8T/wP/I/8z/1v/T/9b/4f/p/+7/7v/l/+b/6/8BABEAFgAfACMALgA6AEYASwBPAE8AUgBFAEcAVgBgAFsAVwBSAFMATAA/AFIAPQAyACYAJQATACIALAAmACEAIQAgABgAHAAcAB4AEwAPABMAHAAbABcADwALABQAGAAhACwAKwA2ADwAQwBKAEkATABKAFcAUwBQAEwATQBUAEwATABHAFAAZABzAH4AegB0AGsAYgBNAEQASgBFADEAPQBAAC8AJAA1AEAAQwBNAEUANQA5ADwAPwBBAD4ANQA1ADYALQAjACYAHgAcACgAIwAVABEADAD7/+v/3v/f/8r/v//A/77/0f/I/8X/w//B/8P/u/+0/7v/vf+v/6T/qP+t/7H/pv+b/57/oP+l/6H/m/+X/6X/v/+6/7v/uP+z/7P/tP+v/7b/u/++/8H/uv+3/7n/uf+2/7j/tP+8/9D/2f/Y/93/4v/h/+P/5P/0//D/+v/2/+//7f/3/wkA+v8DAP//7//e/93//f8IAAQAEQAAAAAAEgAYAB4ADQAIAAEA8P/i/+j/5P/3//f/8v8HAAkAEgAWAAoADwANAAsABgAFAA8ABwABAPv/8//0/+f/5//o/9H/2v/V/8//1//G/8H/vv+6/7L/sP+2/6r/m/+N/4n/nv+k/6n/sv+1/8//3P/d/+T/9v/7/wIAEQAIABIAKgArADIAMwA/AEQATwBsAHYAeABsAGwAZwBTAEQASgBYAF4AWgBjAGoAbABsAFsATQBUAF8AfgCRAI0AiACWAJUAngCeAIUAeQBiAFEARQBEADwANgA7AEYAQgBBAD4ANgBKAEUASgBMADUAMAAtAB4AHgAiACgAKwAiABwAGQALAPP/2//I/7j/uf+5/7z/w/+y/63/qf+Z/5r/nf+f/6b/sf+3/9r/8P/u//7/EgAbABQAIAAnABoAFAAQAAsA7//g/9n/2f/f/9z/4v/b/8D/vv/K/8j/zP/T/9v/2v/t//b/9P/s/+3/8//w/+7/9v8FABMACwAIAA0ABAAAAAYACAAXABMACwDu/97/4v/S/8P/u/+z/6j/of+b/6T/pP+y/9L/2v/j//D/AwAFAPH/8v/2//f/7f/1//T/7f/t/+P/6v/h/+H/5P/t/wMAEAAhACoAJwAfABwAHgAYAAsAFwAmACEAHgAnABsAGgAZAB0AGgATABcABwAKAAcA/P8EAAQACAD//+v/xv/I/8//xf/P/97/5f/p/+n/2v/T/9z/0f/M/8X/v//B/7v/tf+r/6j/sP+t/6v/xP/P/+X/9//1//X/CAASABIAHgAnACYAHgAOAP7/7P/s//X/AAADAAkA/v/r/93/yP/P/8f/yv/A/87/9f8HAA0AFAALAP7//v/1/+7/8P/u/+//9//5//b/4f/Y/9f/2f/K/9L/x/+p/7n/xP/T/+H/8P/6//X/BAAPAAsABgAMABcAGAAqAEgATwA0ACwAJwAVACAAIAAVACkAJgAiADEALwAZABkAHgAYACAANwBNAFgAZQBXADUAKQAgABQACgApAB4AHwArACkAGgAUAPf/0P/Q/9X/9f/4//v/DwAZAA0AEgAqAB0AMwBHAD0AOQAlABQARQBCAEoAXwAwAEcARAD//wwAHgD1/8//9f/P/9X/8P/J/7j/0P/s/7f/+P/b/87/+P/7/wQA5v+s/57/uf/b/+z/IgAFAAwAMQD//w0AdgBdAGAAqQBmAFYAWgCbANMAzQDHAH8AVACHAGQAQACQAJ4AdACzAJMAsACJAJAAawA8AJcAIQB0ALgANwA4AFwA/f8+AKsAGABuAMsAUQAnAH0AeAAeAHkAVgDf/z0AFwDb/zsAigAEAIn/2f/c/5b/BQAcAK3/IABTANf/JQDV/4D/if/k/+n/zv/3/wYA/v/2/8T/eP8U/2r/a/9J/7b/fP88/xf/Xf9R/zX/af+E/yf/Iv+m/zP/Vv+u/5L/nf9e/6T/Zf8Y/7f/fv99/+3/pP+5/1f/Qv/u/1b/Pv9cALL/VP88AMT/vf+x/+f+gv8O/xEA0/9j/6QAwv8T/4j/sv+C/2v/GAB7/1z/pADi/ysAvQBQAEz/Kv8wAHT/z/+SAHUA//8iAO3
/x/8CAAkAD/9YADgAdf+DAJX/uP9cAMX/bv9UALv//f/CADcAeADp/xAAgwCb/xwAGP/z/2YAlP9UAN0AxP8FAKz/RP+6AKIAuQCQAPoAAQBSAKYASQCxALwAagCs/4wAXgAhANQBfQAhAIcBYv8mAIYAGgDVABsAjgAJAOz+SQC7/8D/lgAHAMH/JACtAGz/uP/ZADkAKv+E/wQA1P/DADwAqwAfAP//5gDC/78AEAHmAJH/uQCeAH3/kgEtAMT/BwAFAA4AOf9KADP/GAD+ABT/eQC2/2P/AQAT/xMAMQClABMA+f/RAPP+mgDh/1v/swDo/+D/qwCZ/5D/rQDQ/1X/DQBnAFr/3f/TAIX+4f+jAV3+9QAPAbX9mQA1AZL/2gAoAb0AGwBAASoAVABSAVX/7gBG/9D/3wA0/9r/KQBd/3j/K/+EAG3+GQBUAT7/2v/M/wAAdP7GAHEAZf8LAh3/igEtAPf+WQFPAPX/KgGYAVz/VQH2AYj/kf9JAnn+LQFnAJX+YwAqAHAAcv9vABX+VQB0/xH+VACQ/6D+lP+U/0b/YP8V/nQADf8h/zIASv+cANT++P+l/4L/QgDt/5T+hwBFAAT/M/+ZAJYAMP5AAaf/Ff4RAvH9qP+rAIv+uP80AYX9GwHK/7L9dgFC/57/+P71/3n/uv+pAH0Axf0nARsBUfx+Ahr+8P7bAav/KABnAP3/RgDH/1j/GAI0/tcBcwAo/wsASgE+/lIAHgF5/yL/DwBTArT93AFSAoD9NQMOANH9yQEIAWj/2wDaALYAbAAwAJgBUP8MArMAlv5iAqP/9f4VAkL/sAB2/6b+9gHU/x//uQBV/zYAywAX/5AAx/9+/wYBHf8rAXUAMv+T/2oABP8SAIv/vACs/0z/VQJ1/fMBnP+h/bQBjv2aAan/l/5mARf/5/8Y/lcB3v9C/n8DI/3y/40CaP4UAW/+CQKJ/tP/ngCB/+7/7f6IAuP8/f8dAhH99v/cAAf/q/7n/tMB+P2A/34C/PuDAFsB4v1V/i4Brv4j/5sB9/wZAU/9AgDs/ov9eAJZ/MIC1P6q/GUD4/veAZ0BiPtJAqL+e/8S/xb/UQEN/wMBef6kAF8ABgDiAL7+kgID/gYCxf9vAEoCtfx3AYz/AwApABQAKgB5AIkBdf71Aa4Akf6nAWT/owD3AID/QwBq//EB3v6vAzn/pf/qAkj91gECAJsAnv/OAsoBfP3KBMH/UADoAXz/TgDyABEBBwEXAPX/mwGc/hABlP/UAcL/UgBiAXT/nwC8/6QBov4QAYoAO//IASb/bv+T/ysALwHd/0cBsAAt/zYDbf00AuYAav2tAjL/3ADT/xz/YACm/SICR/4m/24CwP5zAGgA0f/f/00Avf9G/zsAJgCE/xYA8f+q/8b/Wf+D//T/t/8N/44ALwBrACv+SACt/yD/8AHE/hAAUgA0AGX91gAXARf+xgG0/+7+LgFD/nT+xgAX/2n/RgFS/WoANwDi/nsAJgCEAYH/iQDZ/47+ugBJAP7/wv+rAAYA8/2tAjH+m/8cAvb9KQCtAGT/jv/P/4P/WP1NAYH/vf25Aa7+F/9eAIUAv/2jANIAO/vUA2D+Xv7/AZn91wAz/7AAHgFm/IgDnP00/mQDOf2zAUX/kQDi/zX/BQIZ/GYC2P6N/SIE+v0bAH4BGf9m/lIAWAH4/AACcAHn/FgCL//2/fQBD/+NAGn+twHF/rz/EwLa/CcDAf+fAFoAvP7bAlb/BgDvAJsA4gBaAOUALv8uAtz/2v+rAnv/BAD4AWz+3gIBAbr/OgDPAW3/aP9AAnf90QEoAm/9SQIOALn+NAAmAGkAef++AK7/uP/TAcf+Fv6dAmH+nQG/Abz9JgEoApf8oAIb/+oAWwDB/RsDA/4uAUQBQf7fABf/AP+1AAP/EgLH/RMAhf+s/hoCCv5DACYAtv4PAP//of6oAIMA0PtaBAv+f/+uAi39JQCQ/1cBef5FALwCe/2n/9wBKv3z/9oBwv6IACL/GwGk/dcANwFH/LMCtf+2/s8Awf4Q/0sB2P1KAHABhf5NAI8BfP7P/+sBtv6F/goFef17/OQFuf1m/ioGlvxgAKIClf5e/9kAmAFZ/X8CPQDh/RYDkP5a/T0EW/5j/kgCTf53AK3+RQJL/QIA3QGO/dkBVf6sAA8Bsv0NASAAov0WAyr9hwCVANz8wQPW/OX/6wHn/fMA4v3xAZH+Yf4xAqX8FQLi/swAmQAj/s4CNfzgAib/R/8/AE//EgEr/+kAuv28/4gCxvwUAwAAHP4aAVgARP5j/yQE1vqRAugCJPnhBzP+EfzLBzH7GwFFAjb+cwGZAWMA9P05BLT+FP01BJX+YfwqBRT9t/6dAx390v/j/rQAR////ikB5v7e/wkApwF3/QYByP8LAWj/nP5JA/X8QQJbAnD9HAQ6/vf+pQLD+/ADTAFn/qADj/9o/KQEgv/y/BwIa/zs/gQEIfo5A+z/1P7B/wICn/+M++EEJP1D/8UDl/wRAEgEfv1V/0MFnvxk/zIFL/kRBHYBOvv2BXD6+wR1AFL9TARn/DsDqf4aAS8Bz/2iAsv/tPwlAcH/jvslA1f8XgCvATD7cgAXAFT62wFNAdf4BwNV/hj7swIu/dX/W/9J/T4Adv4lAUD+DgCfAwD70gEbAZL6igW9/r/7TwNSAc36jgQ7AIr8BQLl/qIBQ/7dARoBtf2qARUCLfs2AqcA9P03Ac4Ar/9u+0cD+/t8//gBWv2sAKMAGAFT+2gGzf2X+0oFAPxR/38F8gLj+vUEfAJI9+sKhfzq/M8HMfysAI0CDP5yACkE3PtRApMDaPuOBIsCU/85/8wCBAAn++4EJfyB/c8DxfxOA5f7fAaO/1D5LwsA+UYBXQM5/rv+egLqAiv8ywRuAHv+kgJzAof6jgWK/+H6MwcU/TIBQwFS/0kB9v6KAaMCI/0uBab/h/osBlz+RvwaA2z/Zvs8ByD7zf/EBO7+X/8wATEARP17BD78XgFO/4gAFQFi+sEG5vzt/aEGU/v9/XQG6fccBKICAPxHBMb+hgDG/SYCVfzQATQAiv0YAi/9KgZn+0cCrP4FApT90fzpBi/1EAnz/dT8HgScAbv7ngGVAob4Pgje+hkAkgLK/qP9CAG1APj2Fwgp+eX9yQM0++3/AgO0/jX+tQGg/kv+cwESAZj5HwVu/tD8SAM4/03/FP0LBOb10ga9/jn9zAIL/nIAa/zSA7D5XwQx/sj7/wLj/1cB8/gRBhr7mf/RBor3XACBBt76UP5gB6n7TwPgAr75WgZY//b9igVI+4oBOQRJ+/EDxQIZ+qwA2gA1ANr/bv50A2f7KwEYAnj4cwOfAZ74GwbU+14AcAMk+xsDlP7L/lIC5wF6+UEEsQBs+ekHeP+B/RkH0fzw/CwB+f+W+T4IcfhWAY8Cf/zhAcj/7/36/FsDPvzeBZj95QE3BE75hAfh/g//pgYWAIT8KAfz/av72QaM/EoAOQFeAe78bANyAEr6eAS+/S0AEAEtAD7/kQNV+S8Fx/riAfQEHfdLCs/7rg
K2BAv6gwV7AST8PgDNAh/80f98Bhv71AGTBAT9zv7uALT+RQOT/YsCjwNW9kAIBABv+fYE0v/U+JIEfQIX95EG9fov/0kCnfyxBDAArv5p+wgClf6++1UGFfzH/7ICwf7z+2b/nQIy+lYDAgR8/cIBBAKI+9oDn/3NBDX/wQCKAFH4jQuh90kCwgdL9kAH7f5C/hEBCP+f/8T7gf4MBTb5mwWJApH4FQhk+3T7UAoW9/L7Dgum9vMGDQD7AF4FtvbACT74svz/CB/5HgIZBk/9Vf3CAnb6YwCF/SIDewExAKX8gwKEBBb2qQpYAGf2Ggj0/yT5hwTSCVv4LACjBe/+CPesA9H8z/prDKb46gXzATX9kQBOAaD+ZgJmBRD6eAYH/sz9lAO4/J8DM/uuAyr7PgBHAIX+xP0MAOMAmf7/AiADBvk+B1f+YfxNBkP9iP6DAmABZ/78/pQAdAON9zgEbQJ8/CgD5wKe/H767QEIAcj5TQhY+rX9ygW297AFLf+d+9EFcf4h+pkFygDl+GIGHf0k/csCKv5M/ZH9jwYu/FIEzf+Z/HEBUvvr/jcGzf7hBHX+ewBt/lsBPPxyAW3+PQFGAWL8pQJd/8IDDvx2/cL90vw7AHz/V/4HBfQAEv6aAM3/9ftd/wsCzP3YBRID0v0b/oQC0PjIAm7/Av1tA+v6BAFBAy7/FP+f+uP/hwChAZgCB/67AAH/4PqyAwYBJ/9GAhL///7oBfgAL/5h/YL/Jf5v/oQFSf3//mQBoACz/DkALQJFAKX/zgG+AN8Aov59AhH9c/s+A30BrwHV/4D8IwJIAdv6fQC0AC0Hz/9/+AEAgQGmAOH+y/8XAWkEMgFM/5L+JgIs/+r5wQKqA0kCDgRY/UH+DQOb/O/8bQA0AVUCl/7nAMMDp/83//D6WgOqAV0CPAE5/sgB2QDJ/Qv+zv47Al4CcQDr/Of/pwQxBLz9rfyuAUUGWgBT/OX+XgQ4BI/+svt2/lMGPgTH+yv8T/9fBY0CkvyE/cABeAWSAt760P3/AdMD9wDw+kYAOwQ9Aff+VfsMAuQErv6v/I4AuwBpAiv/wP/1//P+zAG8AMn/wgCK/pv+yv+eAFMAl/+XAf/+Gf+BAH8A4v93AN7/owBfAnYClwDp/qT9sPyr/38ASQH2ACcDOgLEAV/+YP2t/QH6+frB++v65P4A/Lv6jf7A+xv+wv2v/44ADQFrA9QEtAOvBdgEvQQwBb0B3QawBf8FaQW6/50A1P4O/lb8Ev76/RP8h/vQ+Sb6PfoU+036Pftw+4v5cvqk+VH7QvzG/PH8JP4a/JL8rf7+/AcAB//1/nQA9P5xA2ADogTvBUkFdgUXBoEFUwYyBrUHIQb9AxUFhwTgBKED1gK4AWMCqAP1AZ8BXP5n/R/8efuM/MH7xPzW+/f6z/u3/MD9Yf+p/rv/lP9VACMBtgA5AU0APv8RAH3+7/1T/l38dP04/RX8Gf17/Oz8wfxI/Qb+ygD2AWcA1wF6ATQA6QEXAugBVwFGAZ8AagDC/+n/5f9v/2X/Sv0Y/YL94/3R/Yn8QP1y/q/+of5V/gD+nv06/iD+1P30/D3+8/zz/vb/7P6C/+b98/3n/v0AmgFyAfEAoADDAB8BIwKFAq4CTgLWAjUD2gKuAyIEDwNNA1UEsQLkAsoDzAJIA04C4gGDAfkA5QFxAbsBGAEkAT0AKgJFAakBgQJfAUkD0gGvAf8A+P+fAfoBAgICAiACQQKTAp0CEQM4AssCDwRqA28DMQQ7A34DhgObAh8DvwLlAqwCrwKbAgwCIAL/ATsAbAHkAZ4BDQLaAMP/tv8DAGIATQEaAcD/3P8xABEAwgDt/0sATQDt/1f/cv9Y/7D+lv5q/oj+wP7K//39vv0q/lP+8/5I/mr9Ufwd/eH+TP8g/ur93P2r/fz9Yv5d/2b+F/6c/uz9If9i/rD+yP5e/bL8a/2s/lX/+P7p/Xz9c/2M/BP+Dv9d/kv9Kv2E/AD8Rf4Z/tP9Gv25/Pf84f3W/gr/dv8YAPz9I/+M/5f/j//y/hD/xf55/0n/S/7v/Vb++f66/9UAbf9N/7z/+/9BAEIAgwAC/6f+RgFOA7AB+f6d/bL9K/8K/27/rv+B/8j9xPzc//QCswM8AEb+6f62/53/FP5t/Rf9wv2t/az+wwA6AWcAMgB8AcEBlQOmAwIDUwL9AcwBuAHHAckA7P8t/tv9f/6+/nf+Of7A/qj+9v7J/6IAZQDhAaAAbv9DAJYAbgC3/38AAAC8AOgAuQD5AbYB5wJ1A8QCGAJuAb8BqALuAtkBiwH9ALgAOQFiAc0BHgJTABQAUP9PAGcALP+o/8X+6P0//l7/tf4A/zX/U/61/lj/af/t/hcAnwFGAPQA+ABoANIA8wHSAAwAdQCMAAcAawBHAej/lv9fAIf/y/85AOT/qv8/APn/jv+7/77/2v+d/ygARQCE/3MAfwGGAI4AhQAEAbwA1QBXAM7/twD2/2P/M//V/tr/2/80AAsAbAC/Ab4BZwFoASgB5wDVAG0Asf/N/wIAuf6Y/m3/Qf9u/4n/CQC0/3YAtwFqAVADdAWvBrsGegbQBpQGJwfSBjYGHAaqBcYEEwTlA18DtAPKAkMB6f/K/pX+Gv2p+2X6NfnE94P2NvX8887zEfNG8ZHwvPCT72jv5u/i78XwufHN8n3zjfV/9j347Ppj/IL9v/4yAJUC/QNkBAwGLAr2EbQWjRQxES8QChHVEUwRNBLaEykS0gxeCGMItQk4CigK9wcqBC0BZAEKBPIFtQXFBBYE3QSmBUgGdwf5BxUIfAfLBuUGeQaaBF8CpwFnApQCAQIaAH79wfky+Ir2nPW/8truceyE6LvlmOOo43jjOOH73bTcSt4l4Dnhmt/l3gzgw+Ge40HmC+qH7ZbwWvIq9az5Iv6JAsIFswf9Cl8P6xPxFdYXnhqzHGgg2yJbI8Mi8yGbIasflh3SG6AaQhoaGKAU+BEMEf8PuQ3qCoUHcwS4AmkBvf4A/D/6N/o0+pT4QvfK9vb1QvYI9STz//YaADUFZwEN/WD8qQCLAh//U/5FAJsCawCK/EH6//ytAScCov47+wz88/4oAb3/Tf4T/qH+5gBkAkkDswPgBBAGaQYFB0kH3Qf/BpEFXwOsAqoCcQJiAbb8Kfns+hj8//nl9VbxX+8W7lnsTepp6LXmJuUw5EziceGE4XjiueMN5KzjPuVY6WHssO4a8Wr0ivmL/uUA6gKIBCYJ4w1UEE0S6BP3FlYamh0/HzEgfCFUIkwhFR/NHXwcBhvQGfcWcRL0DrwNtwy9CdYEQ//j/c4CHQkzCXMCMPwy+5j7AvhG83H0a/n1+ZL1LPOq9dP6r/0n/1D/hQBzBIMIRwr6CPQI3AsJEEwTWhP+EYcSsRbsGIwYsherF7YWcxMBEMwMfwwvCjIFQP+B+Rv1FvLj8C7rT+Nv3lTdXdyz2aDVMdK/0NTPjM/lztHLsMqtzWLP7NCe0sjW39sD4Bvhp+KO6VPxJPi3+239VwLkCWkQYBOiFZYYQxyDIf8k1iXTJykrqyw3L
I4rHCtOKzEqEibsIG8dvxvNGU0Vcw9vCsYHoAU+Auv9V/mj9g71XPEJ6+vkWeKP6Jz08vk/9HrtTO4i8+rxDOxp7GD0q/p390Py+vPx/IkFGQiFBq8I8w+iFMYUHxFaD30Q/BNvFswUNxHMDxsUGhjIFy4WxxcDGXcW5RELDrQMZQqLBk0A3vgt9IzybPFg7MLk+uDF4h3kzt/+2RLYEdpx2rrWY9Oo0tjTp9Ui1gbXGtqY3u3gO+NG56TsS/NJ+X79XAD2BBILSxDrFNIX6RrqHaUgxCNMJ0cpFim+KcAqCC10Lf8qkCdpJE8hAB9AHGcY5hMwDzELCQiXBKkA8Pzv+ZL3+vHY6U3knuhj9ff8GfYe7M7tr/Mh8Yroxecp8iL6nval76rvnvdI/6kBIQMcCAYRTxYGFCEPvA54ElwWRRhrFasRkhCKEqMVXBbXFaAX2xmXFzwTsRAVD8cMsQZ8ANb7mfaQ8HLrAecR4pze8t193W7Z7dT/1OjYsNre18PTD9IL0TPPJNBJ1JbY49kD3CbfpeOl6KHtK/Se+wUBPgSyCCIOYRJJFfcYAh5lIjMl1SbiJ5oo1yooLmcwzS//LAgqhSdsJUsjCCCFHMAXQhJ/DpcL6geKA6r/mvtl+Gj0AO+S6Mnil+RJ8OT5PfZ368zo4O6v7oDmbeNG66XzFfNa7J/pwO/G+DX/CgORBzEPkRRYFHIRMhGLFGwYPhq1GFwUMREPE0IX0RhiFxYY/BozGzYYoBRDEkMPTQuMBh4AvPjB8I/swunP4/TbK9l+2gnZkdSM0UHU5tiR2vLXlNRy0xLVv9YQ1gXWLNlG3kDjw+ZW6rbvhPaA/foCTwYLCj8P7hNpFpcYjhzDIB0jniSHJXEmDSlnLF8uniziKDkmWCRaIrcecBukGRwYwhPNDJwIUwUSA7b/UftX9yLzdu8z6Q7iuOGc7aH7Uvvo76bqrfDx8uLpvuPX6sr2UPhB8OrrnfAd+2ID0gV6CGMOtxV6GKAWchTHFUcZoxsqHP0XfxJjEeoUExgdF8IW7BluHawbLBZ8ETkQhg8tDAoF5vsV9aHwD+v14wreDdvB2jPZbNR2z1vNkNCi1dHV6tIZ0tjTT9N80FXQstK11GHWVNkM3cbgxeXO65Hz9/sEAmEHdQ2aFfUc7CErJyYtMDPaNjU6cT7pQZNDq0OyQNc5azTDL+wpyCBwFkgOaAmwBn3/Bfao7t3p8uJI2VzV/Nd+2+LgyOw89cLve+d06ufuYebI13XVguLm6xToz+Dc5FT3LwziGo0jQy4BP2VNOFKJUdVTplYRVZ1ONEU1OjIt6yASFj8LwP+79n7wRei72t7MrsZNxWDALLbErMOpIKzWroOwabLFt2XDldER3FzjVOzz+mAKFxMwF48cziKgJIIjCSEDHs4bGhoBF0EQswrpCFQIkAWtAcX+YP2n/mj/vv5H/K/6HfyQ/cD+Jv57/YT/SgICBAQDYQLIAvcEaQbYBHAESQWcBgwHtQYSBmoHHwt8DMcJlAdOCo0JcQGu+rD/Yg2EGNAeECKIIhkeIBidDnn7suOt1FnWZtsg17bMh8nQ1oLtLQOvD/oYqieON0o/rTyJOSQ5fje1MkAsLiU6HnkWZA+1C34ILQPD+nHwieI00wLId8Agusqyr66BssK7jsVbzVTWnuPV8pr+/QJbAhIB3QLlBk0I8wROAekBSQNcA+EBtAAKAXsBtwB3/sP9zP2r/A76wvj/+wsCUQcGCa0JJQwWEQ0WmhdnFhATXxBeDtwLtgeMAf38MvoC+Av1rvOj9PD1K/hg/QkEjAgvDBYQjRMlFCwSMgqy+xb3MwiQH38mISKZI60qnir9HgMJW+yH01bKB9CW0r3KqcIVyrDh6f1AFeEj3S+XPDtGukd+Qs886zmVNmUwKisqKM0jAhvnECwKwAMC+X3p7NkByyO8ELDJqiusla+WtlvCXNE437PqefYHAuQK0gz/B2UBLv4A/34A7f1f9xr03vft+/35zfNT7rrtD/G38zryCvDG8tz4Pf8ABvwMqhHGE80WzhqsHIQb/hhDFXsP7AlDBbcB0P6S+5n3GPRk8yL0MvN48ELxjva2/e4EqwtoETIV4xdLF/ENtgCZASwYCy4DMp8rZCrhLYcojxbX+YPbh8eFxbvLtMqjwZG/Qc8X6dwD5xi7J0szYz0eRZtGv0NuQNs9+ziAMqYuuis1JB4V6gQi+gzy5eXh1DXEGLfcrZ2qjq84ukLEOM7j2ivqMPjMAWkIlwp/CUsG2gJDAcAArQEYAtMB6QHwAtsB0vvu8uTq0eX/5Hfobetb7D3vZPeuAqkM1hQJG8weHSC8H6gdPxqGFTsQ4Ay/C3wKnAcbBG8BTv3S9/fz3/Ei7+TrbOu27iLzEPmOAj0L6RFEFzgapxC1/+j9vxWHL+EvUyK0HncmOCWZEtzz2NTxwYPBesl6yQXDhMJ80PHmo//hFpQoUjSdOrA+1EGQRI1ElT8tN9AwMC8iLoEnlBcnBWv3B+6Z4vfS58M7t+ys4amtstbBP83008vdDe5G/oIIyQvRCwAL3wnsB0wFGwMeAYT/fv4t/sD77vQZ61Dh+dnm1jba+uF36sbxSfpnBdIQLBnIHAse2R+1IZwgmh3EG2sa5BZPES4Odg3zC8MGgv0D9KDtYenJ5NDgmeE65zDwrPl9AlUKNhJ3FrUMgQEnC5QqSkBPOMso1yiQMD4pfA3p6cnQEsgJzOPO6MvpyJjLv9gK7pYHxR3DLZg3BDwqPr5Co0cZRTI79TAaLsYuJiypH4gKYvbm6Qbi49QswyW0gaw8qgStwrg6ytHYY+F/6in5ngc4D5oPDgw5CC0IXwxkD30NBgrlCA4IEgR5/MXwpuLj1qrSMdUT3FPlPu4b9q3+9ArgFqEd1B62HP4ZKxgKGBEXqBO8D9ENxwx7C4wJMwWf/eL0eO4M6dXkBuW46FjsEPFj+5UKjhcYHyAfJxJLBgAOkyknPWw2tihDJp4oBx/kA/XitMlCvpzAKsQrxNPDE8jX04nlBv0WF0MsuTV1NTw1mzyQRa9HYkKwO8c4VDemM7UmHRF+++3re+Ac0t/AEbLJqCKnv6wcuvjLcNwf6bvzBgCQDIIVXBgEFGILpwZvBwEJpgbMBJMFEgPQ+sLwueZE2wrQFslJySTRtODp8Jb8cwbeEm0ehiNLJG4kTCOlIMEchhjqE8wP5QxgCakF/QJAABL9Q/gy8ZLpG+UJ5srppe6H9DD9wwb4ECIStgkFCacdDD31Rqo5pCv3JiMhjg4K8i/WBcJqvrbFecoIy6bNWNhd56j4vwzMHsMpjC5KMc82AUAqSIJJ3EP0Pek5NzXBLEoeFAvR9h3nStlRyY66ILFMrPSr27I9wvvRkd5u6ZDzkf0oB+UPzBLUD1oMPgoOCeoH8AcbCJ8EFP9N+F7wLecr3UzUQM++zxDY++Xb9CUByAkWEYoX9xsDH9cgbiGHIFEf7B7iHLMZpBWMEX0MdQWF/t73v/Dm6SXka+Gt4sTnJ+/294MAdgmDDpgJTgcAFTk1W017R9Mz
dScmJa0aWv6Y3DHDCrm9vEnCy8R5xyrQBd9a75oB4hR0IsMm5SWEKjo3cEW1TbVMx0c4QRY6oDG3JHwSDP2S6ffa8s1nwT24bLPishW3e8Eg0OTc4eWI7XX1if1TBS0LFQ5RDj4O3AwECesGWgmbC4sFxff06AbdY9UN0BLMhcsu1MnmyfndByoSzBreHh8eCRyOGSAYjhjxGFoVfxAkEYYVxBY9EucJ2P+p9Wbt5+UT3QHXUthM3xToPPTcAnwO3gx/CHITlTIoUltVeUHQLJ8kHB4TCqnsHdJHwx7DOcoS0cDUL9pZ5NzvvvurCW4XWB9mIQskkyp9NLE/e0gFSutBFjbTKx4icRXaAyzwa97b0JvI8MRpxRfIzMo6zxvXdeD953TuZPYl/dQBCgdPDsQSRxErDEMGXgAS/kIA8ACR+mDtJd8u0eDHlsa+y0nTit3x7B/+xQwKGAEfzh7/GfsX4xhMGfwYvBmDGUYVdRL9El0S8Q1cB9H+BfJh5Wzfut443/fgkuan7g34VAIFBl8GXBAWLohPh1cRRqgthR5gE6j/8+VGz3/Fw8fJzdLSpten4IDsB/dg/nIGsRFfHCUjMSbtKr808EGqTdVREEz4QI0zRSW2FJUCe/Hp3+zOT8JivhLDw8lOzxjUytkG4LHmaO9q+Of9GgAmA8UI+wtzC2AJMAdvA2f+A/zd+sD1H+rf24vQ+cvlzxbaB+Y78HT5CgNmCwMSOxWIFDYRiQ8CEuoVqBk6HS4f7xtaFS8R1A4ACqYAK/a47ATkr94o3pHhcea57KPz8/nj+9z6JQJuGrs/lFaVUNQ4ISOSFQ8EI+5v2ZnM0spD0PbXD9yh4mHwyP6XBtMI+A3VFC0bbSEKJ6os0jTxQe5LCklRPYEvpiCfDoz9ffFr5pvawtF/zjHON89h0zfZ/9zL3Hjb+93e5trz/P4EBtkM7hNwGIIYMhW7DuYDFvmC8T/qhuDI17HUiNVY1/3bxOXC8jX/lwiWDKgM3AvnDLgNXAwPCwULeQ2hEaYV/hVREYUMYgo5CPwC+/tV9trwpOpq5nPmw+jb6xjwOvaU+Cv2H/sXE9Q5YlQVU3A8riLhDbX5N+Yl1hvObs/N16Dhp+a362r2dAThC7gIXwO1ApUJMxUFId8qOjTEQMtLFUxzQB4vrx3VC4b6Uew74ZvagNq134ziXOBx35viHuVf4ZvbH9q53i7p2fbEBBsP2RXpGVgZvBLZB079svYq80HvRehD4WHfd+Gu47PkHOnn8Eb4Wf25ADUEUQefDFcTcxfHFqQT3xK8EhYRCQ3kBwEEGwKmAlAC0f8R/PX3jvKC7FjoP+eg6G/slfFE8W7unPZ0Fkk/rlXHT8A3Phyw/7jnYdnK1JnX1t/y6xLyYfAo8M730gFmAzX+L/hx90kA0REhJts1/0F4S3pMIkKZMGof6Q+VA7j8Vfek7xzpz+sO8/byUOrL4Knbx9ZN0sPRS9b83yru1v/kDTIUZBXaFM0Rugns/qf2Z/P682b0ifIf7w/s6ulZ50vlN+Vd6CDvo/i1AhIJGQzyDiMSxhLuDpULggt0DroQjxBjDY8IPAYJB9QHYQQQ/hf4CPOj7gPq4ua15u/qae8Z71rwKgBbITFABUqsPHkiKwRN6fvacNm53tfnFfbYApkBC/Vx65vtvfTt+F/68fnL+78EYBdmLIY7WEQZR0NBcDEeHXcLswD2//sFfgkBBAT7GvYv85XtWeaI4HHa7tPA0QPVhNpx4nrwQgCKBxoF9/9P/Cb4d/b3+6cF8ArJCKUCsvkI7xPn8eVh6VjtoPJ2+YT/VAJsBLUH4QoNDTUOWw6BDMgKtAvgDrgRBRNTEnIPyAqJBkUDIQC4/Lv51fa38lrueext7Rnte+k36TH3WRTVMh1CRjwQJssH7+rl2HzU6ttG7GoCYhPvEoUCr+/i5cXlkuxw92gB/gd8DrwYPiRQLHYxlTR3NA0ucSHSEGMCfv42BvAQxBROEQMJjvwq7XzfmNaE0ZDRt9cD4FTkM+W858XrE+6D7rnu9O0f7FXuTPdXAtIJIgy/Cp4ElftN9EfxsvKV9/0AswpAEAkQvwx1CEYD2f+o/oD/hgAIA7sG8AgVCOkF2wQBBJUCnwDc/+D/HgBf/yz9dfri+HP6jP14AAYAff0U/U8DshCGHi8n+SSMGKgEfvEY5SXhWuYE8/0EjRIgFQwLLvmY5zLdl9/e64X8FgxjGAQhGiRdIkAcPxVrEawSphacGLcXcBVME8gQEw3RB/sAhvrl9bHysO+Y7I7qI+l56Ibo0+hj55rjqN8R3cjckN5G4vnn3u5k9mb7tvpE9S3v3Oyb7l7z6fnhAP4Hdg4ZFM8WsBZxFTgUOhOVEYcQcxDjEIYQZg5QChQFnwD3/eL8V/wc/LX8Ov3K/KX64Pc39or23vja+5b+GQEOBG0H7wmRClQKjgrMC6YM4QuVCcUG8ARZBDsEUQToBLgGzAicCQEJoQecBvcFIAWEA/IAVv4H/M75DPdL9B3yNfC+7vLtyO6k8Bjz6PUf+Xn80v8MAyEGbQkVDc4QQhN5E/QRrg9qDcILbwovCUwHiQRfAX39J/m19LfweO2l6qDoCeiK6aXstO8j8TPxovFA8+/0a/WA9bT2Dvne+vj6H/o1+h78HP8fAW0BLQGoAZ0CqgIJAp0CCwW3B7kIngg9CVwLXg1pDbELEAoQCgoLJAs6Ci4JHQnLCZYJ5QfJBN4BSgDN/5P/Hv/T/qj+VP6Y/U799f1I/9QAagJdBFUG0wYSBZwBM/46/GD7V/pM+DL2pPXf9rH4Dfqg+3z+qALYBrwJdgu4DPQNXg52DesLDgsUC4QKhwhnBVwC4v8I/rv8hftN+vz4ePdu9QfzGfEt8Dvw8/At8ovzIvVQ9/r5fPyb/tL/2v/5/nH94vuP+pP5Ovm/+cb6O/sl+3D6VvrW+jX7qfrg+Hn33vdV+pX9NAAlAtsDeAVyBp8GkQZKB74I4wm9CvIL8gz4DLUL6QnJCH8IXgh8BzAG9wSdBA0FmQXaBbgFkQXRBNAC4P+o/Qv9V/2c/Vf9hvx5+4X6xPkL+ar4FvmQ+oL8VP4lAFQCQAVFCHoKSAscCycL8Qt/DM8L4wlQB1kEegHl/uT8wfuQ+zr8rPwf/PH6z/lV+T/54vgk+Lz36ffR+Fv6OPxM/iIAygG5ApoCjwEaAKD+Pv1M/Pn7KfwJ/IP7Fftb+8j7xvtX+2H6zfmW+XX5CPmZ+Lr4D/nP+DH4Ofh7+eP7Xv5cAH0BOgJ3AhwBBgFZA54GFwglBq8CkP93/jT/rwDJASkDUwVTB8MGJAMQ/0f8bvtw+6/7hvtO++z7P/2o/iX/3/7Y/m7/mADBAT4CEAM1BSYHjgUtBOgFZwztE4oXthRPC3X/0/Zi9Ov2KPz0AVgHvArGC7kKqAjZBcQC0v8U/cT7vfxzACIFkAjkCUkJWQfTBJcB1v1k+tP4+/ly/Kb+IwBXAX8COwOkApgARP58/Gb74Prn+i37xPuc/Bz90Pwe+274YvbE9Tz2Dfc7+BP6FfyU/Sn+Gf7
l/QT+Pf5F/pb9Fv1V/ukAbQM9BKADwQIjAiUBmv4H+0X4CPgD+t/7Tvz/++v7dPxQ/Vz+l/88AS4DaASIBIwDpQIaAr0BOwG0ALEAVQHkATsBPP8H/bf7FvuZ+pv6xfs2/moBRwT+BpMJ9Qu4DVsOtw0lDGgK3QheBzgG3AX5BUQGewb5BSMEOgFp/m78p/tR+2761fkP+sP7hP4IAW4CyQKXAi8DMgQ8BCADngECAUoBnAFPAQEAwP5n/pD+qv3C+5r5Rvkm+8r9BQCgAEIArP/r/pH9GfzF+8b8dP6//zYAHQCj/1r+z/uh+KP2VPbF9tP2uvYJ9wj4ivlb+/D87/6nAfoEfQd4BwcG6gT9BKoF3gW3BSsF6ASABLYCnv8q/C/5bfZC9O3yL/OU9Db3g/rl/QQBVQPZBKYF7QXJBVwFzQQHBdgFzQZVB60HPAh9CEkHjARXAX7+9PyX/IT8NvyJ/Fb+SAHrA1wFtQVxBRMFtwSRBGoEmQQOBRwFSAT6AgYBVf9P/d364fji9hL23vWv9lT3yPez96f32vev+Kb50PoR/db/egNBBrYH2wcpBwgGngRnAsb/+v23/PX7w/q8+RL6U/wj/ywAXv84/d/7lf1QAB0DZASLBQ4GOAV3BPQC9QNQA6kCdwAz/cj6efe29pb3D/oR/en/LQPJBLgFJAZiB0gISwi1B2cGzAUaBzgHBQhKCMYHOAg2BpwDYADr/fn8cPyb/Wb9H/55/df8qv1n/tb+Kv4A/s/8ev6JABUBYgMnBD0HSAbUBBYDAwBY/7T9xf6C/v79WP7w/03+/fyZ+jf50Pjw9+f4SPnD+6L8/v7E/sT/pAD4ADr/kvzj+UD57Pin+CP5IPpQ+5/70fyb+9f7G/vM+i35Bfla+gX6oP6rAf0D7QV9B4cJEgjVBZ0EAgF4AAz+Dfxm/Lv8rP9NAEMDZQSHAxEDPALHAn0BggLsAlQDrATYBcQIhgvHCm0KygZUBE4BvgBoAfn/ewLAAH8DFAQ0A4YCzADb/yX+rP7u/dL90AC2ApkFoAQ4BngG6gSkA+gAywBG/yb/UwFBATgBeQBpACMBP/1V+4H3rvem9o33p/nN+Tr9cQBMAogD9gKZAaj/nfvI+2z6jvpM+y7+Sv2S/4wAHP/q/2P9/fwk+/T3nPb19uH5r/t2/igAUwHxAuADlAJuAY//fv3i/JP76fpX/C/8sv5v/9UBmAEtANH/KP58/pT9+f7eAGcDdgXGBc0FHgbABp8FcQVtBMIDJwM6AwcE8gNmA+sEOQV8BHECTAAYAOX+XgBs/xUAfQCiAfQCOQKwAzoDcQN/AskA2ABvAPoCHAOZA34C1AKdAd3+Rv7K+8H67Pmr+Y76pPpo+xP9JP4hAW4BtQHD/5n+DP1q+1/8zPv5/c7+gQBsAdj/1/5u/WH8ufk+9vL1BPZo95z5kfvX/RsAEwJuAnsBewBb//P+E/74/GP9Uf0K/mz/Mv9LAKj/8v/E/tP/DACm/qv/Vv52ABgBuQI8A+oBuAKYA4wDuwP0A/8DjwW0BZUE0gNxAmQCTQLoAecB+AFjAi4DGAMeBP8DegSHBOkDqgSsAwEFLASsBGIDvwK/Ak8DIgXGA2QD5wGRAO///fxY+1P5eflM+Qf6PvuA/G3+nP8WASMCWgGq/0b+5/0Z/uH9EP6r/+UBPgPbAwYDZAIEAS8AIf4Y/KD7Nfs2+7P8/P1O/zUAxgDxAbsBeAAT/6L+wf8h/yf/Y//y/of+3fyE/Cf7qPqN+6n7g/0s/Yn+CQA/APIB5QE/A5oDJwPWAyoC7gEaAi0CfgOaAtsCHQIuAeYAs/9+/0b9UP0G/Uv9Nf13/HH9hv03/wL/kwBeAZUBbQL/AUECeAIJAzAEQQQDBk4FAAUSBLMB0f8z/tj8mv38/TL9EP6E/T3+vv3M/sj+3/5N/TL83fz2/Nj/Yf/NAM8BsAJXA58BSQA9/sz8NPtx+5b6H/wB/tv/cQHQAHICZAIbAvkAz/+//73/mgDsAK8AIAHZ/7YBaP+1/sj9zv36/fj8OP0x/Qb+EP5x/mf+gP4DAOMAYgJtA0QDOATiBEoEJQV5AmYCMQGt/0T/iP5t/yj+5f4T/k7+kP+R/ZH+kv5+/k7/Rf7W/ycAFgBOAoUBqAIvA90DlgUMBF0F5QO1AucAb/8c/ob7/Pzi/OH+ov7+/gf/fP6e/vD9gfxW+yX7mvuQ/FX+iv6uAM0CqwK8ASMBW//F/qL9dPyU/Bf9V/2e/q7+bADkABUBsQDRAPYAcQCdAeIAMgIbAh0CBwHD/mP++/tG/FX7rPo7/Pv8ov5y/vn+EP7o/qz+K/+kAA8BZQMEBCwGbQZGCBoJpAh5CDEGxAR5AhEC/QE7AloB3gE+AjIAKACz/4r/af5S/ir+Ov4p/qn+jv8/AOEBtAKeA2cDYwToAxcE6gMBAscAa/9H/mf+Uf6o/pD/iQAbAGQAHAAu/qD+EP7C/Yr9a/54/t//lACIASABpwDm/77+9fzb+iX7bvqC+rj6v/yR/Xn9sf0j/Yf9rf2w/tz+IQASAMoBngIzA9sC8gCi/1T8Ofwt+/n7jPzL+4X+Y/0m/fz8d/vZ/Cv8hfz9/Oz9BAEAAkUEzQSDBSYGCgYjBmcEqwKKAV0BqQG+ASUD/gIFBKMEgQKjAZMAFwA0/yT+Jv5p/XAAJABOAFECAQNoBSEEuQXABa0E0QTHA7gCMQLbAX4BdQFcAewA/gG1/xwAKv+J/ZT+8vy0/sv+nP9LAb0CgQOkAtECwwKfAe0ADABK//T+M/5w/Z37IvxQ+vP5jvoL+Vv73vus/Sj/3//3ACQC5gA+AFUA1/3U/eD7Pf1f/av9hP6h/e7+rv1U/f77evt5+s76iPqS+5/9if63ACcCNANsA3MDugKRAtEAIgD+/mr+Ff/OAFUBOQJcAhUB6QB7/hT/6/y+/Kr86fty/UT9G/8+/5gBQAJIAvkDhQLNBCsEGQQjBAsDcQR2A24D2ALwATICUAFPATQAuP6Q//j+NgBXAFH/kgH0ALMBnwGHAdsBkwKXA7UCzAIfAx8CyAGX/vz+X/5p/Bn9Uvuh/HT9VP7BADIAKwIhA38BCQJWAbj/FQGj/yoASf6g/W3/+P4f/9z9r/0//Hb7B/vr+UX76fns+4X89Pyn/kD+eAC3AIkApAAbAIEAyP4g/vH+Cv9lAJwAggDc/wsAcv5J/qr+/Puj/ZT8av02/3r+9v9T/6MBuwC/AS4DeAMbBV4EFQYdBa8EogOmAy8DkgJ6AI7/2/+X/7z/u/43AAEArv8MATgAqv///8v+ZgDOAGoBtwJxA98EyQWKBLoCOAN2AGP/xv7Y/Tr+rv5A/kT/6gBXAFsDHgItAhQCrQDSAEH/DwCT/xsA8P4Y/+T/kP/GAEr/IABX/tf7//wu+4z6dPsS/Fn9Ov2P/j3/U//C/3f/4gDHAK8BvQEtAnICzgEdAl4B/QChAPz/9P1H/TH9fv14/tL9lP5w/mr/t/6X/eH9Y/zc/jH/Qf8dAjUC1QOcA5kDJgSBAlQDGAFTAPwAj/83AU
MAvQCxAFkAKwFSAL8AVwBJ/xMACgBd/ycAPAH1AgoDpgNsAncCWwGb/9/+tP2m/ab9of2e/ur+ef8NADYA/QEJAU8BTwBd/mj+Nf90/2H/W//O/zQBAwBuAJgA//42AK7+sf7T/u38jP53/Q3+bP5E/hL9kv3l/aX8KP5W/QsAiwB6/1IBiwDZAPb/VwDc/+L+vv+Z/08AlQA2ALsB9P/5/wv/mv6m/u79If48/4IAEgCJATYBjQHAA7AB7AHUAQABTQFIAEEAUwCwAAsAsAAEAW8AFQFpAP//DQFX//7//P/t/kkBLQGIAO8CZwKcAgsCjABHAHj/6v9G/v3+J/7Z/mr+H/7V/7L+PACfACgAvf/R/gsAzP4h/7L/YP/m/+D+iP6g/vj+JQDc/i//cAD1/kL/IABe/qH+Y/4k/V/9Cv49/aD/wv8WAH4B/AL6AIv/TwGR/+cATf5x/goAF/4+/1f+7P6D/1D/PgEVARQAngCv/tD+y/+s/nAAav9nAEsAhwISAq0CFAMUAiYDTwFzAdwACQDxADf/NQGrAFkBzQHbAYECHgLvAcABhgFZARUAZgExAcQCNQOJAa4DQAJUAoIACAFlAZr/rQBF/5UA2/+1/7P/RQA7ABIAh//HAEQADv5PACn+V/+x/pb9sv5M/mL/z//OASj/aAGm/1L92P9s/BL/e/2k+xL+q/wz/sz+0v4h/zv/DgG4/9IArABZAPcA8P7V/pf/eP7e/oP/Ff7w/v7+jf4U/2//kf4O/tj9A/2q/Vf+e/5J/8QAj/+nAbQB6QCsARYB9wB1AFQBfQGAAZAAvQDbAaABcAD8ACEC9AHDAVMBPAA5AI//fAD0AckB/gFuAWsBbgGGAcgABgAdATAAsgC0/+3+e/8hAEEAlv9EAREB7gGCAqcA/QG8AKQAIgAh//D/pf6nAID+zP/w/0z/LP+2/1T+Q/7F/n/9nv2W/Nj7gvxN/L78Zv47/6/+GAA5/4n/jQCv/yUABf9o//L+Rf6K/+n9aQA//2wANQCk//sA1Pyy/37+bvya/sn7L/+U/p39yv8y/+UA5/9mATcBQwCWAU8AQP85Afb/aAD8AhMBAwITAur/2QHsAHUAEQEAAD8AQACUAJIASAIOAXEAIgICAbwBoAGtANIBzAA0AfAAGQBMAToAzABk/yoAwQDYAUoFMQGhAfIAIv/TALX+cv6XACUBZf++AOEB1QC2AXgAQv9dALH+h/78/sL9Pv6e/Xf+hf2Z/d//G//HAdkAkQAkAXIAgv+Q/W39Zf4M/6cA+P8fASMCBgDlAAMBCQD5AD8AdwB//u//wP+b/+MBaQBIAX8AOwFbAdsBmf5YAdMDIAK1AtP/eQAI/QL8JP4m/ZL+eQCq/+b+Tf5F/6X+wQCAAF//bgLfAKEA2wGHANb/lQH2AeABqQG0/rr+hf8E/Pj/6QE7ADwDEwIvANX/5v0l/E//pv7k/UQA2P8PAdgCyQKoAwAGfwdVBn0FUwThAlEC/wCI/4T+eP4o/FD70/yn+5z8Af1J+w/7J/w/+/v6ZPxJ/N78s/3H/a39YP/I/88AOwItA+gCJAN0A8sCHQLzAeoBNwIgA1IBkAErAgIAtgDdARMCygF3AeMAeQBbAPb+af88ALL+bP5o/nH9BP2a/aH9cf22/ej8B/wy/Ij82/wg/fL9YP4I/kP/IgBDAeECJAMaAlUBqQG2AK4A/AAAAX8BIAEUAP7/NAAwAE8A7//X//j/DAAl/57+Jv6S/RX/PP9x/wUAOADJ/1b+0P3B/WT9YP1X/c79AP+5/wAACwCh/wEA9wDqAGQACwAnAFUA/P/F/xAABgEuAqAC7wJpAvUB0ADg/2T/af+2/40ApwBMACoA1gATAV0AtQDuAAYBRAGTAawBZAGBAWoBQwHhAKsA7QDDANkAvgCQAAkBUgFOAYgAmwC2AFoAbQCJAMEAdQCT/8f/7P+E/2b/Tf6W/XH8I/vX+rn63voR++H7Mfzj+1D8C/0I/aH90P2U/VD++P1b/oX/EQC9AEMCxgJoAkYDgQRYBHUDrwInAgQB3/+A/qD9t/6nA1sIbAhvCEELbw2fCpwH4we7CG4FHwHL/qP64fY09uj1kvLV8FTzfPZ598b5pADoB0QMgA+pE3IWYhj5GnEbdRh/Ff4TrBBXCokEowJsAHH7i/Ws8UPw7e6x7aTrq+lD6f7ob+dQ5fLkHuZd5uXla+X25WvoGOy97q7wePRd+Xf9LQB9AocGYQtWDqYONw8cEecSqRM9ExsSRRHlD4ANWgrSBm4FwQW8A9UAIgCrAZ0CNwPYAzQEVAUEBm8FtARhBbwFUAbMBv4FZAUVBokGdwUFBXQEYAT0A6ADwgLKATwCxgFbAMT+ifut+N/5QAGCBu8DogP1CDALuAYnAwUGoQWr/6/6APWy7JvnXOiA5uTgeuH45+HsIvAA99QBGQxsFE8bryBqJC8ptyx/KfohLx40G5YS7AYO/wD6TPOE7FnnNeS04unin+OL4jXh1eKC5bDkT+Ia4pnj7OPR4m7i6eNg5y3ry+0t8DD0FfpFAK4EyAdlDH4SkxZKFxAZfxwwHZQaTxfgFA8SDA9IC/oF3wDn/RP91/p491r2H/gW+eb4qPro/agALQNOBtQHxAiICzgPVhCJDt4OehAfEAEOPQ3MDW0MXgqXCIIGdQSsA+sDvwIlAUIBzgEpARkAGwAyAEL/M/73/In5Qfbr+XICPgW1AiQFJwyjDZAJkQg/CdwF1QA+/aP2++zZ6CLpfeQa3lLgbOgv7VDwOPjAATwKMBTZHY0i8iS1KhAuHSknIksf2RuuEdcFYP369d3uwOnu5cjgZ91X3+ThCuF94EHkR+hl55zkDeQb5fLl3eZW5/fmJ+gy7I7vqfCS82D6ZgFwBbUIbA09Em4VXhbtFSwVGxUOFMEQkQwJCTAGYQPNAPf9rPsf++D6w/m4+Ef58fp9/Ef95/wu/Tj/BgFrAaABFgMTBRYHywmbDGwPExJRFCAVchT7EzEU4xN+Ei4QPA3/CqgJQQfWA6wBsQCo/pb8y/tb+/r69Prg+oj6qPow+/r74vui9/zz+fkzBN4FtwI9B60O3w79CmgJSAdOAlkAx/6U9T7p6uVq59fhmNzI4JnoFu4i9fj8dQL1DE0c4yWhJqMndy02L3gpNCPlH/QZOQ97BVP8BvEC6dbmN+OE27DYxNy336vfPeEj5YnneOjx6e7qNOr86WDrG+uO6UPqKO1v78DwVPND91H8QgIwBwwKvQyiEMkTnhT2EyQTDhIYDy0KigVnAgMAOv45/I759/f691T5M/vF/Nr9x/8IA5wE9QQ3BtwHLAjkB/YIkwn/CXgLywyeDEkMqw0xD6kP5Q5bDqAOgA4eDvYMrwt5C/8L2QprCLEHtwfQBygHdAX2A2QD0AOzAsoAZf8q/2kAdQBJ/239pfzZ/D37hfcE9uX7CAKyAXABEQX5BzMImAeuA0H9ZvpL+8/4ue5P5FfhneEQ4Hff4eKF6K3w2PoHAVMGrxIDIdYndygeKj4s0SpMJjcggxf7Cz4C9
fp38aPmSeCJ3ovcP9pQ2obcNN814lDlHOdo6ensKu/M7gLt4uxz7grw0vCW8TXzNPXz+Bz95/9pAggGMQrqC1YMQA29DfEMPAuMCXEGOQMDAvwAef4S/Ir82v0c/oD+Vv/KAPUC9QU/CAIJzQleCwEN+gxRDLsMSw0SDV4M+wu2CwoMJA1qDcwMrAyVDToO2g1nDb8M5AtQCzIKagiXBnAFBgXlAxwCdAGJAacAIQDaAN4ABAE5ApsCvAGdABcASgC7/439S/uu+cb35/NE8Rb2+vxy/lL/ZgPOBrwHIQktCd4CSv36/ST+nPae6VHhmt3D21zdeN6e4DTnqPG9+sUAAAxPGv4jmSiRK2Mt9yqiJ2ojxhpWDz4FWv9f98LrFOTb4YXhs+FF5Grm9Obp6SDudvBs8Njw0PG97zbsq+kk6HHnPejV6QPqs+r87cHytfYZ+kL/kATOCNAM0Q8zEeoR1BOvFIUSMxDnDkUNZwrjB3kGwQTQA4QDGgNaAisCbgM3BAUEOgOFAtkCnAPBA0MDQQO7A0MEzwTmBSMHEwhsCfYKoQyxDRkOOw8REHQQ6RBuEd0RDBGaDwoOoQx4C4AJ/AYeBKsBhv/j/Wv8NPrm+Fv4Q/gO+I73xvf+91T4Ivky+gj7F/vH+Y72A/YG/W4D9QQ5CaMNnA6PD0MRnA4MBSv+jPwQ+2/zA+WR3LDZBNkr20TdMuKU6Tn0xf1JBCYPDRsFJNsoACrsKJUk7yASHTAV/gm2/777m/bo7LjmaOT440zkEOah5mLjkuKF5Jbl+eOs4QjiK+K64YTihOS855Dso/PX+DL7xP9HBqMLGg4cD5UQuxGKE4MUnxLQD3oOCw9lDhsLoggwB60FOwReA5QC7ADrAFEBbP8G/sn+EAHiAYwByAGIAn0EsQb5CK8KfwsRDrQRoxO/E/YTuxSiFD4UTxNxEF8NXQt1Ce0GBgQIAjIBogDN/9r+NP4s/hr/W/9E/kr9Zv2y/cL9zP2T/bb9vv3N/cj9N/2C/B398f7O/jv+eP4V/5j/Qf8w/0f+df0e+4L4+/hY+zL9HABgBKsEiQNjAjoAjPup9bXyrvDY7Snn/t823R3dTeAs5JzoEO8w9gr+KQX0C7gSQRg0HbwfIR9zHG8YUhVeEQoM2wVEAPv87vhy9P7waO6l7eftOu5L7Qbr/OlK6s/q4erB6u/rYu0z73jxNPP09bz5Nv7xASsDUARwBmsIOwnnCAgJEQmcCXAKJgrzCfYJTArzCtoKegqCCf0ICgmfCJoHIgatBTQGUwaTBoYH2QgzCsULrAyODA0Mwwt6C1cKngh9B3YHWgd1BtUFTgXkBCkFuwX/BcEFawUfBeoEbARVA0ECOAGcAEEAFwAHANX/OgDXAE8BhgHyAc8CqgPdA0gDlwL8ATABDAAY/jv8kPrf+Ef36/TD8lLx4e9v74bvDvDp77LvbO+87kzwQ/T0+Gf+kQPgBeoGhgcmB1YEQ/6l+Cz11fNF8b7rY+cc5hHom+sG8Av25PtTAeEGaAuxDkkRpxSwF1wYGReAFL0SxhEqEMcNxglPBhMEQgGk/Un5hvWW8jbw/O677FnqhenX6RDrHexQ7lzx8/SK+Mz6k/z4/cz/PwItA2sC5AExAv8BnwGxAnEEDgbzByoK1ww1DoUORA/wD5YPnA1YC3sJxAdJBvIEiAMHApcBywKDBJsFawY9B+UHqQexBk8F+gNDA50CCQK+AcsBnwLOA18E8QTNBfcGagg+CfQIbQflBcAEMgPCAfkACwGiAWMCvAKjAssC+gIZA+ECwAESAJ3+S/1R+1f52ve29rn1kvRL84jyMvIF8jXyKPKP8fTwKPF88SrxAPHJ8Vvzm/Ss9d32PPgK+h/8Tf5pAGsCvQN+BK0EPAWKBugH9gjACN0GvgTSAh4A+vyS+i/5DPnJ+Rv6/Pl8+tb7zP1VADUDqAXDByQKDgy/DJ4MzQxjDYUNnQwTCyAJnAYzBA8Cj/8d/U77zPnq97H1v/PQ8t/yQvP485z0I/XU9dr28vfT+MD5tvqM+4L8TP0i/vH+Qv9S/27/DQCDAOoAgwEdAuYCyQPTBKQFPgaBBqcG0wblBioHaAfpBwUIiAfeBjMGhQXgBHQEIwRABNAE4gTQBEQFowULBokGEgdWB5EHHgc7BpcFxgRsBKQDjQJlARgAJv/Y/bX82vtF+5X6o/kM+eT4Kvmd+VT6DfuM+xf8xvw0/XP9hP2X/cn9HP5y/of+bv6B/qb+mP6k/tb+Lv+u/xMAKQABAJv/cf9d/+z/2QCJAZUCEQOWAokBOQDl/i/+Xv6I/vr+4f6x/qP+t/4w/9r/WAEmA3kEyAVCB/QHuQf7BkYGBgWYA7MCcwFfAIb/Rf76/Ar8OftC+pD5tPkW+vb5xvnI+S36wvo/+5L7O/wp/Wr94fyt+1v6Jvkr+KX38PZh9p72Qvf698346vl8+2D9X//bANoBjwLCAlsCowEFAV4AAgAyAGUAdACeAPYAcAHOAVQCBAPNA74EiAUPBlEGegZ3BjkGAwbaBccFtgV0BRcFjwQtBEAETARyBJIErgTCBIUEIwS7Aw4DJQJLAbEAUAAbAEMASAA7AK4AdgFHAucClQNFBHgERwQHBPAD6wOAA4cCWwEoAfcAHwBP/9X+gf5W/pD+bP48/uT+pv9bAPIAZQHjAbECiAMFBAEEMQRtBFcETgQBBKoDbAMqAwoD9wKOAt4B4gBa/6j9G/zV+r35y/g9+Nr3qff/91j4g/i8+ML4kvgE+C73HvZN9eT0kPRf9Ez0tvRE9d/1SvaT9gv3xfe1+Kv5Y/rv+qT7aPxH/TL+6/56/xkAsQA/AckBTQLWApcDjQSABVkG6AZHB30HVgcZB84GWAbKBQIFKwSNAysD7QLNAqYCqwLKAvwCNANjA6gDtwOYAxoDuwLEAu8CMgO4A/0DHwSxBHcF+wUKBhsGnwYvB3MHIQeFBuQFGAU9BHYDkwJ7AWUAb/+e/vP9hv08/S39Mf1R/a/9KP5w/sj+YP+r/6v/j/9E/8/+If6j/e78G/yI+0j7Qvs6+0X7iPso/L78Kf2i/Sb+qf5J//r/ewDiAGEB2wEsAhcCuAF7AUcB4AA5AK//Pf+i/gP+U/2l/P37cfsZ+9H6vvrk+nD7Avxv/OH8Rf3m/Vj+df6D/mH+Of4I/pL9CP23/J/8yPxZ/Q7+dv7l/oD/UQD/AFwBjgF3AXcBaAEKAY0AAgCe/xX/cv4P/vT9LP5A/kj+Sf59/g//pv/3/wsAXQDLAEEBrgHqAS0CkgLtAiMDDwMAAxcDCAO3AjECwQFxAUYBQQFCAYkBFQLGAlwD5QOxBH4FLgbEBhMHVAd/B3MHMgetBvIFOAWLBNEDEQOgAkQC3AGxAbIB6QEeAu8BqgGqAe0B8AGzAXwBAgGOAD8Aqf8E/7H+sP7K/sz+WP4a/n7+Sf8xAMQAbwERAo8CQQO2A7UDngOaA4wDMgNgAnIBqAD6/zX/Xf6F/ZD8qPvo+lD6wPkw+Zz49fex97735ff698z3mPfA90D4iPhe+AH45fcd
+E/4Tfgn+DL4ivgQ+an5DfpO+uH6uPts/Nr8KP3q/e/+sv8vAKIAawE/As0CDgNFA50D8gMyBEcEUwScBOYE1gScBF0ETARgBIkEdgQ7BDEEYARxBEYETASaBA0FSQUsBeYEqgS2BNEEigQPBLMDYwP8AnMCpQHiAIkAjAClAJ4AmACyAKkAeAAnAMf/b/8I/7L+c/4Z/tX9xP2p/ZD9t/0h/rf+Uv/D//j/AgAHAAQAvP9B/6T+7P1Y/dL8SPzs+/D7Ofx7/N78mf2d/pz/VwDTAPIA7wAuAf0BfQIuAp0BUgGNAG3/Tv5O/Zr8s/z9/LP89/s5+4f6HPpA+qP69vqT+yb8jPzw/If9dP6Q/9AAEwJLAz8E1gQtBYEF1QUVBiYG6AUtBT8EaQOFAnIBKwDr/sz9rfyO+436pvnh+Gv4M/gI+NP34vcy+I/4zfjz+Fn5Cvrn+gP8jP0k/5AAFQLmA9cFvAdZCUsK6Qp7C9ML2wuPCw8LoAp6CiYKiQkgCT0JwQkzCicKzAmGCV4JAQlLCFQHSgYoBfEDlwIqAdT/gv5L/UT8fPvb+nf6SvpV+on6r/ot+9X7b/zc/B/9ef3z/ZD+G/+c/xkAowArAZMB+AFfArQCBQM0A0gDQwMJA5oCDQJ0AcUAHwBt/6z+4f0X/XT8yPs0+936ffpS+mb6fPqj+sf69voT+yL7Wfuu+9/7EPyL/D79vf0a/pj+FP+I/wcAUQB6AJIAmQB5AEAA8P+W/zL/yv5b/uP9XP3//M38q/yO/Gr8WvxM/Hj8n/yW/In8nPzj/A79Mf1W/an9L/6n/hX/f//L//L/AQAkAFEAdgChAMgA9gArAUYBSgFRATsBLAE+AWYBpwH5ASUCTQKIAsUC4wL3Ak8DfgN6A2kDRQMAA6YCWQIIAt0B2wHkAQICDgIHAvAB2gG3AXgBJQHeAJEAMQDP/6j/z//j/wAASACjAAABQAFOAVUBRgEgAe0AugBlAAAApP8j/5f+Ef6i/Sf9jfz++2v77/qb+kn64/ld+fX4uvib+ID4afhU+GL4zPhW+fv5uPq9++D89v0e/2QAyAEdA0EEPgUOBrgGSAejB8QH4gfwB9MHuwd9B1EHEwfDBnkG/QVkBbME7gMIAw4CFgFDAJb/+f5a/uH9h/1Y/U39g/37/YX+Iv+//0oA8QCmAXICLgOCA34DTgMiA8sCbAL1AXQBEAHSALIAhQB6AIwApACtAKoApAC1AKgASADU/2z/NP8G/8/+if5a/lr+fP6m/tH+2P7h/tf+tf6k/on+Uf4z/iD+Ef4G/uD90/3f/fX9Gv4j/j3+a/6X/sb+5v7r/gv/Tv90/3j/Sf9E/0X/Tf8y/87+bP4m/rn9U/3m/If8a/xq/Gn8WPwp/Bv8SPym/Pb8T/2y/SH+pf4u/5T/AQB3AO0AWwHDAQICIQJjAqoCpQKAAl8CRgItAgwC4wHAAcEBtgGaAYcBlgHPARkCVAJ7AqsC4gINAzEDVQNBAy0DFAPlArACcAIxAg8C8AHyATYClwLmAgoDKwNSA4UDswOoA3kDMAMNA80CTQLBAVgBGAHWAH8AJAD5//T/3/+w/2L/Hf/o/rD+Q/6+/U798/yf/GT8SPw8/Ez8m/z7/D39f/20/fj9Vv69/ib/ef+//y0AmwAMAXwBtwHRAbABmQGbAZYBkwGGAXsBdgGFAYoBdwFkATEB7ACWACIApv8S/4b+Bf6a/Tr99fzn/AT9Of2J/QT+eP7J/iH/c/+5//X/EQD+/8//lP8l/6n+KP63/UT94vyc/Ff8J/wy/HP8rPzP/Pz8Uv3g/W3+6P5r/+//VwCyAP8AKwEnAQ4B8QDmANQAtAB6ABgAy/+J/1L/F/+9/lj+Df7a/ar9hv1Z/Tn9J/0N/en8xPzQ/AX9Of2N/RD+nv43/83/UwDkAIgBHgKNAtEC5wLtAgIDFQMDA7sCjAKPAo8CjQKMApgCrQK4ApoCbAJLAj8CPwIFAocB9gB/ADQACgDd/7f/p/+//wgAPgBbAIsA0ABLAd0BWgKvAtgC+gIkAzgDXAOQA7oDvQOnA5QDZAMgA8YCcwI4AuwBhQEgAaoAMQC4/z//xf4//rr9Ov2t/CH8xPuR+577zvsa/HP82vxX/dn9Uv6X/u3+Sv+f/+f/JQAyACsARwBmAHMAYABSAFMAWABWAGAAhQCsAMoAIQGNAdEB9AHoAd4B1wHdAeoBzgGTAXEBYQF1AaIBxQHvATgCgQKoAosCRQIPAs4BZAHjAGAAzv9M/+P+jP5L/i7+Qv5W/lj+b/6l/vT+RP94/7D/4P/g/7v/l/9k/xr/2v6b/mL+Pv4z/jL+Jf4I/vX9Gf5U/o/+sP6//sL+0f74/vr+zP67/tH+3/7y/gT/F/9a/8f/NwB8AMYANQG0ASMCdALGAg4DQgNqA1YDCwOoAlECCwLTAbQBngGAAVgBMgEaAQcB3wDMAMgA1ADSAJkAMQCa//j+c/5L/kv+NP4D/tX9sP2c/Zz9tv3R/fj9L/5d/lz+Nv4J/uD9y/3r/TT+Yv52/nj+h/6S/q3+tv6s/qr+pv6e/nH+JP7m/fv9Hf4r/jL+Ev4H/iD+Nf4u/h7+Ef4N/ib+Xf6L/sX+N/+y/ygAlAD0AE4BlAHKAfMBFQImAhcCGQIlAjUCVgJPAhwC+gHrAdIBkwE3AecAswCGAD8A2f+Q/3b/Xv81/wz/7/71/hX/Xv+i/9//PQDBADkBdgGzAQICSgJ/Aq4CwQLNAsECjAIxAtABfwFJASQB+QDkANIAuwCjAJUAlgCVAKkA3QD/ACEBdwGyAdwBwgG2AZYBeAE2Aej/3//q/+b/9f/s//j/+P/5//j/9P/q/+H/2//e/9//4P/e/+H/6P/6/wwAFwAdACEAHgAdAB8AIgAkACQAIQAcABgAFQAVABQAEAALAAMA+v/1//X/9v/3//j/+f/6//3///8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
ASSERT_TRUE(results.ok());
auto results_json = results.get();
ASSERT_EQ("Smartphone", results_json["request_params"]["voice_query"]["transcribed_query"].get<std::string>());
ASSERT_EQ(1, results_json["hits"].size());
ASSERT_EQ("1", results_json["hits"][0]["document"]["id"].get<std::string>());
}
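// A voice query payload that is not a valid 16-bit 16kHz wav file should be rejected with a descriptive error.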
TEST_F(CollectionVectorTest, TestInvalidVoiceQuery) {
nlohmann::json schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"}
],
"voice_query_model": {
"model_name": "ts/whisper/base.en"
}
})"_json;
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto collection = collection_create_op.get();
auto results = collection->search("*", {}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "", true, 0, max_score, 100, 0,
0, "exhaustive", 30000, 2, "", {}, {}, "right_to_left",
true, true, false, "", "", "", "test");
ASSERT_FALSE(results.ok());
ASSERT_EQ("Invalid audio format. Please provide a 16-bit 16kHz wav file.", results.error());
}
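// Schema-level hnsw_params (ef_construction, M) must be positive integers; the search-time `ef`
// parameter in the vector query string is validated the same way.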
TEST_F(CollectionVectorTest, TestInvalidHNSWParams) {
nlohmann::json schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{
"name": "vector",
"type": "float[]",
"embed": {
"from": ["name"],
"model_config": {
"model_name": "ts/e5-small"
}
},
"hnsw_params": {
"ef_construction": "aaa",
"M": 16
}
}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Property `hnsw_params.ef_construction` must be a positive integer.", collection_create_op.error());
schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{
"name": "vector",
"type": "float[]",
"embed": {
"from": ["name"],
"model_config": {
"model_name": "ts/e5-small"
}
},
"hnsw_params": {
"ef_construction": -100,
"M": 16
}
}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Property `hnsw_params.ef_construction` must be a positive integer.", collection_create_op.error());
schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{
"name": "vector",
"type": "float[]",
"embed": {
"from": ["name"],
"model_config": {
"model_name": "ts/e5-small"
}
},
"hnsw_params": {
"ef_construction": 100,
"M": "aaa"
}
}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Property `hnsw_params.M` must be a positive integer.", collection_create_op.error());
schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{
"name": "vector",
"type": "float[]",
"embed": {
"from": ["name"],
"model_config": {
"model_name": "ts/e5-small"
}
},
"hnsw_params": {
"ef_construction": 100,
"M": -100
}
}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("Property `hnsw_params.M` must be a positive integer.", collection_create_op.error());
schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{
"name": "vector",
"type": "float[]",
"embed": {
"from": ["name"],
"model_config": {
"model_name": "ts/e5-small"
}
},
"hnsw_params": {
"ef_construction": 100,
"M": 16
}
}
]
})"_json;
collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto collection = collection_create_op.get();
auto results = collection->search("*", {}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "vector:([], ef:aaa)");
ASSERT_FALSE(results.ok());
ASSERT_EQ("Malformed vector query string: `ef` parameter must be a positive integer.", results.error());
results = collection->search("*", {}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "vector:([], ef:-100)");
ASSERT_FALSE(results.ok());
ASSERT_EQ("Malformed vector query string: `ef` parameter must be a positive integer.", results.error());
results = collection->search("*", {}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "vector:([], ef:0)");
ASSERT_FALSE(results.ok());
ASSERT_EQ("Malformed vector query string: `ef` parameter must be a positive integer.", results.error());
results = collection->search("*", {}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, "vector:([], ef:100)");
ASSERT_TRUE(results.ok());
}
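// The collection summary JSON should expose hnsw_params only on the field that declares them.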
TEST_F(CollectionVectorTest, TestHNSWParamsSummaryJSON) {
nlohmann::json schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{
"name": "vector",
"type": "float[]",
"embed": {
"from": ["name"],
"model_config": {
"model_name": "ts/e5-small"
}
},
"hnsw_params": {
"ef_construction": 100,
"M": 16
}
}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto collection = collection_create_op.get();
auto summary = collection->get_summary_json();
ASSERT_TRUE(summary["fields"][1]["hnsw_params"].is_object());
ASSERT_EQ(100, summary["fields"][1]["hnsw_params"]["ef_construction"].get<uint32_t>());
ASSERT_EQ(16, summary["fields"][1]["hnsw_params"]["M"].get<uint32_t>());
ASSERT_EQ(0, summary["fields"][0].count("hnsw_params"));
}
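// Index 100 random vectors, delete a batch of them, then update a single document repeatedly;
// the reported "found" count must keep matching the number of hits returned.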
TEST_F(CollectionVectorTest, TestUpdatingSameDocument) {
nlohmann::json schema_json = R"({
"name": "test",
"fields": [
{"name": "vector", "type": "float[]", "num_dim": 10}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(collection_create_op.ok());
auto collection = collection_create_op.get();
std::mt19937 rng;
std::uniform_real_distribution<float> dist;
// generate 100 random documents
for (int i = 0; i < 100; i++) {
std::vector<float> vector(10);
std::generate(vector.begin(), vector.end(), [&](){ return dist(rng); });
nlohmann::json doc = {
{"vector", vector}
};
auto op = collection->add(doc.dump());
ASSERT_TRUE(op.ok());
}
std::vector<float> query_vector(10);
std::generate(query_vector.begin(), query_vector.end(), [&](){ return dist(rng); });
std::string query_vector_str = "vector:([";
for (int i = 0; i < 10; i++) {
query_vector_str += std::to_string(query_vector[i]);
if (i != 9) {
query_vector_str += ", ";
}
}
query_vector_str += "], k:10)";
auto results = collection->search("*", {}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>(), 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, query_vector_str);
ASSERT_TRUE(results.ok());
auto results_json = results.get();
ASSERT_EQ(results_json["found"].get<size_t>(), results_json["hits"].size());
    // delete roughly half of the documents (ids 50 through 98)
for (int i = 50; i < 99; i++) {
auto op = collection->remove(std::to_string(i));
ASSERT_TRUE(op.ok());
}
    // update the document with id 11 a total of 100 times
for (int i = 0; i < 100; i++) {
std::vector<float> vector(10);
std::generate(vector.begin(), vector.end(), [&](){ return dist(rng); });
nlohmann::json doc = {
{"vector", vector}
};
auto op = collection->add(doc.dump(), index_operation_t::UPDATE, "11");
ASSERT_TRUE(op.ok());
}
results = collection->search("*", {}, "",
{}, sort_fields, {2}, 10, 1, FREQUENCY,
{false}, Index::DROP_TOKENS_THRESHOLD,
spp::sparse_hash_set<std::string>(),
spp::sparse_hash_set<std::string>{"vector"}, 10, "", 30, 4, "title", 20, {}, {}, {}, 0,
"<mark>", "</mark>", {}, 1000, true, false, true, "", false, 10000,
4, 7, fallback, 4, {off}, 100, 100, 2, 2, false, query_vector_str);
ASSERT_TRUE(results.ok());
results_json = results.get();
ASSERT_EQ(results_json["found"].get<size_t>(), results_json["hits"].size());
}
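// parse_stream_response should stitch the streamed "data: {...}" chunks (terminated by [DONE]) back into one string.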
TEST_F(CollectionVectorTest, TestCFModelResponseParsing) {
std::string res = R"(
{
"response": [
"data: {\"response\":\"0\"}\n\n",
"data: {\"response\":\"0\"}\n\n",
"data: {\"response\":\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"publish\"}\n\n",
"data: {\"response\":\"Date\"}\n\n",
"data: {\"response\":\"Year\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \"}\n\n",
"data: {\"response\":\"2\"}\n\n",
"data: {\"response\":\"0\"}\n\n",
"data: {\"response\":\"1\"}\n\n",
"data: {\"response\":\"1\"}\n\n",
"data: {\"response\":\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"title\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \\\"\"}\n\n",
"data: {\"response\":\"S\"}\n\n",
"data: {\"response\":\"OP\"}\n\n",
"data: {\"response\":\"A\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"top\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" [\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Links\"}\n\n",
"data: {\"response\":\" to\"}\n\n",
"data: {\"response\":\" x\"}\n\n",
"data: {\"response\":\"k\"}\n\n",
"data: {\"response\":\"cd\"}\n\n",
"data: {\"response\":\".\"}\n\n",
"data: {\"response\":\"com\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Apr\"}\n\n",
"data: {\"response\":\"il\"}\n\n",
"data: {\"response\":\" fool\"}\n\n",
"data: {\"response\":\"s\"}\n\n",
"data: {\"response\":\"'\"}\n\n",
"data: {\"response\":\" com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Inter\"}\n\n",
"data: {\"response\":\"active\"}\n\n",
"data: {\"response\":\" com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\" with\"}\n\n",
"data: {\"response\":\" animation\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Dynamic\"}\n\n",
"data: {\"response\":\" com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\" with\"}\n\n",
"data: {\"response\":\" audio\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\" ],\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"trans\"}\n\n",
"data: {\"response\":\"cript\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \\\"\"}\n\n",
"data: {\"response\":\" \\\"\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"},\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"{\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"alt\"}\n\n",
"data: {\"response\":\"Title\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \\\"\"}\n\n",
"data: {\"response\":\"I\"}\n\n",
"data: {\"response\":\"'\"}\n\n",
"data: {\"response\":\"m\"}\n\n",
"data: {\"response\":\" currently\"}\n\n",
"data: {\"response\":\" getting\"}\n\n",
"data: {\"response\":\" totally\"}\n\n",
"data: {\"response\":\" black\"}\n\n",
"data: {\"response\":\"ed\"}\n\n",
"data: {\"response\":\" out\"}\n\n",
"data: {\"response\":\".\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"id\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \\\"\"}\n\n",
"data: {\"response\":\"1\"}\n\n",
"data: {\"response\":\"0\"}\n\n",
"data: {\"response\":\"0\"}\n\n",
"data: {\"response\":\"6\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"image\"}\n\n",
"data: {\"response\":\"Url\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \\\"\"}\n\n",
"data: {\"response\":\"https\"}\n\n",
"data: {\"response\":\"://\"}\n\n",
"data: {\"response\":\"im\"}\n\n",
"data: {\"response\":\"gs\"}\n\n",
"data: {\"response\":\".\"}\n\n",
"data: {\"response\":\"x\"}\n\n",
"data: {\"response\":\"k\"}\n\n",
"data: {\"response\":\"cd\"}\n\n",
"data: {\"response\":\".\"}\n\n",
"data: {\"response\":\"com\"}\n\n",
"data: {\"response\":\"/\"}\n\n",
"data: {\"response\":\"com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\"/\"}\n\n",
"data: {\"response\":\"black\"}\n\n",
"data: {\"response\":\"out\"}\n\n",
"data: {\"response\":\".\"}\n\n",
"data: {\"response\":\"png\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"publish\"}\n\n",
"data: {\"response\":\"Date\"}\n\n",
"data: {\"response\":\"Day\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \"}\n\n",
"data: {\"response\":\"1\"}\n\n",
"data: {\"response\":\"8\"}\n\n",
"data: {\"response\":\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"publish\"}\n\n",
"data: {\"response\":\"Date\"}\n\n",
"data: {\"response\":\"Month\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \"}\n\n",
"data: {\"response\":\"1\"}\n\n",
"data: {\"response\":\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"publish\"}\n\n",
"data: {\"response\":\"Date\"}\n\n",
"data: {\"response\":\"Timestamp\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \"}\n\n",
"data: {\"response\":\"1\"}\n\n",
"data: {\"response\":\"3\"}\n\n",
"data: {\"response\":\"2\"}\n\n",
"data: {\"response\":\"6\"}\n\n",
"data: {\"response\":\"8\"}\n\n",
"data: {\"response\":\"6\"}\n\n",
"data: {\"response\":\"6\"}\n\n",
"data: {\"response\":\"4\"}\n\n",
"data: {\"response\":\"0\"}\n\n",
"data: {\"response\":\"0\"}\n\n",
"data: {\"response\":\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"publish\"}\n\n",
"data: {\"response\":\"Date\"}\n\n",
"data: {\"response\":\"Year\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \"}\n\n",
"data: {\"response\":\"2\"}\n\n",
"data: {\"response\":\"0\"}\n\n",
"data: {\"response\":\"1\"}\n\n",
"data: {\"response\":\"1\"}\n\n",
"data: {\"response\":\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"title\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" \\\"\"}\n\n",
"data: {\"response\":\"Black\"}\n\n",
"data: {\"response\":\"out\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"top\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\"\\\":\"}\n\n",
"data: {\"response\":\" [\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Links\"}\n\n",
"data: {\"response\":\" to\"}\n\n",
"data: {\"response\":\" x\"}\n\n",
"data: {\"response\":\"k\"}\n\n",
"data: {\"response\":\"cd\"}\n\n",
"data: {\"response\":\".\"}\n\n",
"data: {\"response\":\"com\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Apr\"}\n\n",
"data: {\"response\":\"il\"}\n\n",
"data: {\"response\":\" fool\"}\n\n",
"data: {\"response\":\"s\"}\n\n",
"data: {\"response\":\"'\"}\n\n",
"data: {\"response\":\" com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Inter\"}\n\n",
"data: {\"response\":\"active\"}\n\n",
"data: {\"response\":\" com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\" with\"}\n\n",
"data: {\"response\":\" animation\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Dynamic\"}\n\n",
"data: {\"response\":\" com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\"\\\",\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"Com\"}\n\n",
"data: {\"response\":\"ics\"}\n\n",
"data: {\"response\":\" with\"}\n\n",
"data: {\"response\":\" audio\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\" ],\"}\n\n",
"data: {\"response\":\"\\n\"}\n\n",
"data: {\"response\":\"\\\"\"}\n\n",
"data: {\"response\":\"\"}\n\ndata: [DONE]\n\n"
]
})";
auto parsed_string = CFConversationModel::parse_stream_response(res);
ASSERT_TRUE(parsed_string.ok());
ASSERT_EQ("00,\n\"publishDateYear\": 2011,\n\"title\": \"SOPA\",\n\"topics\": [\n\"Links to xkcd.com\",\n\"April fools' comics\",\n\"Interactive comics\",\n\"Comics with animation\",\n\"Dynamic comics\",\n\"Comics with audio\"\n ],\n\"transcript\": \" \"\n},\n{\n\"altTitle\": \"I'm currently getting totally blacked out.\",\n\"id\": \"1006\",\n\"imageUrl\": \"https://imgs.xkcd.com/comics/blackout.png\",\n\"publishDateDay\": 18,\n\"publishDateMonth\": 1,\n\"publishDateTimestamp\": 1326866400,\n\"publishDateYear\": 2011,\n\"title\": \"Blackout\",\n\"topics\": [\n\"Links to xkcd.com\",\n\"April fools' comics\",\n\"Interactive comics\",\n\"Comics with animation\",\n\"Dynamic comics\",\n\"Comics with audio\"\n ],\n\"", parsed_string.get());
}
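// An invalid remote URL for the OpenAI embedder should cause collection creation to fail.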
TEST_F(CollectionVectorTest, TestInvalidOpenAIURL) {
nlohmann::json schema_json = R"({
"name": "test",
"fields": [
{"name": "name", "type": "string"},
{
"name": "vector",
"type": "float[]",
"embed": {
"from": ["name"],
"model_config": {
"model_name": "openai/text-embedding-3-small",
"api_key": "123",
"url": "invalid url"
}
}
}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(schema_json);
ASSERT_FALSE(collection_create_op.ok());
ASSERT_EQ("OpenAI API error: ", collection_create_op.error());
}
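// A document with a non-stored image field should still be restored correctly after the store is reloaded.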
TEST_F(CollectionVectorTest, TestRestoringImages) {
nlohmann::json schema_json = R"({
"name": "test",
"fields": [
{"name": "image", "type": "image", "store": false},
{"name": "embedding", "type":"float[]", "embed":{"from": ["image"], "model_config": {"model_name": "ts/clip-vit-b-p32"}}}
]
})"_json;
auto collection_create_op = collectionManager.create_collection(schema_json);
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "dog",
"image": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBwgHBgkIBwgKCgkLDRYPDQwMDRsUFRAWIB0iIiAdHx8kKDQsJCYxJx8fLT0tMTU3Ojo6Iys/RD84QzQ5OjcBCgoKDQwNGg8PGjclHyU3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3N//AABEIAJsAmwMBIgACEQEDEQH/xAAbAAACAgMBAAAAAAAAAAAAAAACAwEEAAUGB//EADUQAAICAQMCBAQDBwUBAAAAAAECAAMRBBIhBTETQVFhBiJxgRQjMkKRobHB0fEVJDNS4fD/xAAZAQADAQEBAAAAAAAAAAAAAAAAAQIDBAX/xAAmEQACAgMAAgICAQUAAAAAAAAAAQIRAxIhMUEEEyJRkSMyYXGB/9oADAMBAAIRAxEAPwDrMQguYAMapnjmRG2YBCMHzgIkCGBBEkRjCxJxMmQAnEniDmQYWAWBMwIGTI3RbA2GcQGImGLaPYVkkiASJBgmS5hZJYQciCcyCYtwslmEDdIJgZhsFlocRgMgLCAlUFEZmAycScSaCiQZgMjEkCMdBZkFpMjEYjMyczAJJWFALJmZh7DI2yKYAloDGN2SCkqgoRumGMZIJXMnUKFQWjtkFki1ChEGP2QdkWrHoyyDCEriz3hB5qXaHARmABEB5jWQJbHSRiVxb7wHuIibFZc4xIHeV67CxAmx0ukerOo1O1EQZAZu8uK2HFbOkTRotRbtKVEqfM8CWh0m3blra19iZT1fXbKVIVQi+RH7X95pOq/G9PSrKF1FlbM2C9YbLKhONxHl95soxOhYUvJ0N/T9RUM4DL6qcyiZs+n332KLa8tWwymJr+o/JcSK3RW5AYYI9ZnOKStEZMevRe6TulcWcyd8yTMbDcwF7xVlkWLSIOSsVltmxFk5iDbmR4sNkGw3dB3CKZ4vf7yXMe4zGDGA47SnZY6nOIK6snjEnctyrlF8HmS5wJTW0kiNLNjmVuifIyvJaS6gwaySOxkkN3xJ2XsFFtXQ7TkK6k9hzOZ+J/iC2wrTpyfzbCqKDwccf1m36neaOn3PnaduB9TOG1p/3uiwMhAPPzIz/Ob4+8Or46qLkdEv4jUJXp6rdqou0v33Yj9P0Lp+nZtZ1FKHsOSCyl7HY+npNfpdQ9LqQVXHr6zmupfFet/1i6jwiyVkj3OPSbwTfg0lL9nfWfEF+l0zV13Cqwjg4yF/vOF+Guude1fxfo9JrOoWPpLdTtu4G0j7xGu6zqNTWTXprCx4IJxg+mJb6V0s6fSdO6rm5ta9521VcKQOeR37+k1SUIvYiSc3SPSra2ptZG7qcRZY57xm6y47ypy3J4izWxbBBE8yTp8ONwldUQeYGBLBq2r6xbVnPEPKsbxyXBRWDiMetwOBFhXLYxEpoX1yuqBc+8XmNvQoJXw3pDj6DhJOqNrVpVdcExg6fUvfGZWqtdACG4jG1ZYzCSl4R7MI45PaSH/hqU5AEILWeMZ+koLqfEYjnIh+O1LZYcROEvZSePtIvolYyMSfy1znGJSXX1seQJj3m5sKpxEsbu5FbRS/EDrOjTqPT7KaiA/6l+0866vptRp9WlFy4uVQcr7ec9J09LG9c5Ckyh8QVUaq9ryi5FYrX2UTv+PF+V4OWc41VHF6YvdViwkOvvK3Uun6fXAm1SlwHDjgzbjTmp/yxwe8ix2dWV0XjsfWdPV1GXk5Na+pdO1A251dXBPiAc+3/s774I1+q1avVboRRXWpJcv3PHYev9oPR9PVvU31o6/9WE7zpmjqt6aa9Pp6kzyCBiVKcpQaYLhSqIzjEaVVSGYSv43gs25ckHkmA2tDqWAnkTizswpXUhuqtXyEimyr9vAiK7vEB3LiLc7mi2TRTxSUupFi2xSwVYsvWr8cmUNVqSFwin0zK+60AsM8ydG0TOWlWumzuHijiSKlAAI5lBbL66t65wO8zxrX+bd39oNtcNIY4y/OvJS0Wt1C1fmDJz5y1+LB78GVfw23hTk59Zg0tm7D+fadEYxTs4s2aeTjX8FyvVVJ27+ccth1bbdvE1q6GxbNzNkY5xLtFbHO1ivGDiV9fsyeSLVMwGpSRjkHEtaXXU1HDGVDpdp5Pn3kipS20jgjgyZQUkVino7N/p2Nmkuv42qOPrOc1NjlSDzmb3UOKdBp9MOGYhm95qdWFrz/AGndix6QpDnPaVmn2ZyNwxEvV+eF25X1HaWNWEVSckfQTNHaozkggdjNUIHcy60oOCmAnHH3nofw6y1UAF927y9PpOG1SgOrV/q7k5/hOo6BYtKjeQScZ5gvIn4J6+tGk1p3AgOM4moqsrtJXaVHvN58UMgvosevI8McmaV6G1W00stbAZIJ7ieV8huGVr0d2FKWOzLQFOK7Ih7lUhmbJB5HrBRjW1ni1biOMZkuiWAHhDjtEo7ypoiWRqOyf/Bl2qpdAFr+8F2LVYoXJimVQoGO/nBrvao4TiPJiuNRJx/JSl/URi3uFfTsMMfWZstT5fl4kWct4zgMwOdvrMZg7FtmM+WZP1SaR1Y88bduhDuucg4BxyfWWQjWIXOCOAPYyiLBYoBQEn5sHnaY78Qa0VQxK54AM6HE8aM1rx9LIHgllZuQPWTXYBUQX2s3GYNdumakeNU29s/tQ0p09iEhnbJwqZ+b7yHlV0a/RJpP3/syhgeC3y+ZMM0ObQ1LErvUYxx3xAN2k09a2MXUjhvMg9vv5x2icNbuV1cIu84+nGR5S4Si2khPDk/ul1C7dSza4gkkKccxWpuBtKgNkegzEKf98Sx4zmSw8a8Hcwz3HrOy6LoqanUonHJz5+hms0+o32MvkD8xA95seqaZdLTZgDaylvvNPpSiVKDnB8xLFZd1OoH44ofTH1nSdD1Tm5atw3eXPlOMvsqXW6esD5ic/bE6/wCGNI19y2/pzgn2EGvY07Ow1nSH6pVp7Gt2BFwynjPMo3/CV6KbNLqFdu5T/wBm7u1H4bTVkknBx9eJY0ur8QcTOWDHN/kNZJxVJnEtUr6i1LDsdcLhuJmq6bdsR/EpTjHLS78a116fVpqi21bl9P2h/mc+uqrNrC5g2xcgHtOOUPpk23Zpus0dar/JcAtpwWspZc9wcgQLmFx/4dx/7DiUtZdgoyJ+VxkLyT7Ae8TdrNU5dlsO0HkMMMolWp00jKpQuMnRaZMhmPDkZA9oxNFY6BvEQZHrK12qtYVh1X5FwcDknMNLVKgjIBHaOmZqSt+ygjIp3MBuJ7jgd+8OnStW48TVK28AhlU5AwcZB+/74Oa1Hh2sgyMcjlYVaqitcwZhjhiPbGYSv9mWNL9WLFllbEFQ4TI2pyZDvrF2tWuR3ZV/UV5yR6dxHLam8FsrgkZAznn/ABCr1aiwfLlgTjHocwUULZpiKbLN4yoQIMknzBj+n6wbSypYpsTncOIdlaNXu4DYxkg+/l5GVPwj1nxPG7HkHnj3gkk7KjKUeJ8CTVE6tqWXDleMnuPWWtIMOGIx95Tr0n+4FjtuI/Qdx49DL
ml/MsetjtweDibOaZrHIvY7r6q/Rr24/wCNsEes84p6gyNsz3GfpPRNSDqumajT1kF2Hyg8TnLPhGlLaHa/dWqt+XjktkEc/vE1WSNdJlJGs0BOo6rTqLRtVV+UeuRPTfh9kXSodxGTzx3nI29EO2uxbEBStV2BMDPIB+n6eJu9NbdTTTQxFaoMZ9fWEsiocJKzuLk8XQGvPcjDenMNKV09OScegM03TOp+FQTc+4ZO0Z54A/nH3avx62NQRXClhg8gZIOfUcQU1qXxvyK6+W1mirUISys3BXtObao1AYP5m3a2VyCI3/VepaXxyuSP0hQMkY7/AOZVTU6qy26y9amxgoQMfvM58jTdik1XGHZUXNTFWawV4bbyCPp/WLNdDIwryeOctnHPftxC8XULWfBD1Oc42+hxn/EVQHFFewDxBk9uceh/vIhGm6Cc94q2TqKggNhYlyCd273lBjrEO2vaVHAJGZYuGpa5iagFICqfrn/77iLYahmJNTD6IT/WWo2ZKeo06evUjwyWBPYgc9+0ahZs1h2zznB4/jEaW90fcufExlFPkfeS6ucEg8/qAPaTSZMJSiuGeEM2FFO8H5Qe0LQ0hQzWFnf9RDY/dALEMjF+c/KAeMRh25NmSDjAx5xoWoxWw5TbxxyZDnbWGZS2eBzxArsZd24nHYZ5Mx7Du8tv84tSqpFgbbPm4C4Cn+ghquzaDuUkd+5AxKouG0BQAM8kd45tWdmdxKnsT3EprnCWhhqOAKnU7h6wUZktc7shOAfcSqlpVtoOQ0YzMqfKQMYyPWZyteBwim+hOWZ87DwcAE8CRZZsXavPpx2MTZqAzrwEO7PB7TDqa2DgHt29zGotroNJNjltcoSNuMENk5+8YlwRlUHaOdxlJeOF5A/jAssG47GPfOJWrFw2VOo5ZyrFhnaeOcnt/OA5r3uSQvHI9CR2lFr25O7j0EXZaLlCjduJ3ZEbTY+ezYtmqtGrBYhSC2efvMrvVS424JXkjyJ85QbVONO2CBaTwzDt9pNGqXehuGSvBI84ga/RbWwFXUn5gCO/bEM6mrPzuQ3mFTjMpNdWu7aCwPme+Jm+s8sQT58SkyWjNMQSeP0do06jAZgvtKtB+Ro2jmzB7cRXQ7GOK0rIAO08/eM07oKH3LuJPBMBwCQPLJkNxUMesSdIm+C3arToWvcDIihrtHUPzGtuz/1WFq60dFLqCfeMrqrAUhBkSk0XZXGu8T56KztJ27bBgj3jfmH6gffEJlUcgDPMtafndnmTKZpGLkVLHbZkeXbiA1j2bc9/rLiqu1hjzigoAOB5xJ30j3QqwqtJ+QknvI0aM4xgnmWUUE4Ih0AA8cfNBy4OK2kokHSXKGYVnbKqqC5PbHlN7RY5JUscHymr6hWiaj5VAnPg+Q5yo6vk/F+lWmVWqLJ3wM94S6cj51yPL6wn7geWe0tr5jyE6rOPU19wYhVABPnAOnduQRNjqcC4ADjErooNbEjnMZSVmua3wXanZlm/aheEB+q0Z85YZVJHA4gvWhYkqItWjV5IOk0f/9k="
})"_json.dump());
auto summary = coll->get_summary_json();
ASSERT_EQ(1, summary["num_documents"]);
collectionManager.dispose();
delete store;
store = new Store("/tmp/typesense_test/collection_vector_search");
collectionManager.init(store, 1.0, "auth_key", quit);
auto load_op = collectionManager.load(8, 1000);
if(!load_op.ok()) {
LOG(ERROR) << load_op.error();
}
ASSERT_TRUE(load_op.ok());
coll = collectionManager.get_collection("test").get();
ASSERT_EQ(1, coll->get_summary_json()["num_documents"]);
}
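// With inner-product ("ip") distance, hits beyond distance_threshold are assigned FLT_MAX and are
// tie-broken by rank_score; without a threshold, raw (possibly negative) inner-product distances are returned.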
TEST_F(CollectionVectorTest, TestDistanceThresholdWithIP) {
auto schema_json = R"({
"name": "products",
"fields":[
{"name": "name","type": "string"},
{"name": "rank_score", "type": "float"},
{"name": "embedding","type": "float[]", "num_dim":5, "optinal":true, "vec_dist": "ip"}
],
"default_sorting_field": "rank_score"
})"_json;
auto coll_op = collectionManager.create_collection(schema_json);
ASSERT_TRUE(coll_op.ok());
auto coll = coll_op.get();
std::mt19937 rng;
rng.seed(47);
std::uniform_real_distribution<> distrib(-1,1);
std::uniform_int_distribution<>distrib2(0,100);
nlohmann::json doc;
for (auto i = 0; i < 5; ++i) {
std::vector<float> vector(5);
std::generate(vector.begin(), vector.end(), [&](){ return distrib(rng); });
doc["name"] = "document_" + std::to_string(i);
doc["rank_score"] = distrib2(rng);
doc["embedding"] = vector;
ASSERT_TRUE(coll->add(doc.dump()).ok());
}
    // results whose vector distance exceeds distance_threshold will have tied scores and will be sorted on rank_score
std::map<std::string, std::string> req_params = {
{"collection", "products"},
{"q", "document"},
{"query_by", "name"},
{"sort_by", "_text_match:desc,"
"_vector_query(embedding:([0.11731103425347378, -0.6694758317235057, -0.6211945774857595, -0.27966758971688255, -0.4683744007950299],"
"distance_threshold:1)):asc,"
"rank_score:desc"},
{"exclude_fields", "embedding"}
};
nlohmann::json embedded_params;
std::string json_res;
auto now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
auto res = nlohmann::json::parse(json_res);
ASSERT_EQ(5, res["found"].get<size_t>());
ASSERT_EQ(93, res["hits"][0]["document"]["rank_score"].get<size_t>());
ASSERT_EQ(0.2189185470342636, res["hits"][0]["vector_distance"].get<float>());
ASSERT_EQ(51, res["hits"][1]["document"]["rank_score"].get<size_t>());
ASSERT_EQ(0.7371898889541626, res["hits"][1]["vector_distance"].get<float>());
ASSERT_EQ(94, res["hits"][2]["document"]["rank_score"].get<size_t>());
ASSERT_EQ(3.4028232635611926e+38, res["hits"][2]["vector_distance"].get<float>());
ASSERT_EQ(80, res["hits"][3]["document"]["rank_score"].get<size_t>());
ASSERT_EQ(3.4028232635611926e+38, res["hits"][3]["vector_distance"].get<float>());
ASSERT_EQ(18, res["hits"][4]["document"]["rank_score"].get<size_t>());
ASSERT_EQ(3.4028232635611926e+38, res["hits"][4]["vector_distance"].get<float>());
// with missing field name
req_params = {
{"collection", "products"},
{"q", "document"},
{"query_by", "name"},
{"sort_by", "_text_match:desc,"
"_vector_query(embeddingx:([0.11731103425347378, -0.6694758317235057, -0.6211945774857595, -0.27966758971688255, -0.4683744007950299],"
"distance_threshold:1)):asc,"
"rank_score:desc"},
{"exclude_fields", "embedding"}
};
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
ASSERT_FALSE(search_op.ok());
ASSERT_EQ("Malformed vector query string: could not find a field named `embeddingx`.", search_op.error());
    // inner product distances should work when distance_threshold is not given
req_params = {
{"collection", "products"},
{"q", "document"},
{"query_by", "name"},
{"sort_by", "_text_match:desc,_vector_query(embedding:([-100,-100,-100,-100,-100])):asc,rank_score:desc"},
{"exclude_fields", "embedding"}
};
now_ts = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
search_op = collectionManager.do_search(req_params, embedded_params, json_res, now_ts);
res = nlohmann::json::parse(json_res);
ASSERT_EQ(5, res["found"].get<size_t>());
ASSERT_EQ("document_1", res["hits"][0]["document"]["name"]);
ASSERT_EQ(-45.23314666748047, res["hits"][0]["vector_distance"].get<float>());
ASSERT_EQ("document_2", res["hits"][1]["document"]["name"]);
ASSERT_EQ(-38.66290283203125, res["hits"][1]["vector_distance"].get<float>());
ASSERT_EQ("document_4", res["hits"][2]["document"]["name"]);
ASSERT_EQ(-36.0988655090332, res["hits"][2]["vector_distance"].get<float>());
ASSERT_EQ("document_3", res["hits"][3]["document"]["name"]);
ASSERT_EQ(9.637892723083496, res["hits"][3]["vector_distance"].get<float>());
ASSERT_EQ("document_0", res["hits"][4]["document"]["name"]);
ASSERT_EQ(288.0364685058594, res["hits"][4]["vector_distance"].get<float>());
}
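// Without use_aux_score only the best text match carries a text_match score; with it enabled, vector-only
// hits also receive auxiliary text_match scores (document 3 matches no query tokens, so it stays at 0).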
TEST_F(CollectionVectorTest, HybridSearchAuxScoreTest) {
nlohmann::json schema = R"({
"name": "test",
"fields": [
{
"name": "name",
"type": "string"
},
{
"name": "embedding",
"type": "float[]",
"embed": {
"from": [
"name"
],
"model_config": {
"model_name": "ts/e5-small"
}
}
}
]
})"_json;
EmbedderManager::set_model_dir("/tmp/typesense_test/models");
auto collection_create_op = collectionManager.create_collection(schema);
ASSERT_TRUE(collection_create_op.ok());
auto coll = collection_create_op.get();
auto add_op = coll->add(R"({
"name": "Nike running shoes for men",
"id": "0"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "Nike running sneakers",
"id": "1"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "adidas shoes",
"id": "2"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
add_op = coll->add(R"({
"name": "puma",
"id": "3"
})"_json.dump());
ASSERT_TRUE(add_op.ok());
bool use_aux_score = false;
auto res = coll->search("nike running shoes", {"name", "embedding"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
{"embedding"}, 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", true, true, false, 0, true,
true, DEFAULT_FILTER_BY_CANDIDATES, use_aux_score).get();
ASSERT_EQ(4, res["hits"].size());
ASSERT_FLOAT_EQ(0.09585630893707275, res["hits"][0]["vector_distance"].get<float>());
ASSERT_FLOAT_EQ(0.07914221286773682, res["hits"][1]["vector_distance"].get<float>());
ASSERT_FLOAT_EQ(0.15472877025604248, res["hits"][2]["vector_distance"].get<float>());
ASSERT_FLOAT_EQ(0.2496563196182251, res["hits"][3]["vector_distance"].get<float>());
ASSERT_EQ(1736172819517014137, res["hits"][0]["text_match"].get<std::size_t>());
ASSERT_EQ(0, res["hits"][1]["text_match"].get<std::size_t>());
ASSERT_EQ(0, res["hits"][2]["text_match"].get<std::size_t>());
ASSERT_EQ(0, res["hits"][3]["text_match"].get<std::size_t>());
use_aux_score = true;
res = coll->search("nike running shoes", {"name", "embedding"}, "", {},
{}, {2}, 10, 1,FREQUENCY, {true},
Index::DROP_TOKENS_THRESHOLD, spp::sparse_hash_set<std::string>(),
{"embedding"}, 10, "",
30, 4, "", 40,
{}, {}, {}, 0,"<mark>",
"</mark>", {}, 1000,true,
false, true, "", false,
6000*1000, 4, 7, fallback, 4,
{off}, INT16_MAX, INT16_MAX,2,
2, false, "", true,
0, max_score, 100, 0, 0,
"exhaustive", 30000, 2, "",
{},{}, "right_to_left", true,
true, false, "", "", "",
"", true, true, false, 0, true,
true, DEFAULT_FILTER_BY_CANDIDATES, use_aux_score).get();
ASSERT_EQ(4, res["hits"].size());
ASSERT_FLOAT_EQ(0.09585630893707275, res["hits"][0]["vector_distance"].get<float>());
ASSERT_FLOAT_EQ(0.07914221286773682, res["hits"][1]["vector_distance"].get<float>());
ASSERT_FLOAT_EQ(0.15472877025604248, res["hits"][2]["vector_distance"].get<float>());
ASSERT_FLOAT_EQ(0.2496563196182251, res["hits"][3]["vector_distance"].get<float>());
ASSERT_EQ(1736172819517014137, res["hits"][0]["text_match"].get<std::size_t>());
ASSERT_EQ(2211897868288, res["hits"][1]["text_match"].get<std::size_t>());
ASSERT_EQ(1108091338752, res["hits"][2]["text_match"].get<std::size_t>());
ASSERT_EQ(0, res["hits"][3]["text_match"].get<std::size_t>()); //document with id:3 won't have any text_match
}
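// Fields carrying an `embed` definition must be declared as float[]; string and int[] are rejected.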
TEST_F(CollectionVectorTest, EmbedFieldMustBeFloatArray) {
Collection *coll1;
std::vector<field> fields = {
field("title", field_types::STRING, false),
field("embedding", field_types::STRING, false) // intentionally wrong type
};
nlohmann::json field_json;
field_json["name"] = "embedding";
field_json["type"] = "string"; // wrong type
field_json["embed"] = nlohmann::json::object();
field_json["embed"]["from"] = {"title"};
field_json["embed"]["model_config"] = nlohmann::json::object();
field_json["embed"]["model_config"]["model_name"] = "ts/e5-small";
std::vector<field> parsed_fields;
std::string fallback_field_type;
auto arr = nlohmann::json::array();
arr.push_back(field_json);
auto field_op = field::json_fields_to_fields(false, arr, fallback_field_type, parsed_fields);
ASSERT_FALSE(field_op.ok());
ASSERT_EQ("Fields with the `embed` parameter can only be of type `float[]`.", field_op.error());
// Try with int[] type as well
field_json["type"] = "int[]";
arr.clear();
arr.push_back(field_json);
parsed_fields.clear();
field_op = field::json_fields_to_fields(false, arr, fallback_field_type, parsed_fields);
ASSERT_FALSE(field_op.ok());
ASSERT_EQ("Fields with the `embed` parameter can only be of type `float[]`.", field_op.error());
}
| 370,078
|
C++
|
.cpp
| 4,228
| 74.035005
| 81,012
| 0.674929
|
typesense/typesense
| 20,571
| 633
| 548
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
3,747
|
system_metrics_test.cpp
|
typesense_typesense/test/system_metrics_test.cpp
|
#include <gtest/gtest.h>
#include "system_metrics.h"
TEST(SystemMetricsTest, ParsingNetworkStats) {
std::string proc_net_dev_path = std::string(ROOT_DIR)+"test/resources/proc_net_dev.txt";
uint64_t received_bytes, sent_bytes;
SystemMetrics::linux_get_network_data(proc_net_dev_path, received_bytes, sent_bytes);
ASSERT_EQ(324278716, received_bytes);
ASSERT_EQ(93933882, sent_bytes);
}
| 405
|
C++
|
.cpp
| 9
| 41.777778
| 92
| 0.747475
|
typesense/typesense
| 20,571
| 633
| 548
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
3,748
|
raft_server_test.cpp
|
typesense_typesense/test/raft_server_test.cpp
|
#include <gtest/gtest.h>
#include <string>
#include "raft_server.h"
TEST(RaftServerTest, ResolveNodesConfigWithHostNames) {
ASSERT_EQ("127.0.0.1:8107:8108,127.0.0.1:7107:7108,127.0.0.1:6107:6108",
ReplicationState::resolve_node_hosts("127.0.0.1:8107:8108,127.0.0.1:7107:7108,127.0.0.1:6107:6108"));
ASSERT_EQ("127.0.0.1:8107:8108,127.0.0.1:7107:7108,127.0.0.1:6107:6108",
ReplicationState::resolve_node_hosts("localhost:8107:8108,localhost:7107:7108,localhost:6107:6108"));
ASSERT_EQ("localhost:8107:8108localhost:7107:7108,127.0.0.1:6107:6108",
ReplicationState::resolve_node_hosts("localhost:8107:8108localhost:7107:7108,localhost:6107:6108"));
// hostname must be less than 64 chars
ASSERT_EQ("",
ReplicationState::resolve_node_hosts("typesense-node-2.typesense-service.typesense-"
"namespace.svc.cluster.local:6107:6108"));
}
| 960
|
C++
|
.cpp
| 15
| 54.333333
| 115
| 0.673036
|
typesense/typesense
| 20,571
| 633
| 548
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
3,749
|
string_utils_test.cpp
|
typesense_typesense/test/string_utils_test.cpp
|
#include <gtest/gtest.h>
#include "string_utils.h"
#include <iconv.h>
#include <unicode/translit.h>
#include <json.hpp>
#include <join.h>
TEST(StringUtilsTest, ShouldJoinString) {
std::vector<std::string> parts = {"foo", "bar", "baz", "bazinga"};
const std::string & joined_str1 = StringUtils::join(parts, "/");
ASSERT_STREQ("foo/bar/baz/bazinga", joined_str1.c_str());
const std::string & joined_str2 = StringUtils::join(parts, "/", 2);
ASSERT_STREQ("baz/bazinga", joined_str2.c_str());
const std::string & joined_str3 = StringUtils::join({}, "/");
ASSERT_STREQ("", joined_str3.c_str());
}
TEST(StringUtilsTest, HMAC) {
std::string digest1 = StringUtils::hmac("KeyVal", "{\"filter_by\": \"user_id:1080\"}");
ASSERT_STREQ("IvjqWNZ5M5ElcvbMoXj45BxkQrZG4ZKEaNQoRioCx2s=", digest1.c_str());
}
TEST(StringUtilsTest, UInt32Validation) {
std::string big_num = "99999999999999999999999999999999";
ASSERT_FALSE(StringUtils::is_uint32_t(big_num));
}
TEST(StringUtilsTest, ShouldSplitString) {
nlohmann::json obj1;
obj1["s"] = "Line one.\nLine two.\n";
nlohmann::json obj2;
obj2["s"] = "Line 1.\nLine 2.\n";
std::string text;
text = obj1.dump();
text += "\n" + obj2.dump();
std::vector<std::string> lines;
StringUtils::split(text, lines, "\n");
ASSERT_STREQ("{\"s\":\"Line one.\\nLine two.\\n\"}", lines[0].c_str());
ASSERT_STREQ("{\"s\":\"Line 1.\\nLine 2.\\n\"}", lines[1].c_str());
// empty string should produce empty list
std::vector<std::string> lines_empty;
StringUtils::split("", lines_empty, "\n");
ASSERT_TRUE(lines_empty.empty());
    // restrict the number of returned values via max_values
std::vector<std::string> lines_limited;
size_t end_index = StringUtils::split("a b c d e f", lines_limited, " ", false, true, 0, 3);
ASSERT_EQ(3, lines_limited.size());
ASSERT_EQ(6, end_index);
// start from an arbitrary position in string
std::vector<std::string> lines_custom_start;
end_index = StringUtils::split("a b c d e f", lines_custom_start, " ", false, true, 2, 100);
ASSERT_EQ(5, lines_custom_start.size());
ASSERT_EQ(11, end_index);
std::string comma_and_space = "foo, bar";
std::vector<std::string> comma_space_parts;
StringUtils::split(comma_and_space, comma_space_parts, ",");
ASSERT_STREQ("foo", comma_space_parts[0].c_str());
ASSERT_STREQ("bar", comma_space_parts[1].c_str());
// preserve trailing space
std::string str_trailing_space = "foo\nbar ";
std::vector<std::string> trailing_space_parts;
StringUtils::split(str_trailing_space, trailing_space_parts, "\n", false, false);
ASSERT_EQ(2, trailing_space_parts.size());
ASSERT_EQ("foo", trailing_space_parts[0]);
ASSERT_EQ("bar ", trailing_space_parts[1]);
}
TEST(StringUtilsTest, ShouldTrimString) {
std::string str = " a ";
StringUtils::trim(str);
ASSERT_STREQ("a", str.c_str());
str = "abc";
StringUtils::trim(str);
ASSERT_STREQ("abc", str.c_str());
str = " abc def";
StringUtils::trim(str);
ASSERT_STREQ("abc def", str.c_str());
str = " abc def ";
StringUtils::trim(str);
ASSERT_STREQ("abc def", str.c_str());
str = " ";
StringUtils::trim(str);
ASSERT_STREQ("", str.c_str());
}
TEST(StringUtilsTest, ShouldComputeSHA256) {
ASSERT_STREQ("c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2",
StringUtils::hash_sha256("foobar").c_str());
ASSERT_STREQ("d8705968091d40b60436675240712c584c187eef091514d4092483dc342ca3de",
StringUtils::hash_sha256("some random key").c_str());
ASSERT_STREQ("6613f67d3d78d48e2678faf55c33fabc5895c538ce70ea10218ce9b7eccbf394",
StringUtils::hash_sha256("791a27668b3e01fc6ab3482b6e6a36255154df3ecd7dcec").c_str());
}
TEST(StringUtilsTest, ShouldCheckFloat) {
ASSERT_TRUE(StringUtils::is_float("0.23"));
ASSERT_TRUE(StringUtils::is_float("9.872019290924072e-07"));
ASSERT_FALSE(StringUtils::is_float("4.2f"));
ASSERT_FALSE(StringUtils::is_float("-5.3f"));
ASSERT_FALSE(StringUtils::is_float("+6.2f"));
ASSERT_FALSE(StringUtils::is_float("0.x87"));
ASSERT_FALSE(StringUtils::is_float("1.0.0"));
ASSERT_FALSE(StringUtils::is_float("2f"));
ASSERT_FALSE(StringUtils::is_float("2.0f1"));
}
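// parse_query_string should URL-decode values, keep empty values, let the last duplicate win for
// ordinary keys, and merge repeated filter_by parameters with "&&".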
TEST(StringUtilsTest, ShouldParseQueryString) {
std::map<std::string, std::string> qmap;
std::string qs = "?q=bar&filter_by=points: >100 && points: <200";
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(2, qmap.size());
ASSERT_EQ("bar", qmap["q"]);
ASSERT_EQ("points: >100 && points: <200", qmap["filter_by"]);
qs = "?q=bar&filter_by=points%3A%20%3E100%20%26%26%20points%3A%20%3C200";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(2, qmap.size());
ASSERT_EQ("bar", qmap["q"]);
ASSERT_EQ("points: >100 && points: <200", qmap["filter_by"]);
qs = "?q=bar&filter_by=points%3A%20%3E100%20%26%26%20points%3A%20%3C200&";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(2, qmap.size());
ASSERT_EQ("bar", qmap["q"]);
ASSERT_EQ("points: >100 && points: <200", qmap["filter_by"]);
qs = "q=bar&filter_by=baz&&";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(2, qmap.size());
ASSERT_EQ("bar", qmap["q"]);
ASSERT_EQ("baz&", qmap["filter_by"]);
qs = "q=bar&filter_by=";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(2, qmap.size());
ASSERT_EQ("bar", qmap["q"]);
ASSERT_EQ("", qmap["filter_by"]);
qs = "q=bread && breakfast&filter_by=";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(2, qmap.size());
ASSERT_EQ("bread && breakfast", qmap["q"]);
ASSERT_EQ("", qmap["filter_by"]);
qs = "q=bread & breakfast&filter_by=";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(3, qmap.size());
ASSERT_EQ("bread ", qmap["q"]);
ASSERT_EQ("", qmap[" breakfast"]);
ASSERT_EQ("", qmap["filter_by"]);
qs = "q=bar&filter_by=&";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(2, qmap.size());
ASSERT_EQ("bar", qmap["q"]);
ASSERT_EQ("", qmap["filter_by"]);
qs = "q=bar&filter_by=points :> 100&enable_typos";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(3, qmap.size());
ASSERT_EQ("bar", qmap["q"]);
ASSERT_EQ("points :> 100", qmap["filter_by"]);
ASSERT_EQ("", qmap["enable_typos"]);
qs = "foo=bar&baz=&bazinga=true";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(3, qmap.size());
ASSERT_EQ("bar", qmap["foo"]);
ASSERT_EQ("", qmap["baz"]);
ASSERT_EQ("true", qmap["bazinga"]);
qs = "foo=bar&bazinga=true&foo=buzz";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(2, qmap.size());
ASSERT_EQ("buzz", qmap["foo"]);
ASSERT_EQ("true", qmap["bazinga"]);
qs = "filter_by=points:>100&bazinga=true&filter_by=points:<=200";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(2, qmap.size());
ASSERT_EQ("points:>100&&points:<=200", qmap["filter_by"]);
ASSERT_EQ("true", qmap["bazinga"]);
qs = "filter_by=points:>100 && brand:= nike&bazinga=true&filter_by=points:<=200";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(2, qmap.size());
ASSERT_EQ("points:>100 && brand:= nike&&points:<=200", qmap["filter_by"]);
ASSERT_EQ("true", qmap["bazinga"]);
qs = "foo";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(1, qmap.size());
ASSERT_EQ("", qmap["foo"]);
qs = "?foo=";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(1, qmap.size());
ASSERT_EQ("", qmap["foo"]);
qs = "?foo";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(1, qmap.size());
ASSERT_EQ("", qmap["foo"]);
qs = "?";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(0, qmap.size());
qs = "";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(0, qmap.size());
qs = "&";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(0, qmap.size());
qs = "&&";
qmap.clear();
StringUtils::parse_query_string(qs, qmap);
ASSERT_EQ(0, qmap.size());
}
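// split_to_values honours backtick quoting, so wrapped values may contain commas; escaped backticks are preserved.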
TEST(StringUtilsTest, ShouldParseStringifiedList) {
std::string str = "John Galt, Random Jack";
std::vector<std::string> strs;
StringUtils::split_to_values(str, strs);
ASSERT_EQ(2, strs.size());
ASSERT_EQ("John Galt", strs[0]);
ASSERT_EQ("Random Jack", strs[1]);
strs.clear();
str = "`John Galt`, `Random, Jack`";
StringUtils::split_to_values(str, strs);
ASSERT_EQ(2, strs.size());
ASSERT_EQ("John Galt", strs[0]);
ASSERT_EQ("Random, Jack", strs[1]);
strs.clear();
str = "`John Galt, `Random, Jack`";
StringUtils::split_to_values(str, strs);
ASSERT_EQ(2, strs.size());
ASSERT_EQ("John Galt, Random", strs[0]);
ASSERT_EQ("Jack", strs[1]);
strs.clear();
str = "`Traveller's \\`delight\\`!`, Not wrapped, Last word";
StringUtils::split_to_values(str, strs);
ASSERT_EQ(3, strs.size());
ASSERT_EQ("Traveller's \\`delight\\`!", strs[0]);
ASSERT_EQ("Not wrapped", strs[1]);
ASSERT_EQ("Last word", strs[2]);
strs.clear();
str = "`John Galt`";
StringUtils::split_to_values(str, strs);
ASSERT_EQ(1, strs.size());
ASSERT_EQ("John Galt", strs[0]);
}
TEST(StringUtilsTest, ShouldTrimCurlySpaces) {
ASSERT_EQ("foo {bar}", StringUtils::trim_curly_spaces("foo { bar }"));
ASSERT_EQ("foo {bar}", StringUtils::trim_curly_spaces("foo { bar }"));
ASSERT_EQ("", StringUtils::trim_curly_spaces(""));
ASSERT_EQ("{}", StringUtils::trim_curly_spaces("{ }"));
ASSERT_EQ("foo {bar} {baz}", StringUtils::trim_curly_spaces("foo { bar } { baz}"));
}
TEST(StringUtilsTest, ContainsWord) {
ASSERT_TRUE(StringUtils::contains_word("foo bar", "foo"));
ASSERT_TRUE(StringUtils::contains_word("foo bar", "bar"));
ASSERT_TRUE(StringUtils::contains_word("foo bar baz", "bar"));
ASSERT_TRUE(StringUtils::contains_word("foo bar baz", "foo bar"));
ASSERT_TRUE(StringUtils::contains_word("foo bar baz", "bar baz"));
ASSERT_FALSE(StringUtils::contains_word("foobar", "bar"));
ASSERT_FALSE(StringUtils::contains_word("foobar", "foo"));
ASSERT_FALSE(StringUtils::contains_word("foobar baz", "bar"));
ASSERT_FALSE(StringUtils::contains_word("foobar baz", "bar baz"));
ASSERT_FALSE(StringUtils::contains_word("baz foobar", "foo"));
}
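// split_facet should split only on top-level commas, keeping range facet definitions such as
// grade(A:[80,100], ...) intact.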
TEST(StringUtilsTest, ShouldSplitRangeFacet){
std::string range_facets_string = "score(fail:[0, 40], pass:[40, 100]), grade(A:[80,100], B:[60, 80], C:[40, 60])";
std::vector<std::string> range_facets;
StringUtils::split_facet(range_facets_string, range_facets);
ASSERT_EQ("score(fail:[0, 40], pass:[40, 100])", range_facets[0]);
ASSERT_EQ("grade(A:[80,100], B:[60, 80], C:[40, 60])", range_facets[1]);
std::string facets_string = "score, grade";
std::vector<std::string> facets;
StringUtils::split_facet(facets_string, facets);
ASSERT_EQ("score", facets[0]);
ASSERT_EQ("grade", facets[1]);
std::string mixed_facets_string = "score, grade(A:[80,100], B:[60, 80], C:[40, 60]), rank";
std::vector<std::string> mixed_facets;
StringUtils::split_facet(mixed_facets_string, mixed_facets);
ASSERT_EQ("score", mixed_facets[0]);
ASSERT_EQ("grade(A:[80,100], B:[60, 80], C:[40, 60])", mixed_facets[1]);
ASSERT_EQ("rank", mixed_facets[2]);
// empty string should produce empty list
std::vector<std::string> lines_empty;
StringUtils::split_facet("", lines_empty);
ASSERT_TRUE(lines_empty.empty());
}
void tokenizeTestHelper(const std::string& filter_query, const std::vector<std::string>& tokenList) {
std::queue<std::string> tokenizeOutput;
auto tokenize_op = StringUtils::tokenize_filter_query(filter_query, tokenizeOutput);
ASSERT_TRUE(tokenize_op.ok());
for (auto const& token: tokenList) {
ASSERT_EQ(token, tokenizeOutput.front());
tokenizeOutput.pop();
}
ASSERT_TRUE(tokenizeOutput.empty());
}
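// tokenize_filter_query should emit parentheses and logical operators as separate tokens while keeping
// field expressions (backtick-quoted values, ranges, geo filters and $Customers(...) join clauses) intact.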
TEST(StringUtilsTest, TokenizeFilterQuery) {
std::string filter_query;
std::vector<std::string> tokenList;
filter_query = "name: Steve Smith";
tokenList = {"name: Steve Smith"};
tokenizeTestHelper(filter_query, tokenList);
filter_query = "name: `Toccata & Fugue`";
tokenList = {"name: `Toccata & Fugue`"};
tokenizeTestHelper(filter_query, tokenList);
filter_query = "name: [Steve Smith, `Jack & Jill`]";
tokenList = {"name: [Steve Smith, `Jack & Jill`]"};
tokenizeTestHelper(filter_query, tokenList);
filter_query = "age:[10..100]";
tokenList = {"age:[10..100]"};
tokenizeTestHelper(filter_query, tokenList);
filter_query = "age:>20 && category:= [`Running Shoes, Men`, Sneaker]";
tokenList = {"age:>20", "&&", "category:= [`Running Shoes, Men`, Sneaker]"};
tokenizeTestHelper(filter_query, tokenList);
filter_query = "location:(48.906, 2.343, 5 mi)";
tokenList = {"location:(48.906, 2.343, 5 mi)"};
tokenizeTestHelper(filter_query, tokenList);
filter_query = "((age: <5 || age: >10) && category:= [shoes]) || is_curated: true";
tokenList = {"(", "(", "age: <5", "||", "age: >10", ")", "&&", "category:= [shoes]", ")", "||", "is_curated: true"};
tokenizeTestHelper(filter_query, tokenList);
filter_query = "((age:<5||age:>10)&&location:(48.906,2.343,5mi))||tags:AT&T";
tokenList = {"(", "(", "age:<5", "||", "age:>10", ")", "&&", "location:(48.906,2.343,5mi)", ")", "||", "tags:AT&T"};
tokenizeTestHelper(filter_query, tokenList);
filter_query = "((age: <5 || age: >10) && category:= [shoes]) &&"
" $Customers(customer_id:=customer_a && (product_price:>100 && product_price:<200))";
tokenList = {"(", "(", "age: <5", "||", "age: >10", ")", "&&", "category:= [shoes]", ")", "&&",
"$Customers(customer_id:=customer_a && (product_price:>100 && product_price:<200))"};
tokenizeTestHelper(filter_query, tokenList);
}
void splitIncludeExcludeTestHelper(const std::string& include_exclude_fields, const std::vector<std::string>& expected) {
std::vector<std::string> output;
auto tokenize_op = StringUtils::split_include_exclude_fields(include_exclude_fields, output);
ASSERT_TRUE(tokenize_op.ok());
ASSERT_EQ(expected.size(), output.size());
for (auto i = 0; i < output.size(); i++) {
ASSERT_EQ(expected[i], output[i]);
}
}
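// split_include_exclude_fields must trim whitespace, drop trailing empties, and keep nested
// $Collection(...) expressions (including "as alias" suffixes) as single tokens.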
TEST(StringUtilsTest, SplitIncludeExcludeFields) {
std::string include_fields;
std::vector<std::string> tokens;
include_fields = " id, title , count ";
tokens = {"id", "title", "count"};
splitIncludeExcludeTestHelper(include_fields, tokens);
include_fields = "id, $Collection(title, pref*),count";
tokens = {"id", "$Collection(title, pref*)", "count"};
splitIncludeExcludeTestHelper(include_fields, tokens);
include_fields = "id, $Collection(title, pref*), count, ";
tokens = {"id", "$Collection(title, pref*)", "count"};
splitIncludeExcludeTestHelper(include_fields, tokens);
include_fields = "$Collection(title, pref*) as coll";
tokens = {"$Collection(title, pref*) as coll"};
splitIncludeExcludeTestHelper(include_fields, tokens);
include_fields = "id, $Collection(title, pref*) as coll , count, ";
tokens = {"id", "$Collection(title, pref*) as coll", "count"};
splitIncludeExcludeTestHelper(include_fields, tokens);
include_fields = "$Collection(title, pref*: merge) as coll";
tokens = {"$Collection(title, pref*: merge) as coll"};
splitIncludeExcludeTestHelper(include_fields, tokens);
include_fields = "$product_variants(id,$inventory(qty,sku,$retailer(id,title: merge) as retailer_info)) as variants";
tokens = {"$product_variants(id,$inventory(qty,sku,$retailer(id,title: merge) as retailer_info)) as variants"};
splitIncludeExcludeTestHelper(include_fields, tokens);
std::string exclude_fields = " id, title, $Collection(title), count,";
tokens = {"id", "title", "$Collection(title)", "count"};
splitIncludeExcludeTestHelper(exclude_fields, tokens);
exclude_fields = " id, title , count, $Collection(title), $product_variants(id,$inventory(qty,sku,$retailer(id,title)))";
tokens = {"id", "title", "count", "$Collection(title)", "$product_variants(id,$inventory(qty,sku,$retailer(id,title)))"};
splitIncludeExcludeTestHelper(exclude_fields, tokens);
}
TEST(StringUtilsTest, SplitReferenceIncludeExcludeFields) {
std::string include_fields = "$retailer(id,title,strategy:merge) as retailer_info, strategy:merge) as variants, foo", token;
size_t index = 0;
auto tokenize_op = Join::split_reference_include_exclude_fields(include_fields, index, token);
ASSERT_TRUE(tokenize_op.ok());
ASSERT_EQ("$retailer(id,title,strategy:merge) as retailer_info", token);
ASSERT_EQ(", strategy:merge) as variants, foo", include_fields.substr(index));
include_fields = "$inventory(qty,sku,$retailer(id,title, strategy : merge) as retailer_info) as inventory) as variants, foo";
index = 0;
tokenize_op = Join::split_reference_include_exclude_fields(include_fields, index, token);
ASSERT_TRUE(tokenize_op.ok());
ASSERT_EQ("$inventory(qty,sku,$retailer(id,title, strategy : merge) as retailer_info) as inventory", token);
ASSERT_EQ(") as variants, foo", include_fields.substr(index));
std::string exclude_fields = "$Collection(title), $product_variants(id,$inventory(qty,sku,$retailer(id,title)))";
index = 0;
tokenize_op = Join::split_reference_include_exclude_fields(exclude_fields, index, token);
ASSERT_TRUE(tokenize_op.ok());
ASSERT_EQ("$Collection(title)", token);
ASSERT_EQ(", $product_variants(id,$inventory(qty,sku,$retailer(id,title)))", exclude_fields.substr(index));
exclude_fields = "$inventory(qty,sku,$retailer(id,title)), foo)";
index = 0;
tokenize_op = Join::split_reference_include_exclude_fields(exclude_fields, index, token);
ASSERT_TRUE(tokenize_op.ok());
ASSERT_EQ("$inventory(qty,sku,$retailer(id,title))", token);
ASSERT_EQ(", foo)", exclude_fields.substr(index));
}
| size: 18,516 | language: C++ | extension: .cpp | total_lines: 394 | avg_line_length: 41.967005 | max_line_length: 131 | alphanum_fraction: 0.647441 | repo: typesense/typesense | stars: 20,571 | forks: 633 | open_issues: 548 | license: GPL-3.0 | extracted: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags: all false |
| id: 3,750 | file_name: personalization_model_test.cpp | file_path: typesense_typesense/test/personalization_model_test.cpp |
#include <gtest/gtest.h>
#include <string>
#include <filesystem>
#include <fstream>
#include "personalization_model.h"
#include "collection_manager.h"
class PersonalizationModelTest : public ::testing::Test {
protected:
std::string temp_dir;
Store *store;
CollectionManager& collectionManager = CollectionManager::get_instance();
std::atomic<bool> quit = false;
void SetUp() override {
temp_dir = (std::filesystem::temp_directory_path() / "personalization_model_test").string();
system(("rm -rf " + temp_dir + " && mkdir -p " + temp_dir).c_str());
std::string test_dir = "/tmp/typesense_test/models";
system(("rm -rf " + test_dir + " && mkdir -p " + test_dir).c_str());
EmbedderManager::set_model_dir(test_dir);
// Create test collection
std::string state_dir_path = "/tmp/typesense_test/personalization_model_test";
Config::get_instance().set_data_dir(state_dir_path);
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
nlohmann::json collection_schema = R"({
"name": "companies",
"fields": [
{"name": "name", "type": "string"}
]
})"_json;
store = new Store(state_dir_path);
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.create_collection(collection_schema);
}
void TearDown() override {
std::string test_dir = "/tmp/typesense_test";
system(("rm -rf " + test_dir).c_str());
collectionManager.dispose();
delete store;
}
std::string get_onnx_model_archive() {
std::string content = "This is a sample ONNX model content";
std::string filename = (temp_dir + "/model.onnx");
std::ofstream file(filename);
file << content;
file.close();
std::string archive_name = (temp_dir + "/model.tar.gz");
std::string command = "tar -czf " + archive_name + " -C " + temp_dir + " model.onnx";
system(command.c_str());
std::ifstream archive_file(archive_name, std::ios::binary);
std::string archive_content((std::istreambuf_iterator<char>(archive_file)), std::istreambuf_iterator<char>());
archive_file.close();
std::filesystem::remove(filename);
std::filesystem::remove(archive_name);
return archive_content;
}
std::string get_invalid_onnx_model_archive() {
std::string content = "This is an invalid ONNX model content";
std::string filename = (temp_dir + "/model.txt");
std::ofstream file(filename);
file << content;
file.close();
std::string archive_name = (temp_dir + "/model.tar.gz");
std::string command = "tar -czf " + archive_name + " -C " + temp_dir + " model.txt";
system(command.c_str());
std::ifstream archive_file(archive_name, std::ios::binary);
std::string archive_content((std::istreambuf_iterator<char>(archive_file)), std::istreambuf_iterator<char>());
return archive_content;
}
};
TEST_F(PersonalizationModelTest, ValidateModelBasic) {
nlohmann::json valid_model = {
{"id", "test-model"},
{"name", "ts/tyrec-1"},
{"collection", "companies"},
{"type", "recommendation"}
};
auto result = PersonalizationModel::validate_model(valid_model);
ASSERT_EQ(result.error(), "");
ASSERT_TRUE(result.ok());
}
TEST_F(PersonalizationModelTest, ValidateModelMissingFields) {
nlohmann::json invalid_model = {
{"name", "ts/tyrec-1"},
{"collection", "companies"}
};
auto result = PersonalizationModel::validate_model(invalid_model);
ASSERT_FALSE(result.ok());
ASSERT_EQ(result.code(), 400);
ASSERT_EQ(result.error(), "Missing or invalid 'id' field.");
}
TEST_F(PersonalizationModelTest, ValidateModelInvalidName) {
nlohmann::json invalid_model = {
{"id", "test-model"},
{"name", "invalid/tyrec-1"},
{"collection", "companies"},
{"type", "recommendation"}
};
auto result = PersonalizationModel::validate_model(invalid_model);
ASSERT_FALSE(result.ok());
ASSERT_EQ(result.code(), 400);
ASSERT_EQ(result.error(), "Model namespace must be 'ts'.");
}
TEST_F(PersonalizationModelTest, ValidateModelInvalidType) {
nlohmann::json invalid_model = {
{"id", "test-model"},
{"name", "ts/tyrec-1"},
{"collection", "companies"},
{"type", "invalid"}
};
auto result = PersonalizationModel::validate_model(invalid_model);
ASSERT_FALSE(result.ok());
ASSERT_EQ(result.code(), 400);
ASSERT_EQ(result.error(), "Invalid type. Must be either 'recommendation' or 'search'.");
}
TEST_F(PersonalizationModelTest, ValidateModelInvalidModelName) {
nlohmann::json invalid_model = {
{"id", "test-model"},
{"name", "ts/invalid-model"},
{"collection", "companies"},
{"type", "recommendation"}
};
auto result = PersonalizationModel::validate_model(invalid_model);
ASSERT_FALSE(result.ok());
ASSERT_EQ(result.code(), 400);
ASSERT_EQ(result.error(), "Invalid model name for type. Use 'tyrec-1' for recommendation and 'tyrec-2' for search.");
}
TEST_F(PersonalizationModelTest, GetModelSubdir) {
std::string model_id = "test-model";
std::string expected_path = EmbedderManager::get_model_dir() + "/per_" + model_id;
std::string result = PersonalizationModel::get_model_subdir(model_id);
ASSERT_EQ(result, expected_path);
ASSERT_TRUE(std::filesystem::exists(result));
}
TEST_F(PersonalizationModelTest, DeleteModel) {
std::string model_id = "test-model";
std::string model_path = PersonalizationModel::get_model_subdir(model_id);
// Create a dummy file in the model directory
std::ofstream test_file(model_path + "/test.txt");
test_file.close();
auto result = PersonalizationModel::delete_model(model_id);
ASSERT_TRUE(result.ok());
ASSERT_FALSE(std::filesystem::exists(model_path));
}
TEST_F(PersonalizationModelTest, CreateModel) {
std::string model_id = "test-model";
std::string model_path = PersonalizationModel::get_model_subdir(model_id);
std::string model_data = get_onnx_model_archive();
nlohmann::json model_json = {
{"id", model_id},
{"name", "ts/tyrec-1"},
{"collection", "companies"},
{"type", "recommendation"}
};
auto result = PersonalizationModel::create_model(model_id, model_json, model_data);
ASSERT_TRUE(result.ok());
ASSERT_TRUE(std::filesystem::exists(model_path));
}
TEST_F(PersonalizationModelTest, CreateModelFailsWithInvalidArchive) {
std::string model_id = "test-model";
std::string model_path = PersonalizationModel::get_model_subdir(model_id);
std::string invalid_model_data = get_invalid_onnx_model_archive();
nlohmann::json model_json = {
{"id", model_id},
{"name", "ts/tyrec-1"},
{"collection", "companies"},
{"type", "recommendation"}
};
auto result = PersonalizationModel::create_model(model_id, model_json, invalid_model_data);
ASSERT_FALSE(result.ok());
ASSERT_EQ(result.code(), 400);
ASSERT_EQ(result.error(), "Missing required model.onnx file in archive");
}
TEST_F(PersonalizationModelTest, UpdateModel) {
std::string model_id = "test-model";
std::string model_path = PersonalizationModel::get_model_subdir(model_id);
std::string model_data = get_onnx_model_archive();
nlohmann::json model_json = {
{"id", model_id},
{"name", "ts/tyrec-1"},
{"collection", "companies"},
{"type", "recommendation"}
};
std::string updated_model_data = get_onnx_model_archive();
auto update_result = PersonalizationModel::update_model(model_id, model_json, updated_model_data);
ASSERT_TRUE(update_result.ok());
ASSERT_TRUE(std::filesystem::exists(model_path));
}
| size: 8,034 | language: C++ | extension: .cpp | total_lines: 186 | avg_line_length: 36.715054 | max_line_length: 121 | alphanum_fraction: 0.643783 | repo: typesense/typesense | stars: 20,571 | forks: 633 | open_issues: 548 | license: GPL-3.0 | extracted: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags: all false |
| id: 3,751 | file_name: query_analytics.cpp | file_path: typesense_typesense/src/query_analytics.cpp |
#include "query_analytics.h"
#include "logger.h"
#include <algorithm>
#include <chrono>
#include <mutex>
#include "string_utils.h"
QueryAnalytics::QueryAnalytics(size_t k, bool enable_auto_aggregation)
: k(k), max_size(k * 2), auto_aggregation_enabled(enable_auto_aggregation) {
}
void QueryAnalytics::add(const std::string& key, const std::string& expanded_key,
const bool live_query, const std::string& user_id, uint64_t now_ts_us) {
if(live_query) {
// live query must be aggregated first to their final form as they could be prefix queries
if(now_ts_us == 0) {
now_ts_us = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
}
if(!umutex.try_lock()) {
// instead of locking we just skip incrementing keys during consolidation time
return ;
}
auto& queries = user_prefix_queries[user_id];
if(queries.size() < 100) {
// only live queries could send expanded queries
const std::string& actual_key = expand_query ? expanded_key : key;
if(actual_key.size() < max_query_length) {
queries.emplace_back(actual_key, now_ts_us);
}
}
umutex.unlock();
} else {
if(!lmutex.try_lock()) {
// instead of locking we just skip incrementing keys during consolidation time
return ;
}
auto it = local_counts.find(key);
if(it != local_counts.end()) {
it.value()++;
} else if(local_counts.size() < max_size && key.size() < max_query_length) {
// skip count when map has become too large (to prevent abuse)
local_counts.emplace(key, 1);
}
lmutex.unlock();
}
}
void QueryAnalytics::serialize_as_docs(std::string& docs) {
std::shared_lock lk(lmutex);
std::string key_buffer;
for(auto it = local_counts.begin(); it != local_counts.end(); ++it) {
it.key(key_buffer);
nlohmann::json doc;
doc["id"] = std::to_string(StringUtils::hash_wy(key_buffer.c_str(), key_buffer.size()));
doc["q"] = key_buffer;
doc["$operations"]["increment"]["count"] = it.value();
docs += doc.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore) + "\n";
}
if(!docs.empty()) {
docs.pop_back();
}
}
void QueryAnalytics::reset_local_counts() {
std::unique_lock lk(lmutex);
local_counts.clear();
}
size_t QueryAnalytics::get_k() {
return k;
}
void QueryAnalytics::compact_user_queries(uint64_t now_ts_us) {
std::unique_lock lk(umutex);
std::vector<std::string> keys_to_delete;
for(auto& kv: user_prefix_queries) {
auto& queries = kv.second;
int64_t last_consolidated_index = -1;
for(uint32_t i = 0; i < queries.size(); i++) {
if(now_ts_us - queries[i].timestamp < QUERY_FINALIZATION_INTERVAL_MICROS) {
break;
}
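// A query is considered finalized when the gap to the user's next query (or to the
// current time, for the last query in the list) exceeds the finalization interval;
// only finalized queries are folded into the aggregated counts via add().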
uint64_t diff_micros = (i == queries.size()-1) ? (now_ts_us - queries[i].timestamp) :
(queries[i + 1].timestamp - queries[i].timestamp);
if(diff_micros > QUERY_FINALIZATION_INTERVAL_MICROS) {
add(queries[i].query, queries[i].query, false, "");
last_consolidated_index = i;
}
}
queries.erase(queries.begin(), queries.begin() + last_consolidated_index+1);
if(queries.empty()) {
keys_to_delete.push_back(kv.first);
}
}
for(auto& key: keys_to_delete) {
user_prefix_queries.erase(key);
}
}
std::unordered_map<std::string, std::vector<QueryAnalytics::QWithTimestamp>> QueryAnalytics::get_user_prefix_queries() {
std::unique_lock lk(umutex);
return user_prefix_queries;
}
tsl::htrie_map<char, uint32_t> QueryAnalytics::get_local_counts() {
std::unique_lock lk(lmutex);
return local_counts;
}
void QueryAnalytics::set_expand_query(bool expand_query) {
this->expand_query = expand_query;
}
bool QueryAnalytics::is_auto_aggregation_enabled() const {
return auto_aggregation_enabled;
}
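// --- Editor's sketch, not part of query_analytics.cpp: a minimal, hypothetical driver showing
// how the QueryAnalytics API defined above could be used. Only the constructor and member
// functions visible in this file are assumed; the k value, query strings, user id and the
// 10-second gap (assumed to exceed QUERY_FINALIZATION_INTERVAL_MICROS) are illustrative.
#include "query_analytics.h"
#include <chrono>
#include <iostream>

int main() {
    QueryAnalytics analytics(10, true); // aggregate the top 10 queries, auto aggregation enabled

    uint64_t now_us = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::system_clock::now().time_since_epoch()).count();

    // Live prefix queries are buffered per user until they are finalized.
    analytics.add("sho", "sho", true, "user_a", now_us);
    analytics.add("shoe", "shoe", true, "user_a", now_us);

    // Later, only the final form of the prefix sequence ("shoe") is folded into the counts.
    analytics.compact_user_queries(now_us + 10 * 1000 * 1000);

    // Export the aggregated counts as newline-delimited JSON documents.
    std::string docs;
    analytics.serialize_as_docs(docs);
    std::cout << docs << std::endl;
    return 0;
}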
| size: 4,246 | language: C++ | extension: .cpp | total_lines: 106 | avg_line_length: 31.915094 | max_line_length: 120 | alphanum_fraction: 0.6 | repo: typesense/typesense | stars: 20,571 | forks: 633 | open_issues: 548 | license: GPL-3.0 | extracted: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags: all false |
| id: 3,752 | file_name: array.cpp | file_path: typesense_typesense/src/array.cpp |
#include "array.h"
uint32_t array::at(uint32_t index) {
return for_select(in, index);
}
bool array::contains(uint32_t value) {
uint32_t index = for_linear_search(in, length, value);
return index != length;
}
uint32_t array::indexOf(uint32_t value) {
return for_linear_search(in, length, value);
}
bool array::append(uint32_t value) {
uint32_t size_required = unsorted_append_size_required(value, length+1);
if(size_required+FOR_ELE_SIZE > size_bytes) {
// grow the array first
size_t new_size = (size_t) (size_required * FOR_GROWTH_FACTOR);
uint8_t *new_location = (uint8_t *) realloc(in, new_size);
if(new_location == NULL) {
abort();
}
in = new_location;
size_bytes = (uint32_t) new_size;
}
uint32_t new_length_bytes = for_append_unsorted(in, length, value);
if(new_length_bytes == 0) {
abort();
}
if(value < min) min = value;
if(value > max) max = value;
length_bytes = new_length_bytes;
length++;
return true;
}
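// Replaces the internal buffer by re-compressing the `array_length` values in `sorted_array`
// (with known min `m` and max `M`) using the unsorted FOR codec, freeing the old buffer.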
void array::load(const uint32_t *sorted_array, const uint32_t array_length, const uint32_t m, const uint32_t M) {
min = m;
max = M;
uint32_t size_required = (uint32_t) (unsorted_append_size_required(max, array_length) * FOR_GROWTH_FACTOR);
uint8_t *out = (uint8_t *) malloc(size_required * sizeof *out);
memset(out, 0, size_required);
uint32_t actual_size = for_compress_unsorted(sorted_array, out, array_length);
free(in);
in = nullptr;
in = out;
length = array_length;
size_bytes = size_required;
length_bytes = actual_size;
}
bool array::insert(size_t index, const uint32_t* values, size_t num_values) {
if(index >= length) {
return false;
}
uint32_t *curr_array = uncompress(length+num_values);
memmove(&curr_array[index+num_values], &curr_array[index], sizeof(uint32_t)*(length-index));
uint32_t m = min, M = max;
for(size_t i=0; i<num_values; i++) {
uint32_t value = values[i];
if(value < m) m = value;
if(value > M) M = value;
curr_array[index+i] = value;
}
load(curr_array, length+num_values, m, M);
delete [] curr_array;
return true;
}
void array::remove_index(uint32_t start_index, uint32_t end_index) {
uint32_t *curr_array = uncompress();
uint32_t *new_array = new uint32_t[length];
uint32_t new_index = 0;
uint32_t curr_index = 0;
min = std::numeric_limits<uint32_t>::max();
max = std::numeric_limits<uint32_t>::min();
while(curr_index < length) {
if(curr_index < start_index || curr_index >= end_index) {
new_array[new_index++] = curr_array[curr_index];
if(curr_array[curr_index] < min) min = curr_array[curr_index];
if(curr_array[curr_index] > max) max = curr_array[curr_index];
}
curr_index++;
}
uint32_t size_required = (uint32_t) (unsorted_append_size_required(max, new_index) * FOR_GROWTH_FACTOR);
uint8_t *out = (uint8_t *) malloc(size_required * sizeof *out);
memset(out, 0, size_required);
uint32_t actual_size = for_compress_unsorted(new_array, out, new_index);
delete[] curr_array;
delete[] new_array;
free(in);
in = out;
length = new_index;
size_bytes = size_required;
length_bytes = actual_size;
}
| size: 3,366 | language: C++ | extension: .cpp | total_lines: 91 | avg_line_length: 31.307692 | max_line_length: 113 | alphanum_fraction: 0.629424 | repo: typesense/typesense | stars: 20,571 | forks: 633 | open_issues: 548 | license: GPL-3.0 | extracted: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags: all false |
| id: 3,753 | file_name: sorted_array.cpp | file_path: typesense_typesense/src/sorted_array.cpp |
#include "sorted_array.h"
#include "array_utils.h"
#include "logger.h"
void sorted_array::load(const uint32_t *sorted_array, const uint32_t array_length) {
min = array_length != 0 ? sorted_array[0] : 0;
max = array_length > 1 ? sorted_array[array_length-1] : min;
uint32_t size_required = (uint32_t) (sorted_append_size_required(max, array_length) * FOR_GROWTH_FACTOR);
uint8_t *out = (uint8_t *) malloc(size_required * sizeof *out);
memset(out, 0, size_required);
uint32_t actual_size = for_compress_sorted(sorted_array, out, array_length);
free(in);
in = nullptr;
in = out;
length = array_length;
size_bytes = size_required;
length_bytes = actual_size;
}
size_t sorted_array::append(uint32_t value) {
if(value < max) {
// we will have to re-encode the whole sequence again
uint32_t* arr = uncompress(length+1);
// find the index of the element which is >= to `value`
uint32_t found_val;
uint32_t gte_index = for_lower_bound_search(in, length, value, &found_val);
for(size_t j=length; j>gte_index; j--) {
arr[j] = arr[j-1];
}
arr[gte_index] = value;
load(arr, length+1);
delete [] arr;
return gte_index;
} else {
uint32_t size_required = sorted_append_size_required(value, length+1);
size_t min_expected_size = size_required + FOR_ELE_SIZE;
if(size_bytes < min_expected_size) {
// grow the array first
size_t new_size = min_expected_size * FOR_GROWTH_FACTOR;
uint8_t *new_location = (uint8_t *) realloc(in, new_size);
if(new_location == NULL) {
abort();
}
in = new_location;
size_bytes = (uint32_t) new_size;
//LOG(INFO) << "new_size: " << new_size;
}
uint32_t new_length_bytes = for_append_sorted(in, length, value);
if(new_length_bytes == 0) return false;
length_bytes = new_length_bytes;
length++;
if(value < min) min = value;
if(value > max) max = value;
return length-1;
}
}
bool sorted_array::insert(size_t index, uint32_t value) {
if(index >= length) {
return false;
}
uint32_t *curr_array = uncompress(length+1);
memmove(&curr_array[index+1], &curr_array[index], sizeof(uint32_t)*(length-index));
curr_array[index] = value;
load(curr_array, length+1);
delete [] curr_array;
return true;
}
uint32_t sorted_array::at(uint32_t index) {
return for_select(in, index);
}
bool sorted_array::contains(uint32_t value) {
if(length == 0) {
return false;
}
uint32_t actual;
for_lower_bound_search(in, length, value, &actual);
return actual == value;
}
uint32_t sorted_array::indexOf(uint32_t value) {
if(length == 0) {
return length;
}
uint32_t actual;
uint32_t index = for_lower_bound_search(in, length, value, &actual);
if(actual == value) {
return index;
}
return length;
}
// returns the first element in the sequence which does not compare less than |value|.
uint32_t sorted_array::lower_bound_search_bits(const uint8_t *in, uint32_t imin, uint32_t imax, uint32_t base,
uint32_t bits, uint32_t value, uint32_t *actual) {
uint32_t imid;
uint32_t v;
while (imin + 1 < imax) {
imid = imin + ((imax - imin) / 2);
v = for_select_bits(in, base, bits, imid);
if (v >= value) {
imax = imid;
}
else if (v < value) {
imin = imid;
}
}
v = for_select_bits(in, base, bits, imin);
if (v >= value) {
*actual = v;
return imin;
}
v = for_select_bits(in, base, bits, imax);
*actual = v;
return imax;
}
// returns the first element in the sequence which does not compare less than |value|.
uint32_t sorted_array::lower_bound_search(const uint32_t *in, uint32_t imin, uint32_t imax,
uint32_t value, uint32_t *actual) {
uint32_t imid;
uint32_t v;
while (imin + 1 < imax) {
imid = imin + ((imax - imin) / 2);
v = in[imid];
if (v >= value) {
imax = imid;
}
else if (v < value) {
imin = imid;
}
}
v = in[imin];
if (v >= value) {
*actual = v;
return imin;
}
v = in[imax];
*actual = v;
return imax;
}
void sorted_array::binary_search_indices(const uint32_t *values, int low_vindex, int high_vindex,
int low_index, int high_index, uint32_t base, uint32_t bits,
uint32_t *indices) {
uint32_t actual_value = 0;
if(high_vindex >= low_vindex && high_index >= low_index) {
size_t pivot_vindex = (low_vindex + high_vindex) / 2;
uint32_t in_index = lower_bound_search_bits(in+METADATA_OVERHEAD, low_index, high_index, base, bits,
values[pivot_vindex], &actual_value);
if(actual_value == values[pivot_vindex]) {
indices[pivot_vindex] = in_index;
} else {
indices[pivot_vindex] = length;
}
binary_search_indices(values, low_vindex, pivot_vindex-1, low_index, in_index, base, bits, indices);
binary_search_indices(values, pivot_vindex+1, high_vindex, in_index, high_index, base, bits, indices);
}
}
void sorted_array::binary_search_indices(const uint32_t *values, int low_vindex, int high_vindex, int low_index,
int high_index, uint32_t *indices) {
uint32_t actual_value = 0;
if(high_vindex >= low_vindex && high_index >= low_index) {
size_t pivot_vindex = (low_vindex + high_vindex) / 2;
uint32_t in_index = lower_bound_search(values, low_index, high_index,
values[pivot_vindex], &actual_value);
if(actual_value == values[pivot_vindex]) {
indices[pivot_vindex] = in_index;
} else {
indices[pivot_vindex] = length;
}
binary_search_indices(values, low_vindex, pivot_vindex-1, low_index, in_index, indices);
binary_search_indices(values, pivot_vindex+1, high_vindex, in_index, high_index, indices);
}
}
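// Bulk lookup: for each element of the sorted `values` list, writes its position in this
// compressed sequence into `indices` (or `length` when the value is absent). Leading and
// trailing values that fall outside the sequence's range are trimmed first; the remaining
// window is resolved by a recursive divide-and-conquer of lower-bound searches over
// progressively narrower index ranges.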
void sorted_array::indexOf(const uint32_t *values, const size_t values_len, uint32_t *indices) {
if(values_len == 0) {
return ;
}
uint32_t base = *(uint32_t *)(in + 0);
uint32_t bits = *(in + 4);
uint32_t low_index, high_index;
uint32_t actual_value = 0;
// identify the upper and lower bounds of the search space
int head = -1;
do {
head++;
low_index = lower_bound_search_bits(in+METADATA_OVERHEAD, 0, length-1, base, bits, values[head], &actual_value);
} while(head < int(values_len - 1) && actual_value > values[head]);
int tail = values_len;
do {
tail--;
high_index = lower_bound_search_bits(in+METADATA_OVERHEAD, 0, length-1, base, bits, values[tail], &actual_value);
} while(tail > 0 && actual_value < values[tail]);
for(int i = 0; i < head; i++) {
indices[i] = length;
}
for(int j = values_len-1; j > tail; j--) {
indices[j] = length;
}
// recursively search within the bounds for all values
binary_search_indices(values, head, tail, low_index, high_index, base, bits, indices);
}
void sorted_array::remove_value(uint32_t value) {
if(length == 0) {
return ;
}
// A lower bound search returns the first element in the sequence that is >= `value`
// So, `found_val` will be either equal or greater than `value`
uint32_t found_val;
uint32_t found_index = for_lower_bound_search(in, length, value, &found_val);
if(found_val != value) {
return ;
}
uint32_t *curr_array = uncompress();
if(found_index + 1 < length) {
memmove(&curr_array[found_index], &curr_array[found_index+1], sizeof(uint32_t) * (length - found_index - 1));
}
size_t new_length = (length == 0) ? 0 : (length - 1);
load(curr_array, new_length);
delete [] curr_array;
}
void sorted_array::remove_values(uint32_t *sorted_values, uint32_t sorted_values_length) {
uint32_t *curr_array = uncompress();
uint32_t *new_array = new uint32_t[length];
uint32_t new_index = 0;
uint32_t sorted_values_index = 0;
uint32_t curr_index = 0;
while(curr_index < length) {
if(sorted_values_index < sorted_values_length && sorted_values[sorted_values_index] == curr_array[curr_index]) {
curr_index++;
sorted_values_index++;
} else {
new_array[new_index++] = curr_array[curr_index++];
}
}
load(new_array, new_index);
delete[] curr_array;
delete[] new_array;
}
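// Counts how many of the given `values` are present in this sequence. To keep the number of
// lower-bound probes small, the shorter side is probed into the longer one: when the
// compressed sequence is longer than `values`, each value is looked up in the sequence;
// otherwise the sequence is uncompressed and each of its elements is looked up in `values`.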
size_t sorted_array::numFoundOf(const uint32_t *values, const size_t values_len) {
size_t num_found = 0;
if(length == 0 || values_len == 0) {
return num_found;
}
uint32_t low_index, high_index;
uint32_t actual_value = 0;
if(length > values_len) {
uint32_t base = *(uint32_t *)(in + 0);
uint32_t bits = *(in + 4);
// identify the upper and lower bounds of the search space
int head = -1;
do {
head++;
low_index = lower_bound_search_bits(in+METADATA_OVERHEAD, 0, length-1, base, bits, values[head], &actual_value);
} while(head < int(values_len - 1) && actual_value > values[head]);
int tail = values_len;
do {
tail--;
high_index = lower_bound_search_bits(in+METADATA_OVERHEAD, 0, length-1, base, bits, values[tail], &actual_value);
} while(tail > 0 && actual_value < values[tail]);
// recursively search within the bounds for all values
binary_count_indices(values, head, tail, low_index, high_index, base, bits, num_found);
} else {
// identify the upper and lower bounds of the search space
uint32_t* src = uncompress(length);
int head = -1;
do {
head++;
low_index = lower_bound_search(values, 0, values_len-1, src[head], &actual_value);
} while(head < int(length - 1) && actual_value > src[head]);
int tail = length;
do {
tail--;
high_index = lower_bound_search(values, 0, values_len-1, src[tail], &actual_value);
} while(tail > 0 && actual_value < src[tail]);
// recursively search within the bounds for all values
binary_count_indices(src, head, tail, values, low_index, high_index, num_found);
delete [] src;
}
return num_found;
}
void sorted_array::binary_count_indices(const uint32_t *values, int low_vindex, int high_vindex, int low_index,
int high_index, uint32_t base, uint32_t bits, size_t& num_found) {
uint32_t actual_value = 0;
if(high_vindex >= low_vindex && high_index >= low_index) {
int pivot_vindex = (low_vindex + high_vindex) / 2;
uint32_t in_index = lower_bound_search_bits(in+METADATA_OVERHEAD, low_index, high_index, base, bits,
values[pivot_vindex], &actual_value);
//LOG(INFO) << "pivot_vindex: " << pivot_vindex << ", values[pivot_vindex]: " << values[pivot_vindex];
if(actual_value == values[pivot_vindex]) {
//LOG(INFO) << actual_value;
num_found++;
}
binary_count_indices(values, low_vindex, pivot_vindex-1, low_index, in_index, base, bits, num_found);
binary_count_indices(values, pivot_vindex+1, high_vindex, in_index, high_index, base, bits, num_found);
}
}
void sorted_array::binary_count_indices(const uint32_t *values, int low_vindex, int high_vindex,
const uint32_t* src, int low_index, int high_index, size_t &num_found) {
uint32_t actual_value = 0;
if(high_vindex >= low_vindex && high_index >= low_index) {
int pivot_vindex = (low_vindex + high_vindex) / 2;
uint32_t in_index = lower_bound_search(src, low_index, high_index, values[pivot_vindex], &actual_value);
//LOG(INFO) << "pivot_vindex: " << pivot_vindex << ", values[pivot_vindex]: " << values[pivot_vindex];
if(actual_value == values[pivot_vindex]) {
//LOG(INFO) << actual_value;
num_found++;
}
binary_count_indices(values, low_vindex, pivot_vindex-1, src, low_index, in_index, num_found);
binary_count_indices(values, pivot_vindex+1, high_vindex, src, in_index, high_index, num_found);
}
}
uint32_t sorted_array::last() {
return (length == 0) ? UINT32_MAX : max;
}
| size: 12,910 | language: C++ | extension: .cpp | total_lines: 309 | avg_line_length: 33.436893 | max_line_length: 125 | alphanum_fraction: 0.588311 | repo: typesense/typesense | stars: 20,571 | forks: 633 | open_issues: 548 | license: GPL-3.0 | extracted: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags: all false |
| id: 3,754 | file_name: field.cpp | file_path: typesense_typesense/src/field.cpp |
#include <store.h>
#include "field.h"
#include "magic_enum.hpp"
#include "embedder_manager.h"
#include <stack>
#include <collection_manager.h>
#include <regex>
#include <algorithm>
#include <cctype>
Option<bool> field::json_field_to_field(bool enable_nested_fields, nlohmann::json& field_json,
std::vector<field>& the_fields,
string& fallback_field_type, size_t& num_auto_detect_fields) {
if(field_json["name"] == "id") {
// No field should exist with the name "id" as it is reserved for internal use
// We cannot throw an error here anymore since that will break backward compatibility!
LOG(WARNING) << "Collection schema cannot contain a field with name `id`. Ignoring field.";
return Option<bool>(true);
}
if(!field_json.is_object() ||
field_json.count(fields::name) == 0 || field_json.count(fields::type) == 0 ||
!field_json.at(fields::name).is_string() || !field_json.at(fields::type).is_string()) {
return Option<bool>(400, "Wrong format for `fields`. It should be an array of objects containing "
"`name`, `type`, `optional` and `facet` properties.");
}
if(field_json.count("store") != 0 && !field_json.at("store").is_boolean()) {
return Option<bool>(400, std::string("The `store` property of the field `") +
field_json[fields::name].get<std::string>() + std::string("` should be a boolean."));
}
if(field_json.count("drop") != 0) {
return Option<bool>(400, std::string("Invalid property `drop` on field `") +
field_json[fields::name].get<std::string>() + std::string("`: it is allowed only "
"during schema update."));
}
if(field_json.count(fields::facet) != 0 && !field_json.at(fields::facet).is_boolean()) {
return Option<bool>(400, std::string("The `facet` property of the field `") +
field_json[fields::name].get<std::string>() + std::string("` should be a boolean."));
}
if(field_json.count(fields::optional) != 0 && !field_json.at(fields::optional).is_boolean()) {
return Option<bool>(400, std::string("The `optional` property of the field `") +
field_json[fields::name].get<std::string>() + std::string("` should be a boolean."));
}
if(field_json.count(fields::index) != 0 && !field_json.at(fields::index).is_boolean()) {
return Option<bool>(400, std::string("The `index` property of the field `") +
field_json[fields::name].get<std::string>() + std::string("` should be a boolean."));
}
if(field_json.count(fields::sort) != 0 && !field_json.at(fields::sort).is_boolean()) {
return Option<bool>(400, std::string("The `sort` property of the field `") +
field_json[fields::name].get<std::string>() + std::string("` should be a boolean."));
}
if(field_json.count(fields::infix) != 0 && !field_json.at(fields::infix).is_boolean()) {
return Option<bool>(400, std::string("The `infix` property of the field `") +
field_json[fields::name].get<std::string>() + std::string("` should be a boolean."));
}
if(field_json.count(fields::locale) != 0){
if(!field_json.at(fields::locale).is_string()) {
return Option<bool>(400, std::string("The `locale` property of the field `") +
field_json[fields::name].get<std::string>() + std::string("` should be a string."));
}
if(!field_json[fields::locale].get<std::string>().empty() &&
field_json[fields::locale].get<std::string>().size() != 2) {
return Option<bool>(400, std::string("The `locale` value of the field `") +
field_json[fields::name].get<std::string>() + std::string("` is not valid."));
}
}
if (field_json.count(fields::reference) != 0 && !field_json.at(fields::reference).is_string()) {
return Option<bool>(400, "Reference should be a string.");
} else if (field_json.count(fields::reference) == 0) {
field_json[fields::reference] = "";
}
if (field_json.count(fields::async_reference) == 0) {
field_json[fields::async_reference] = false;
} else if (!field_json.at(fields::async_reference).is_boolean()) {
return Option<bool>(400, std::string("The `async_reference` property of the field `") +
field_json[fields::name].get<std::string>() + std::string("` should be a boolean."));
} else if (field_json[fields::async_reference].get<bool>() &&
field_json[fields::reference].get<std::string>().empty()) {
return Option<bool>(400, std::string("The `async_reference` property of the field `") +
field_json[fields::name].get<std::string>() + std::string("` is only applicable if "
"`reference` is specified."));
}
if(field_json.count(fields::stem) != 0) {
if(!field_json.at(fields::stem).is_boolean()) {
return Option<bool>(400, std::string("The `stem` property of the field `") +
field_json[fields::name].get<std::string>() + std::string("` should be a boolean."));
}
if(field_json[fields::stem] && field_json[fields::type] != field_types::STRING && field_json[fields::type] != field_types::STRING_ARRAY) {
return Option<bool>(400, std::string("The `stem` property is only allowed for string and string[] fields."));
}
if(field_json[fields::stem].get<bool>()) {
std::string locale;
if(field_json.count(fields::locale) != 0) {
locale = field_json[fields::locale].get<std::string>();
}
auto stem_validation = StemmerManager::get_instance().validate_language(locale);
if(!stem_validation) {
return Option<bool>(400, std::string("The `locale` value of the field `") +
field_json[fields::name].get<std::string>() + std::string("` is not supported for stem."));
}
}
} else {
field_json[fields::stem] = false;
}
if (field_json.count(fields::range_index) != 0) {
if (!field_json.at(fields::range_index).is_boolean()) {
return Option<bool>(400, std::string("The `range_index` property of the field `") +
field_json[fields::name].get<std::string>() +
std::string("` should be a boolean."));
}
auto const& type = field_json["type"];
if (field_json[fields::range_index] &&
type != field_types::INT32 && type != field_types::INT32_ARRAY &&
type != field_types::INT64 && type != field_types::INT64_ARRAY &&
type != field_types::FLOAT && type != field_types::FLOAT_ARRAY) {
return Option<bool>(400, std::string("The `range_index` property is only allowed for the numerical fields`"));
}
} else {
field_json[fields::range_index] = false;
}
if(field_json["name"] == ".*") {
if(field_json.count(fields::facet) == 0) {
field_json[fields::facet] = false;
}
if(field_json.count(fields::optional) == 0) {
field_json[fields::optional] = true;
}
if(field_json.count(fields::index) == 0) {
field_json[fields::index] = true;
}
if(field_json.count(fields::locale) == 0) {
field_json[fields::locale] = "";
}
if(field_json.count(fields::sort) == 0) {
field_json[fields::sort] = false;
}
if(field_json.count(fields::infix) == 0) {
field_json[fields::infix] = false;
}
if(field_json[fields::optional] == false) {
return Option<bool>(400, "Field `.*` must be an optional field.");
}
if(field_json[fields::facet] == true) {
return Option<bool>(400, "Field `.*` cannot be a facet field.");
}
if(field_json[fields::index] == false) {
return Option<bool>(400, "Field `.*` must be an index field.");
}
if (!field_json[fields::reference].get<std::string>().empty()) {
return Option<bool>(400, "Field `.*` cannot be a reference field.");
}
field fallback_field(field_json["name"], field_json["type"], field_json["facet"],
field_json["optional"], field_json[fields::index], field_json[fields::locale],
field_json[fields::sort], field_json[fields::infix]);
if(fallback_field.has_valid_type()) {
fallback_field_type = fallback_field.type;
num_auto_detect_fields++;
} else {
return Option<bool>(400, "The `type` of field `.*` is invalid.");
}
the_fields.emplace_back(fallback_field);
return Option<bool>(true);
}
if(field_json.count(fields::facet) == 0) {
field_json[fields::facet] = false;
}
if(field_json.count(fields::index) == 0) {
field_json[fields::index] = true;
}
if(field_json.count(fields::locale) == 0) {
field_json[fields::locale] = "";
}
if(field_json.count(fields::store) == 0) {
field_json[fields::store] = true;
}
if(field_json.count(fields::sort) == 0) {
if(field_json["type"] == field_types::INT32 || field_json["type"] == field_types::INT64 ||
field_json["type"] == field_types::FLOAT || field_json["type"] == field_types::BOOL ||
field_json["type"] == field_types::GEOPOINT || field_json["type"] == field_types::GEOPOINT_ARRAY) {
if((field_json.count(fields::num_dim) == 0) || (field_json[fields::facet])) {
field_json[fields::sort] = true;
} else {
field_json[fields::sort] = false;
}
} else {
field_json[fields::sort] = false;
}
} else if (!field_json[fields::sort].get<bool>() &&
(field_json["type"] == field_types::GEOPOINT || field_json["type"] == field_types::GEOPOINT_ARRAY)) {
return Option<bool>(400, std::string("The `sort` property of the field `") +=
field_json[fields::name].get<std::string>() += "` having `" + field_json["type"].get<std::string>() +=
"` type cannot be `false`. The sort index is used during GeoSearch.");
}
if(field_json.count(fields::infix) == 0) {
field_json[fields::infix] = false;
}
if(field_json[fields::type] == field_types::OBJECT || field_json[fields::type] == field_types::OBJECT_ARRAY) {
if(!enable_nested_fields) {
return Option<bool>(400, "Type `object` or `object[]` can be used only when nested fields are enabled by "
"setting` enable_nested_fields` to true.");
}
}
if(field_json.count(fields::embed) != 0) {
if(field_json[fields::type] != field_types::FLOAT_ARRAY) {
return Option<bool>(400, "Fields with the `embed` parameter can only be of type `float[]`.");
}
if(!field_json[fields::embed].is_object()) {
return Option<bool>(400, "Property `" + fields::embed + "` must be an object.");
}
auto& embed_json = field_json[fields::embed];
if(field_json[fields::embed].count(fields::from) == 0) {
return Option<bool>(400, "Property `" + fields::embed + "` must contain a `" + fields::from + "` property.");
}
if(!field_json[fields::embed][fields::from].is_array()) {
return Option<bool>(400, "Property `" + fields::embed + "." + fields::from + "` must be an array.");
}
if(field_json[fields::embed][fields::from].empty()) {
return Option<bool>(400, "Property `" + fields::embed + "." + fields::from + "` must have at least one element.");
}
if(embed_json.count(fields::model_config) == 0) {
return Option<bool>(400, "Property `" + fields::embed + "." + fields::model_config + "` not found.");
}
auto& model_config = embed_json[fields::model_config];
if(model_config.count(fields::model_name) == 0) {
return Option<bool>(400, "Property `" + fields::embed + "." + fields::model_config + "." + fields::model_name + "`not found");
}
if(!model_config[fields::model_name].is_string()) {
return Option<bool>(400, "Property `" + fields::embed + "." + fields::model_config + "." + fields::model_name + "` must be a string.");
}
if(model_config[fields::model_name].get<std::string>().empty()) {
return Option<bool>(400, "Property `" + fields::embed + "." + fields::model_config + "." + fields::model_name + "` cannot be empty.");
}
if(model_config.count(fields::indexing_prefix) != 0) {
if(!model_config[fields::indexing_prefix].is_string()) {
return Option<bool>(400, "Property `" + fields::embed + "." + fields::model_config + "." + fields::indexing_prefix + "` must be a string.");
}
}
if(model_config.count(fields::query_prefix) != 0) {
if(!model_config[fields::query_prefix].is_string()) {
return Option<bool>(400, "Property `" + fields::embed + "." + fields::model_config + "." + fields::query_prefix + "` must be a string.");
}
}
for(auto& embed_from_field : field_json[fields::embed][fields::from]) {
if(!embed_from_field.is_string()) {
return Option<bool>(400, "Property `" + fields::embed + "." + fields::from + "` must contain only field names as strings.");
}
}
}
auto DEFAULT_VEC_DIST_METRIC = magic_enum::enum_name(vector_distance_type_t::cosine);
if(field_json.count(fields::num_dim) == 0) {
field_json[fields::num_dim] = 0;
field_json[fields::vec_dist] = DEFAULT_VEC_DIST_METRIC;
} else {
if(!field_json[fields::num_dim].is_number_unsigned() || field_json[fields::num_dim] == 0) {
return Option<bool>(400, "Property `" + fields::num_dim + "` must be a positive integer.");
}
if(field_json[fields::type] != field_types::FLOAT_ARRAY) {
return Option<bool>(400, "Property `" + fields::num_dim + "` is only allowed on a float array field.");
}
if(field_json[fields::facet].get<bool>()) {
return Option<bool>(400, "Property `" + fields::facet + "` is not allowed on a vector field.");
}
if(field_json[fields::sort].get<bool>()) {
return Option<bool>(400, "Property `" + fields::sort + "` cannot be enabled on a vector field.");
}
if(field_json.count(fields::vec_dist) == 0) {
field_json[fields::vec_dist] = DEFAULT_VEC_DIST_METRIC;
} else {
if(!field_json[fields::vec_dist].is_string()) {
return Option<bool>(400, "Property `" + fields::vec_dist + "` must be a string.");
}
auto vec_dist_op = magic_enum::enum_cast<vector_distance_type_t>(field_json[fields::vec_dist].get<std::string>());
if(!vec_dist_op.has_value()) {
return Option<bool>(400, "Property `" + fields::vec_dist + "` is invalid.");
}
}
}
if(field_json.count(fields::hnsw_params) != 0) {
if(!field_json[fields::hnsw_params].is_object()) {
return Option<bool>(400, "Property `" + fields::hnsw_params + "` must be an object.");
}
if(field_json[fields::hnsw_params].count("ef_construction") != 0 &&
(!field_json[fields::hnsw_params]["ef_construction"].is_number_unsigned() ||
field_json[fields::hnsw_params]["ef_construction"] == 0)) {
return Option<bool>(400, "Property `" + fields::hnsw_params + ".ef_construction` must be a positive integer.");
}
if(field_json[fields::hnsw_params].count("M") != 0 &&
(!field_json[fields::hnsw_params]["M"].is_number_unsigned() ||
field_json[fields::hnsw_params]["M"] == 0)) {
return Option<bool>(400, "Property `" + fields::hnsw_params + ".M` must be a positive integer.");
}
// remove unrelated properties, keeping only max_elements, ef_construction, M and ef
auto it = field_json[fields::hnsw_params].begin();
while(it != field_json[fields::hnsw_params].end()) {
if(it.key() != "max_elements" && it.key() != "ef_construction" && it.key() != "M" && it.key() != "ef") {
it = field_json[fields::hnsw_params].erase(it);
} else {
++it;
}
}
if(field_json[fields::hnsw_params].count("ef_construction") == 0) {
field_json[fields::hnsw_params]["ef_construction"] = 200;
}
if(field_json[fields::hnsw_params].count("M") == 0) {
field_json[fields::hnsw_params]["M"] = 16;
}
} else {
field_json[fields::hnsw_params] = R"({
"M": 16,
"ef_construction": 200
})"_json;
}
if(field_json.count(fields::optional) == 0) {
// dynamic type fields are always optional
bool is_dynamic = field::is_dynamic(field_json[fields::name], field_json[fields::type]);
field_json[fields::optional] = is_dynamic;
}
bool is_obj = field_json[fields::type] == field_types::OBJECT || field_json[fields::type] == field_types::OBJECT_ARRAY;
bool is_regexp_name = field_json[fields::name].get<std::string>().find(".*") != std::string::npos;
if (is_regexp_name && !field_json[fields::reference].get<std::string>().empty()) {
return Option<bool>(400, "Wildcard field cannot have a reference.");
}
if(is_obj || (!is_regexp_name && enable_nested_fields &&
field_json[fields::name].get<std::string>().find('.') != std::string::npos)) {
field_json[fields::nested] = true;
field_json[fields::nested_array] = field::VAL_UNKNOWN; // unknown, will be resolved during read
} else {
field_json[fields::nested] = false;
field_json[fields::nested_array] = 0;
}
if(field_json[fields::type] == field_types::GEOPOINT && field_json[fields::sort] == false) {
LOG(WARNING) << "Forcing geopoint field `" << field_json[fields::name].get<std::string>() << "` to be sortable.";
field_json[fields::sort] = true;
}
auto vec_dist = magic_enum::enum_cast<vector_distance_type_t>(field_json[fields::vec_dist].get<std::string>()).value();
if (!field_json[fields::reference].get<std::string>().empty()) {
std::vector<std::string> tokens;
StringUtils::split(field_json[fields::reference].get<std::string>(), tokens, ".");
if (tokens.size() < 2) {
return Option<bool>(400, "Invalid reference `" + field_json[fields::reference].get<std::string>() + "`.");
}
tokens.clear();
StringUtils::split(field_json[fields::name].get<std::string>(), tokens, ".");
if (tokens.size() > 2) {
return Option<bool>(400, "`" + field_json[fields::name].get<std::string>() + "` field cannot have a reference."
" Only the top-level field of an object is allowed.");
}
}
the_fields.emplace_back(
field(field_json[fields::name], field_json[fields::type], field_json[fields::facet],
field_json[fields::optional], field_json[fields::index], field_json[fields::locale],
field_json[fields::sort], field_json[fields::infix], field_json[fields::nested],
field_json[fields::nested_array], field_json[fields::num_dim], vec_dist,
field_json[fields::reference], field_json[fields::embed], field_json[fields::range_index],
field_json[fields::store], field_json[fields::stem], field_json[fields::hnsw_params],
field_json[fields::async_reference])
);
if (!field_json[fields::reference].get<std::string>().empty()) {
// Add a reference helper field in the schema. It stores the doc id of the document it references to reduce the
// computation while searching.
auto f = field(field_json[fields::name].get<std::string>() + fields::REFERENCE_HELPER_FIELD_SUFFIX,
field_types::is_array(field_json[fields::type].get<std::string>()) ? field_types::INT64_ARRAY : field_types::INT64,
false, field_json[fields::optional], true);
f.nested = field_json[fields::nested];
the_fields.emplace_back(std::move(f));
}
return Option<bool>(true);
}
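// Recursively walks a nested object/array value and writes its leaves into `doc` under
// flattened dotted keys (for example "a.b.c"), synthesizing a field definition for every
// flattened key in `flattened_fields`. When the path crosses an array, the detected type is
// promoted to its multi-valued variant and values are appended rather than assigned.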
bool field::flatten_obj(nlohmann::json& doc, nlohmann::json& value, bool has_array, bool has_obj_array,
bool is_update, const field& the_field, const std::string& flat_name,
const std::unordered_map<std::string, field>& dyn_fields,
std::unordered_map<std::string, field>& flattened_fields) {
if(value.is_object()) {
has_obj_array = has_array;
auto it = value.begin();
while(it != value.end()) {
const std::string& child_field_name = flat_name + "." + it.key();
if(it.value().is_null()) {
if(!has_array) {
// we don't want to push null values into an array because that's not valid
doc[child_field_name] = nullptr;
}
field flattened_field;
flattened_field.name = child_field_name;
flattened_field.type = field_types::NIL;
flattened_fields[child_field_name] = flattened_field;
if(!is_update) {
// update code path requires and takes care of null values
it = value.erase(it);
} else {
it++;
}
} else {
flatten_obj(doc, it.value(), has_array, has_obj_array, is_update, the_field, child_field_name,
dyn_fields, flattened_fields);
it++;
}
}
} else if(value.is_array()) {
for(const auto& kv: value.items()) {
flatten_obj(doc, kv.value(), true, has_obj_array, is_update, the_field, flat_name, dyn_fields, flattened_fields);
}
} else { // must be a primitive
if(doc.count(flat_name) != 0 && flattened_fields.find(flat_name) == flattened_fields.end()) {
return true;
}
std::string detected_type;
bool found_dynamic_field = false;
field dyn_field(the_field.name, field_types::STRING, false);
for(auto dyn_field_it = dyn_fields.begin(); dyn_field_it != dyn_fields.end(); dyn_field_it++) {
auto& dynamic_field = dyn_field_it->second;
if(dynamic_field.is_auto() || dynamic_field.is_string_star()) {
continue;
}
if(std::regex_match(flat_name, std::regex(dynamic_field.name))) {
detected_type = dynamic_field.type;
found_dynamic_field = true;
dyn_field = dynamic_field;
break;
}
}
if(!found_dynamic_field) {
if(!field::get_type(value, detected_type)) {
return false;
}
if(std::isalnum(detected_type.back()) && has_array) {
// convert singular type to multi valued type
detected_type += "[]";
}
}
if(has_array) {
doc[flat_name].push_back(value);
} else {
doc[flat_name] = value;
}
field flattened_field = found_dynamic_field ? dyn_field : the_field;
flattened_field.name = flat_name;
flattened_field.type = detected_type;
flattened_field.optional = true;
flattened_field.nested = true;
flattened_field.nested_array = has_obj_array;
int sort_op = flattened_field.sort ? 1 : -1;
int infix_op = flattened_field.infix ? 1 : -1;
flattened_field.set_computed_defaults(sort_op, infix_op);
flattened_fields[flat_name] = flattened_field;
}
return true;
}
Option<bool> field::flatten_field(nlohmann::json& doc, nlohmann::json& obj, const field& the_field,
std::vector<std::string>& path_parts, size_t path_index,
bool has_array, bool has_obj_array, bool is_update,
const std::unordered_map<std::string, field>& dyn_fields,
std::unordered_map<std::string, field>& flattened_fields) {
if(path_index == path_parts.size()) {
// end of path: check if obj matches expected type
std::string detected_type;
bool found_dynamic_field = false;
for(auto dyn_field_it = dyn_fields.begin(); dyn_field_it != dyn_fields.end(); dyn_field_it++) {
auto& dynamic_field = dyn_field_it->second;
if(dynamic_field.is_auto() || dynamic_field.is_string_star()) {
continue;
}
if(std::regex_match(the_field.name, std::regex(dynamic_field.name))) {
detected_type = obj.is_object() ? field_types::OBJECT : dynamic_field.type;
found_dynamic_field = true;
break;
}
}
if(!found_dynamic_field) {
if(!field::get_type(obj, detected_type)) {
if(obj.is_null() && the_field.optional) {
// null values are allowed only if field is optional
return Option<bool>(true);
}
return Option<bool>(400, "Field `" + the_field.name + "` has an incorrect type.");
}
if(std::isalnum(detected_type.back()) && has_array) {
// convert singular type to multi valued type
detected_type += "[]";
}
}
has_obj_array = has_obj_array || ((detected_type == field_types::OBJECT) && has_array);
// handle differences in detection of numerical types
bool is_numerically_valid = (detected_type != the_field.type) &&
( (detected_type == field_types::INT64 &&
(the_field.type == field_types::INT32 || the_field.type == field_types::FLOAT)) ||
(detected_type == field_types::INT64_ARRAY &&
(the_field.type == field_types::INT32_ARRAY || the_field.type == field_types::FLOAT_ARRAY)) ||
(detected_type == field_types::FLOAT_ARRAY && the_field.type == field_types::GEOPOINT_ARRAY) ||
(detected_type == field_types::FLOAT_ARRAY && the_field.type == field_types::GEOPOINT && !has_obj_array) ||
(detected_type == field_types::INT64_ARRAY && the_field.type == field_types::GEOPOINT && !has_obj_array) ||
(detected_type == field_types::INT64_ARRAY && the_field.type == field_types::GEOPOINT_ARRAY)
);
if(detected_type == the_field.type || is_numerically_valid) {
if(the_field.is_object()) {
flatten_obj(doc, obj, has_array, has_obj_array, is_update, the_field, the_field.name,
dyn_fields, flattened_fields);
} else {
if(doc.count(the_field.name) != 0 && flattened_fields.find(the_field.name) == flattened_fields.end()) {
return Option<bool>(true);
}
if(has_array) {
doc[the_field.name].push_back(obj);
} else {
doc[the_field.name] = obj;
}
field flattened_field = the_field;
flattened_field.type = detected_type;
flattened_field.nested = (path_index > 1);
flattened_field.nested_array = has_obj_array;
flattened_fields[the_field.name] = flattened_field;
}
return Option<bool>(true);
} else {
if(has_obj_array && !the_field.is_array()) {
return Option<bool>(400, "Field `" + the_field.name + "` has an incorrect type. "
"Hint: field inside an array of objects must be an array type as well.");
}
return Option<bool>(400, "Field `" + the_field.name + "` has an incorrect type.");
}
}
const std::string& fragment = path_parts[path_index];
const auto& it = obj.find(fragment);
if(it != obj.end()) {
if(it.value().is_array()) {
has_array = true;
for(auto& ele: it.value()) {
has_obj_array = has_obj_array || ele.is_object();
Option<bool> op = flatten_field(doc, ele, the_field, path_parts, path_index + 1, has_array,
has_obj_array, is_update, dyn_fields, flattened_fields);
if(!op.ok()) {
return op;
}
}
return Option<bool>(true);
} else {
return flatten_field(doc, it.value(), the_field, path_parts, path_index + 1, has_array, has_obj_array,
is_update, dyn_fields, flattened_fields);
}
} else if(!the_field.optional) {
return Option<bool>(404, "Field `" + the_field.name + "` not found.");
}
return Option<bool>(true);
}
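// Flattens every indexed nested field of `document` into top-level dotted keys, records the
// names of the generated keys under the document's ".flat" array, and appends the synthesized
// field definitions (excluding null-valued ones) to `flattened_fields`.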
Option<bool> field::flatten_doc(nlohmann::json& document,
const tsl::htrie_map<char, field>& nested_fields,
const std::unordered_map<std::string, field>& dyn_fields,
bool is_update, std::vector<field>& flattened_fields) {
std::unordered_map<std::string, field> flattened_fields_map;
for(auto& nested_field: nested_fields) {
if(!nested_field.index) {
continue;
}
std::vector<std::string> field_parts;
StringUtils::split(nested_field.name, field_parts, ".");
if(field_parts.size() > 1 && document.count(nested_field.name) != 0) {
// skip explicitly present nested fields
continue;
}
auto op = flatten_field(document, document, nested_field, field_parts, 0, false, false,
is_update, dyn_fields, flattened_fields_map);
if(op.ok()) {
continue;
}
if(op.code() == 404 && (is_update || nested_field.optional)) {
continue;
} else {
return op;
}
}
document[".flat"] = nlohmann::json::array();
for(auto& kv: flattened_fields_map) {
document[".flat"].push_back(kv.second.name);
if(kv.second.type != field_types::NIL) {
// a NIL entry only marks a null value rather than a real field, so only non-NIL entries are added
flattened_fields.push_back(kv.second);
}
}
return Option<bool>(true);
}
void field::compact_nested_fields(tsl::htrie_map<char, field>& nested_fields) {
std::vector<std::string> nested_fields_vec;
for(const auto& f: nested_fields) {
nested_fields_vec.push_back(f.name);
}
for(auto& field_name: nested_fields_vec) {
nested_fields.erase_prefix(field_name + ".");
}
}
Option<bool> field::json_fields_to_fields(bool enable_nested_fields, nlohmann::json &fields_json, string &fallback_field_type,
std::vector<field>& the_fields) {
size_t num_auto_detect_fields = 0;
const tsl::htrie_map<char, field> dummy_search_schema;
for(size_t i = 0; i < fields_json.size(); i++) {
nlohmann::json& field_json = fields_json[i];
auto op = json_field_to_field(enable_nested_fields,
field_json, the_fields, fallback_field_type, num_auto_detect_fields);
if(!op.ok()) {
return op;
}
if(!the_fields.empty() && !the_fields.back().embed.empty()) {
auto validate_res = validate_and_init_embed_field(dummy_search_schema, field_json, fields_json, the_fields.back());
if(!validate_res.ok()) {
return validate_res;
}
}
}
if(num_auto_detect_fields > 1) {
return Option<bool>(400,"There can be only one field named `.*`.");
}
return Option<bool>(true);
}
Option<bool> field::validate_and_init_embed_field(const tsl::htrie_map<char, field>& search_schema, nlohmann::json& field_json,
const nlohmann::json& fields_json,
field& the_field) {
const std::string err_msg = "Property `" + fields::embed + "." + fields::from +
"` can only refer to string, string array or image (for supported models) fields.";
bool found_image_field = false;
for(auto& field_name : field_json[fields::embed][fields::from].get<std::vector<std::string>>()) {
auto embed_field = std::find_if(fields_json.begin(), fields_json.end(), [&field_name](const nlohmann::json& x) {
return x["name"].get<std::string>() == field_name;
});
if(embed_field == fields_json.end()) {
const auto& embed_field2 = search_schema.find(field_name);
if (embed_field2 == search_schema.end()) {
return Option<bool>(400, err_msg);
} else if (embed_field2->type != field_types::STRING && embed_field2->type != field_types::STRING_ARRAY && embed_field2->type != field_types::IMAGE) {
return Option<bool>(400, err_msg);
}
} else if((*embed_field)[fields::type] != field_types::STRING &&
(*embed_field)[fields::type] != field_types::STRING_ARRAY &&
(*embed_field)[fields::type] != field_types::IMAGE) {
return Option<bool>(400, err_msg);
}
}
const auto& model_config = field_json[fields::embed][fields::model_config];
size_t num_dim = field_json[fields::num_dim].get<size_t>();
auto res = EmbedderManager::get_instance().validate_and_init_model(model_config, num_dim);
if(!res.ok()) {
return Option<bool>(res.code(), res.error());
}
LOG(INFO) << "Model init done.";
field_json[fields::num_dim] = num_dim;
the_field.num_dim = num_dim;
return Option<bool>(true);
}
Option<bool> field::fields_to_json_fields(const std::vector<field>& fields, const string& default_sorting_field,
nlohmann::json& fields_json) {
bool found_default_sorting_field = false;
// Check for duplicates in field names
std::map<std::string, std::vector<const field*>> unique_fields;
for(const field & field: fields) {
unique_fields[field.name].push_back(&field);
if(field.name == "id") {
continue;
}
nlohmann::json field_val;
field_val[fields::name] = field.name;
field_val[fields::type] = field.type;
field_val[fields::facet] = field.facet;
field_val[fields::optional] = field.optional;
field_val[fields::index] = field.index;
field_val[fields::sort] = field.sort;
field_val[fields::infix] = field.infix;
field_val[fields::locale] = field.locale;
field_val[fields::store] = field.store;
field_val[fields::stem] = field.stem;
field_val[fields::range_index] = field.range_index;
if(field.embed.count(fields::from) != 0) {
field_val[fields::embed] = field.embed;
}
field_val[fields::nested] = field.nested;
if(field.nested) {
field_val[fields::nested_array] = field.nested_array;
}
if(field.num_dim > 0) {
field_val[fields::num_dim] = field.num_dim;
field_val[fields::vec_dist] = field.vec_dist == ip ? "ip" : "cosine";
}
if (!field.reference.empty()) {
field_val[fields::reference] = field.reference;
field_val[fields::async_reference] = field.is_async_reference;
}
fields_json.push_back(field_val);
if(!field.has_valid_type()) {
return Option<bool>(400, "Field `" + field.name +
"` has an invalid data type `" + field.type +
"`, see docs for supported data types.");
}
if(field.name == default_sorting_field && !field.is_sortable()) {
return Option<bool>(400, "Default sorting field `" + default_sorting_field +
"` is not a sortable type.");
}
if(field.name == default_sorting_field) {
if(field.optional) {
return Option<bool>(400, "Default sorting field `" + default_sorting_field +
"` cannot be an optional field.");
}
if(field.is_geopoint()) {
return Option<bool>(400, "Default sorting field cannot be of type geopoint.");
}
found_default_sorting_field = true;
}
if(field.is_dynamic() && !field.nested && !field.optional) {
return Option<bool>(400, "Field `" + field.name + "` must be an optional field.");
}
if(field.name == ".*" && !field.index) {
return Option<bool>(400, "Field `" + field.name + "` cannot be marked as non-indexable.");
}
if(!field.index && field.facet) {
return Option<bool>(400, "Field `" + field.name + "` cannot be a facet since "
"it's marked as non-indexable.");
}
if(!field.is_sort_field() && field.sort) {
return Option<bool>(400, "Field `" + field.name + "` cannot be a sortable field.");
}
}
if(!default_sorting_field.empty() && !found_default_sorting_field) {
return Option<bool>(400, "Default sorting field is defined as `" + default_sorting_field +
"` but is not found in the schema.");
}
// check for duplicate field names in schema
for(auto& fname_fields: unique_fields) {
if(fname_fields.second.size() > 1) {
// if there are more than 1 field with the same field name, then
// a) only 1 field can be of static type
// b) only 1 field can be of dynamic type
size_t num_static = 0;
size_t num_dynamic = 0;
for(const field* f: fname_fields.second) {
if(f->name == ".*" || f->is_dynamic()) {
num_dynamic++;
} else {
num_static++;
}
}
if(num_static != 0 && num_static > 1) {
return Option<bool>(400, "There are duplicate field names in the schema.");
}
if(num_dynamic != 0 && num_dynamic > 1) {
return Option<bool>(400, "There are duplicate field names in the schema.");
}
}
}
return Option<bool>(true);
}
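// --- Editor's sketch, not part of field.cpp: a minimal, hypothetical example of turning a
// schema's `fields` JSON array into field objects with the helpers defined above. It assumes
// json_fields_to_fields is a static member of `field` (as its qualified definition here
// suggests); the field names and types are illustrative.
#include "field.h"

bool parse_example_schema(std::vector<field>& parsed_fields) {
    nlohmann::json fields_json = nlohmann::json::array();
    fields_json.push_back({{"name", "title"}, {"type", "string"}});
    fields_json.push_back({{"name", "points"}, {"type", "int32"}, {"sort", true}});

    std::string fallback_field_type;
    auto op = field::json_fields_to_fields(false, fields_json, fallback_field_type, parsed_fields);
    return op.ok(); // on failure, op.error() carries a human-readable message
}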
| size: 39,837 | language: C++ | extension: .cpp | total_lines: 742 | avg_line_length: 41.056604 | max_line_length: 162 | alphanum_fraction: 0.54718 | repo: typesense/typesense | stars: 20,571 | forks: 633 | open_issues: 548 | license: GPL-3.0 | extracted: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags: all false |
| id: 3,755 | file_name: personalization_model_manager.cpp | file_path: typesense_typesense/src/personalization_model_manager.cpp |
#include "personalization_model_manager.h"
#include "sole.hpp"
#include <glog/logging.h>
Option<nlohmann::json> PersonalizationModelManager::get_model(const std::string& model_id) {
std::shared_lock lock(models_mutex);
auto it = models.find(model_id);
if (it == models.end()) {
return Option<nlohmann::json>(404, "Model not found");
}
return Option<nlohmann::json>(it->second);
}
Option<std::string> PersonalizationModelManager::add_model(nlohmann::json& model_json, std::string model_id, const bool write_to_disk, const std::string model_data) {
std::unique_lock lock(models_mutex);
if (models.find(model_id) != models.end()) {
return Option<std::string>(409, "Model id already exists");
}
model_json["id"] = model_id.empty() ? sole::uuid4().str() : model_id;
model_id = model_json["id"];
model_json["model_path"] = PersonalizationModel::get_model_subdir(model_json["id"]);
auto validate_op = PersonalizationModel::validate_model(model_json);
if(!validate_op.ok()) {
return Option<std::string>(validate_op.code(), validate_op.error());
}
if(write_to_disk) {
auto model_key = get_model_key(model_json["id"]);
auto create_op = PersonalizationModel::create_model(model_json["id"], model_json, model_data);
if(!create_op.ok()) {
return Option<std::string>(create_op.code(), create_op.error());
}
bool insert_op = store->insert(model_key, model_json.dump(0));
if(!insert_op) {
return Option<std::string>(500, "Error while inserting model into the store");
}
}
models[model_json["id"]] = model_json;
return Option<std::string>(model_id);
}
Option<int> PersonalizationModelManager::init(Store* store) {
PersonalizationModelManager::store = store;
std::vector<std::string> model_strs;
store->scan_fill(std::string(MODEL_KEY_PREFIX) + "_", std::string(MODEL_KEY_PREFIX) + "`", model_strs);
if(!model_strs.empty()) {
LOG(INFO) << "Found " << model_strs.size() << " personalization model(s).";
}
int loaded_models = 0;
for(auto& model_str : model_strs) {
nlohmann::json model_json;
try {
model_json = nlohmann::json::parse(model_str);
} catch (const nlohmann::json::parse_error& e) {
LOG(ERROR) << "Error parsing model JSON: " << e.what();
continue;
}
const std::string& model_id = model_json["id"];
auto add_op = add_model(model_json, model_id, false);
if(!add_op.ok()) {
LOG(ERROR) << "Error while loading personalization model: " << model_id << ", error: " << add_op.error();
continue;
}
loaded_models++;
}
return Option<int>(loaded_models);
}
Option<nlohmann::json> PersonalizationModelManager::delete_model(const std::string& model_id) {
std::unique_lock lock(models_mutex);
auto it = models.find(model_id);
if (it == models.end()) {
return Option<nlohmann::json>(404, "Model not found");
}
nlohmann::json model = it->second;
auto delete_op = PersonalizationModel::delete_model(model_id);
if(!delete_op.ok()) {
return Option<nlohmann::json>(delete_op.code(), delete_op.error());
}
auto model_key = get_model_key(model_id);
bool remove_op = store->remove(model_key);
if(!remove_op) {
return Option<nlohmann::json>(500, "Error while deleting model from the store");
}
models.erase(it);
return Option<nlohmann::json>(model);
}
Option<nlohmann::json> PersonalizationModelManager::get_all_models() {
std::shared_lock lock(models_mutex);
nlohmann::json models_json = nlohmann::json::array();
for (auto& [id, model] : models) {
models_json.push_back(model);
}
return Option<nlohmann::json>(models_json);
}
const std::string PersonalizationModelManager::get_model_key(const std::string& model_id) {
return std::string(MODEL_KEY_PREFIX) + "_" + model_id;
}
Option<nlohmann::json> PersonalizationModelManager::update_model(const std::string& model_id, nlohmann::json model, const std::string& model_data) {
std::unique_lock lock(models_mutex);
auto it = models.find(model_id);
if (it == models.end()) {
return Option<nlohmann::json>(404, "Model not found");
}
nlohmann::json model_copy = it->second;
for (auto& [key, value] : model.items()) {
model_copy[key] = value;
}
auto validate_res = PersonalizationModel::validate_model(model_copy);
if (!validate_res.ok()) {
return Option<nlohmann::json>(validate_res.code(), validate_res.error());
}
auto model_key = get_model_key(model_id);
bool insert_op = store->insert(model_key, model_copy.dump(0));
if(!insert_op) {
return Option<nlohmann::json>(500, "Error while inserting model into the store");
}
auto update_op = PersonalizationModel::update_model(model_id, model_copy, model_data);
if(!update_op.ok()) {
return Option<nlohmann::json>(update_op.code(), update_op.error());
}
models[model_id] = model_copy;
return Option<nlohmann::json>(model_copy);
}
| size: 5,202 | language: C++ | extension: .cpp | total_lines: 119 | avg_line_length: 37.554622 | max_line_length: 166 | alphanum_fraction: 0.645672 | repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (redpajama, githubcode, stackv1, stackv2): all false |

| id: 3,756 | file_name: ids_t.cpp | file_path: typesense_typesense/src/ids_t.cpp |
#include "ids_t.h"
#include "id_list.h"
int64_t compact_id_list_t::upsert(const uint32_t id) {
// format: id1, id2, id3
uint32_t last_id = (length == 0) ? 0 : ids[length - 1];
int64_t extra_length_needed = 0;
if(length == 0 || id > last_id) {
extra_length_needed = 1;
if(length + extra_length_needed > capacity) {
// enough storage should have been provided upstream
return (length + extra_length_needed) - capacity;
}
// can just append to the end
ids[length++] = id;
} else {
// locate position and shift contents to make space available
int64_t i = 0;
while(i < length) {
size_t existing_id = ids[i];
if(existing_id == id) {
break;
}
else if(existing_id > id) {
extra_length_needed = 1;
if(length + extra_length_needed > capacity) {
// enough storage should have been provided upstream
return (length + extra_length_needed) - capacity;
}
// shift index [i..length-1] by `extra_length_needed` positions
int64_t shift_index = length + extra_length_needed - 1;
while((shift_index - extra_length_needed) >= 0 && shift_index >= i) {
ids[shift_index] = ids[shift_index - extra_length_needed];
shift_index--;
}
// now store the new offsets in the shifted space
ids[i++] = id;
break;
}
i++;
}
length += extra_length_needed;
}
return 0;
}
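
// --- Hedged usage sketch (editor's addition, not part of the upstream file) ---
// upsert() above never grows the backing allocation itself: when the list is full it returns
// the number of extra slots required and leaves the contents untouched, and the caller is
// expected to realloc and retry (which is what ids_t::upsert() does further below). A minimal
// illustration of that contract:
[[maybe_unused]] static void compact_upsert_contract_sketch() {
    std::vector<uint32_t> seed = {1, 5, 9};
    compact_id_list_t* list = compact_id_list_t::create(seed.size(), seed);  // capacity == 3, already full
    int64_t extra = list->upsert(7);             // no room: returns 1, nothing is inserted
    if(extra > 0) {
        size_t new_bytes = sizeof(compact_id_list_t) + ((list->capacity + extra) * sizeof(uint32_t));
        auto grown = (compact_id_list_t*) realloc(list, new_bytes);
        if(grown != nullptr) {
            list = grown;
            list->capacity += extra;
            list->upsert(7);                     // succeeds, keeping ids sorted: 1, 5, 7, 9
        }
    }
    free(list);
}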
void compact_id_list_t::erase(const uint32_t id) {
// locate position and shift contents to collapse space vacated
size_t i = 0;
while(i < length) {
size_t existing_id = ids[i];
if(existing_id > id) {
// not found!
return ;
}
if(existing_id == id) {
size_t shift_offset = 1;
while(i+shift_offset < length) {
ids[i] = ids[i + shift_offset];
i++;
}
length -= shift_offset;
break;
}
i++;
}
}
compact_id_list_t* compact_id_list_t::create(uint32_t num_ids, const std::vector<uint32_t>& ids) {
    return create(num_ids, ids.data());
}
compact_id_list_t* compact_id_list_t::create(uint32_t num_ids, const uint32_t* ids) {
// format: id1, id2, id3, ...
compact_id_list_t* pl = (compact_id_list_t*) malloc(sizeof(compact_id_list_t) +
(num_ids * sizeof(uint32_t)));
pl->length = 0;
pl->capacity = num_ids;
for(size_t i = 0; i < num_ids; i++) {
pl->upsert(ids[i]);
}
return pl;
}
id_list_t* compact_id_list_t::to_full_ids_list() const {
id_list_t* pl = new id_list_t(ids_t::MAX_BLOCK_ELEMENTS);
size_t i = 0;
while(i < length) {
size_t existing_id = ids[i];
pl->upsert(existing_id);
i++;
}
return pl;
}
uint32_t compact_id_list_t::last_id() {
return (length == 0) ? UINT32_MAX : ids[length - 1];
}
uint32_t compact_id_list_t::num_ids() const {
return length;
}
uint32_t compact_id_list_t::first_id() {
if(length == 0) {
return 0;
}
return ids[0];
}
bool compact_id_list_t::contains(uint32_t id) {
size_t i = 0;
while(i < length) {
size_t existing_id = ids[i];
if(existing_id > id) {
// not found!
return false;
}
if(existing_id == id) {
return true;
}
i++;
}
return false;
}
size_t compact_id_list_t::intersect_count(const uint32_t* res_ids, size_t res_ids_len) {
size_t count = 0;
size_t i = 0;
size_t res_index = 0;
while(i < length && res_index < res_ids_len) {
size_t curr_id = ids[i];
if(curr_id < res_ids[res_index]) {
i++;
} else if(curr_id > res_ids[res_index]) {
// returns index that is >= to value or last if no such element is found.
res_index = std::lower_bound(res_ids + res_index, res_ids + res_ids_len, curr_id) - res_ids;
} else {
i++;
res_index++;
count++;
}
}
return count;
}
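
// --- Hedged worked example (editor's addition, not part of the upstream file) ---
// intersect_count() above walks the compact list and the sorted result array with two cursors,
// and when the current id overshoots it uses std::lower_bound to jump ahead in the result
// array instead of stepping one element at a time. A small trace:
[[maybe_unused]] static void intersect_count_sketch() {
    std::vector<uint32_t> stored = {2, 4, 8};
    compact_id_list_t* list = compact_id_list_t::create(stored.size(), stored);
    const uint32_t result_ids[] = {1, 2, 3, 4, 5};
    size_t common = list->intersect_count(result_ids, 5);   // == 2 (ids 2 and 4 are shared)
    (void) common;
    free(list);
}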
/* posting operations */
void ids_t::upsert(void*& obj, uint32_t id) {
if(IS_COMPACT_IDS(obj)) {
compact_id_list_t* list = (compact_id_list_t*) RAW_IDS_PTR(obj);
int64_t extra_capacity_required = list->upsert(id);
if(extra_capacity_required == 0) {
// upsert succeeded
return;
}
if((list->capacity + extra_capacity_required) > COMPACT_LIST_THRESHOLD_LENGTH) {
// we have to convert to a full posting list
id_list_t* full_list = list->to_full_ids_list();
free(list);
obj = full_list;
}
else {
// grow the container by 30%
size_t new_capacity = std::min<size_t>((list->capacity + extra_capacity_required) * 1.3,
COMPACT_LIST_THRESHOLD_LENGTH);
size_t new_capacity_bytes = sizeof(compact_id_list_t) + (new_capacity * sizeof(uint32_t));
auto new_list = (compact_id_list_t *) realloc(list, new_capacity_bytes);
if(new_list == nullptr) {
abort();
}
list = new_list;
list->capacity = new_capacity;
obj = SET_COMPACT_IDS(list);
list->upsert(id);
return ;
}
}
// either `obj` is already a full list or was converted to a full list above
id_list_t* list = (id_list_t*)(obj);
list->upsert(id);
}
void ids_t::erase(void*& obj, uint32_t id) {
if(IS_COMPACT_IDS(obj)) {
compact_id_list_t* list = COMPACT_IDS_PTR(obj);
list->erase(id);
// if the list becomes too small, we resize it to save memory
if(list->length < list->capacity/2) {
// resize container
size_t new_capacity = list->capacity/2;
size_t new_capacity_bytes = sizeof(compact_id_list_t) + (new_capacity * sizeof(uint32_t));
auto new_list = (compact_id_list_t *) realloc(list, new_capacity_bytes);
if(new_list == nullptr) {
abort();
}
list = new_list;
list->capacity = new_capacity;
obj = SET_COMPACT_IDS(list);
}
} else {
id_list_t* list = (id_list_t*)(obj);
list->erase(id);
if(list->num_blocks() == 1 && list->get_root()->size() <= COMPACT_LIST_THRESHOLD_LENGTH) {
// convert to compact posting format
auto root_block = list->get_root();
auto ids = root_block->ids.uncompress();
compact_id_list_t* compact_list = compact_id_list_t::create(root_block->size(), ids);
delete [] ids;
delete list;
obj = SET_COMPACT_IDS(compact_list);
}
}
}
uint32_t ids_t::num_ids(const void* obj) {
if(IS_COMPACT_IDS(obj)) {
compact_id_list_t* list = COMPACT_IDS_PTR(obj);
return list->num_ids();
} else {
id_list_t* list = (id_list_t*)(obj);
return list->num_ids();
}
}
uint32_t ids_t::first_id(const void* obj) {
if(IS_COMPACT_IDS(obj)) {
compact_id_list_t* list = COMPACT_IDS_PTR(obj);
return list->first_id();
} else {
id_list_t* list = (id_list_t*)(obj);
return list->first_id();
}
}
bool ids_t::contains(const void* obj, uint32_t id) {
if(IS_COMPACT_IDS(obj)) {
compact_id_list_t* list = COMPACT_IDS_PTR(obj);
return list->contains(id);
} else {
id_list_t* list = (id_list_t*)(obj);
return list->contains(id);
}
}
void ids_t::merge(const std::vector<void*>& raw_posting_lists, std::vector<uint32_t>& result_ids) {
// we will have to convert the compact posting list (if any) to full form
std::vector<id_list_t*> id_lists;
std::vector<id_list_t*> expanded_id_lists;
to_expanded_id_lists(raw_posting_lists, id_lists, expanded_id_lists);
id_list_t::merge(id_lists, result_ids);
for(id_list_t* expanded_plist: expanded_id_lists) {
delete expanded_plist;
}
}
void ids_t::intersect(const std::vector<void*>& raw_posting_lists, std::vector<uint32_t>& result_ids) {
// we will have to convert the compact posting list (if any) to full form
std::vector<id_list_t*> id_lists;
std::vector<id_list_t*> expanded_id_lists;
to_expanded_id_lists(raw_posting_lists, id_lists, expanded_id_lists);
id_list_t::intersect(id_lists, result_ids);
for(auto expanded_plist: expanded_id_lists) {
delete expanded_plist;
}
}
void ids_t::to_expanded_id_lists(const std::vector<void*>& raw_posting_lists, std::vector<id_list_t*>& id_lists,
std::vector<id_list_t*>& expanded_id_lists) {
for(size_t i = 0; i < raw_posting_lists.size(); i++) {
auto raw_posting_list = raw_posting_lists[i];
if(IS_COMPACT_IDS(raw_posting_list)) {
auto compact_posting_list = COMPACT_IDS_PTR(raw_posting_list);
id_list_t* full_posting_list = compact_posting_list->to_full_ids_list();
id_lists.emplace_back(full_posting_list);
expanded_id_lists.push_back(full_posting_list);
} else {
id_list_t* full_posting_list = (id_list_t*)(raw_posting_list);
id_lists.emplace_back(full_posting_list);
}
}
}
void ids_t::destroy_list(void*& obj) {
if(obj == nullptr) {
return;
}
if(IS_COMPACT_IDS(obj)) {
compact_id_list_t* list = COMPACT_IDS_PTR(obj);
free(list); // assigned via malloc, so must be free()d
} else {
id_list_t* list = (id_list_t*)(obj);
delete list;
}
obj = nullptr;
}
uint32_t* ids_t::uncompress(void*& obj) {
if(IS_COMPACT_IDS(obj)) {
compact_id_list_t* list = COMPACT_IDS_PTR(obj);
uint32_t* arr = new uint32_t[list->length];
std::memcpy(arr, list->ids, list->length * sizeof(uint32_t));
return arr;
} else {
id_list_t* list = (id_list_t*)(obj);
return list->uncompress();
}
}
void ids_t::uncompress(void*& obj, std::vector<uint32_t>& ids) {
if(IS_COMPACT_IDS(obj)) {
compact_id_list_t* list = COMPACT_IDS_PTR(obj);
for(size_t i = 0; i < list->length; i++) {
ids.push_back(list->ids[i]);
}
} else {
id_list_t* list = (id_list_t*)(obj);
list->uncompress(ids);
}
}
size_t ids_t::intersect_count(void*& obj, const uint32_t* result_ids, size_t result_ids_len,
bool estimate_facets, size_t facet_sample_mod_value) {
if(IS_COMPACT_IDS(obj)) {
compact_id_list_t* list = COMPACT_IDS_PTR(obj);
return list->intersect_count(result_ids, result_ids_len);
} else {
id_list_t* list = (id_list_t*)(obj);
return list->intersect_count(result_ids, result_ids_len, estimate_facets, facet_sample_mod_value);
}
}
void* ids_t::create(const std::vector<uint32_t>& ids) {
if(ids.size() < COMPACT_LIST_THRESHOLD_LENGTH) {
return SET_COMPACT_IDS(compact_id_list_t::create(ids.size(), ids));
} else {
id_list_t* pl = new id_list_t(ids_t::MAX_BLOCK_ELEMENTS);
for(auto id: ids) {
pl->upsert(id);
}
return pl;
}
}
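
// --- Hedged usage sketch (editor's addition, not part of the upstream file) ---
// The `void*` handles produced by create() above transparently switch between the compact
// array form and the block-based id_list_t form; callers only ever go through the static
// ids_t helpers. A minimal round trip:
[[maybe_unused]] static void ids_t_usage_sketch() {
    void* handle = ids_t::create({3, 1, 2});     // small input stays in the compact form
    ids_t::upsert(handle, 10);                   // grows (or converts) the container as needed
    bool has_two = ids_t::contains(handle, 2);   // true; lookups work on either representation
    uint32_t count = ids_t::num_ids(handle);     // 4
    (void) has_two; (void) count;
    ids_t::destroy_list(handle);                 // free()s the compact form, delete-s the full form
}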
void ids_t::block_intersector_t::split_lists(size_t concurrency,
std::vector<std::vector<id_list_t::iterator_t>>& partial_its_vec) {
const size_t num_blocks = this->id_lists[0]->num_blocks();
const size_t window_size = (num_blocks + concurrency - 1) / concurrency; // rounds up
size_t blocks_traversed = 0;
id_list_t::block_t* start_block = this->id_lists[0]->get_root();
id_list_t::block_t* curr_block = start_block;
size_t window_index = 0;
while(curr_block != nullptr) {
blocks_traversed++;
if(blocks_traversed % window_size == 0 || blocks_traversed == num_blocks) {
// construct partial iterators and intersect within them
std::vector<id_list_t::iterator_t>& partial_its = partial_its_vec[window_index];
for(size_t i = 0; i < this->id_lists.size(); i++) {
id_list_t::block_t* p_start_block = nullptr;
id_list_t::block_t* p_end_block = nullptr;
// [1, 2] [3, 4] [5, 6]
// [3, 5] [6]
if(i == 0) {
p_start_block = start_block;
p_end_block = curr_block->next;
} else {
auto start_block_first_id = start_block->ids.at(0);
auto end_block_last_id = curr_block->ids.last();
p_start_block = this->id_lists[i]->block_of(start_block_first_id);
id_list_t::block_t* last_block = this->id_lists[i]->block_of(end_block_last_id);
if(p_start_block == last_block && p_start_block != nullptr) {
p_end_block = p_start_block->next;
} else {
p_end_block = last_block == nullptr ? nullptr : last_block->next;
}
}
partial_its.emplace_back(p_start_block, p_end_block, nullptr, false);
}
start_block = curr_block->next;
window_index++;
}
curr_block = curr_block->next;
}
}
| size: 13,726 | language: C++ | extension: .cpp | total_lines: 359 | avg_line_length: 28.905292 | max_line_length: 112 | alphanum_fraction: 0.545626 | repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (redpajama, githubcode, stackv1, stackv2): all false |

| id: 3,757 | file_name: batched_indexer.cpp | file_path: typesense_typesense/src/batched_indexer.cpp |
#include "batched_indexer.h"
#include "core_api.h"
#include "thread_local_vars.h"
#include "cached_resource_stat.h"
#include "collection_manager.h"
BatchedIndexer::BatchedIndexer(HttpServer* server, Store* store, Store* meta_store, const size_t num_threads,
const Config& config, const std::atomic<bool>& skip_writes):
server(server), store(store), meta_store(meta_store), num_threads(num_threads),
last_gc_run(std::chrono::high_resolution_clock::now()), quit(false),
config(config), skip_writes(skip_writes) {
queues.resize(num_threads);
qmutuxes = new await_t[num_threads];
skip_index_iter_upper_bound = new rocksdb::Slice(skip_index_upper_bound_key);
}
std::string get_ref_coll_names(const std::string& body, std::unordered_set<std::string>& referenced_collections) {
std::string collection_name;
auto const& obj = nlohmann::json::parse(body, nullptr, false);
if (!obj.is_discarded() && obj.is_object() && obj.contains("name") && obj["name"].is_string() &&
obj.contains("fields")) {
collection_name = obj["name"];
for (const auto &field: obj["fields"]) {
if (!field.contains("reference")) {
continue;
}
std::vector<std::string> split_result;
StringUtils::split(field["reference"], split_result, ".");
referenced_collections.insert(split_result.front());
}
}
return collection_name;
}
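
// --- Hedged usage sketch (editor's addition, not part of the upstream file) ---
// get_ref_coll_names() above extracts, from a collection-creation payload, the names of the
// collections referenced by its fields (the part before the first '.' of each `reference`),
// so that writes touching dependent collections can be ordered. Illustration:
[[maybe_unused]] static void get_ref_coll_names_sketch() {
    std::unordered_set<std::string> referenced;
    const std::string body = R"({"name": "orders", "fields": [
        {"name": "product_id", "type": "string", "reference": "products.id"}
    ]})";
    std::string coll_name = get_ref_coll_names(body, referenced);
    // coll_name == "orders"; referenced == {"products"}
    (void) coll_name;
}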
void BatchedIndexer::enqueue(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
// Called by the raft write thread: goal is to quickly send the request to a queue and move on
// NOTE: it's ok to access `req` and `res` in this function without synchronization
    // because the read thread for *this* request is paused now and resumes only when messaged at the end
//LOG(INFO) << "BatchedIndexer::enqueue";
uint32_t chunk_sequence = 0;
{
uint64_t now = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
std::unique_lock lk(mutex);
auto req_res_map_it = req_res_map.find(req->start_ts);
if(req_res_map_it == req_res_map.end()) {
// first chunk
req_res_t req_res(req->start_ts, "", req, res, now, 1, 0, false);
req_res_map.emplace(req->start_ts, req_res);
} else {
chunk_sequence = req_res_map_it->second.num_chunks;
req_res_map_it->second.num_chunks += 1;
req_res_map_it->second.last_updated = now;
}
}
const std::string& req_key_prefix = get_req_prefix_key(req->start_ts);
const std::string& request_chunk_key = req_key_prefix + StringUtils::serialize_uint32_t(chunk_sequence);
//LOG(INFO) << "request_chunk_key: " << req->start_ts << "_" << chunk_sequence << ", req body: " << req->body;
store->insert(request_chunk_key, req->to_json());
bool is_old_serialized_request = (req->start_ts == 0);
bool read_more_input = (req->_req != nullptr && req->_req->proceed_req);
bool is_live_req = res->is_alive;
if(req->last_chunk_aggregate) {
//LOG(INFO) << "Last chunk for req_id: " << req->start_ts;
queued_writes += (chunk_sequence + 1);
{
const std::string& coll_name = get_collection_name(req);
uint64_t queue_id = StringUtils::hash_wy(coll_name.c_str(), coll_name.size()) % num_threads;
req->params["collection"] = coll_name;
{
std::unique_lock lk2(mutex);
req_res_map[req->start_ts].is_complete = true;
}
bool queue_write = true;
if(!is_live_req) {
if (is_coll_create_route(req->route_hash)) {
// Save reference mapping to take care of ordering of requests
std::unordered_set<std::string> referenced_collections;
get_ref_coll_names(req->body, referenced_collections);
if (!referenced_collections.empty()) {
std::lock_guard lock(mutex);
coll_to_references[coll_name] = std::move(referenced_collections);
}
} else if (is_drop_collection_route(req->route_hash)) {
std::lock_guard lock(mutex);
coll_to_references.erase(coll_name);
} else {
auto ref_colls_it = coll_to_references.find(coll_name);
const auto& ref_collections = (ref_colls_it != coll_to_references.end()) ? ref_colls_it->second :
CollectionManager::get_instance().get_collection_references(coll_name);
if(!ref_collections.empty()) {
// If this request involves a collection that references other collection(s), we have to wait
// for the other collection(s) request(s) that arrived before this request to finish by pushing
// this request onto a waiting queue.
std::unique_lock lk(refq_wait.mcv);
reference_q.emplace_back(queue_id, req->start_ts);
lk.unlock();
refq_wait.cv.notify_one();
queue_write = false;
}
}
}
req->body = "";
if(queue_write) {
std::unique_lock qlk(qmutuxes[queue_id].mcv);
queues[queue_id].emplace_back(req->start_ts);
qlk.unlock();
qmutuxes[queue_id].cv.notify_one();
}
}
// IMPORTANT: must not read `req` variables (except _req) henceforth to prevent data races with indexing thread
if(is_old_serialized_request) {
// Indicates a serialized request from a version that did not support batching (v0.21 and below).
// We can only do serial writes as we cannot reliably distinguish one streaming request from another.
// So, wait for `req_res_map` to be empty before proceeding
while(true) {
{
std::unique_lock lk(mutex);
if(req_res_map.empty()) {
break;
}
}
std::this_thread::sleep_for(std::chrono::milliseconds (10));
}
}
} else {
req->body = "";
}
if(read_more_input) {
// Tell the http library to read more input data
deferred_req_res_t* req_res = new deferred_req_res_t(req, res, server, true);
server->get_message_dispatcher()->send_message(HttpServer::REQUEST_PROCEED_MESSAGE, req_res);
}
}
std::string BatchedIndexer::get_collection_name(const std::shared_ptr<http_req>& req) {
std::string& coll_name = req->params["collection"];
if(coll_name.empty()) {
route_path* rpath = nullptr;
bool route_found = server->get_route(req->route_hash, &rpath);
// ensure that collection creation is sent to the same queue as writes to that collection
if(route_found && rpath->handler == post_create_collection) {
nlohmann::json obj = nlohmann::json::parse(req->body, nullptr, false);
if(!obj.is_discarded() && obj.is_object() &&
obj.count("name") != 0 && obj["name"].is_string()) {
coll_name = obj["name"];
}
} else if(route_found && rpath->handler == post_conversation_model) {
nlohmann::json obj = nlohmann::json::parse(req->body, nullptr, false);
if(!obj.is_discarded() && obj.is_object() &&
obj.count("history_collection") != 0 && obj["history_collection"].is_string()) {
coll_name = obj["history_collection"];
}
}
}
return coll_name;
}
void BatchedIndexer::run() {
LOG(INFO) << "Starting batch indexer with " << num_threads << " threads.";
ThreadPool* thread_pool = new ThreadPool(num_threads);
skip_index_iter = meta_store->scan(SKIP_INDICES_PREFIX, skip_index_iter_upper_bound);
populate_skip_index();
LOG(INFO) << "BatchedIndexer skip_index: " << skip_index;
for(size_t i = 0; i < num_threads; i++) {
std::deque<uint64_t>& queue = queues[i];
await_t& queue_mutex = qmutuxes[i];
thread_pool->enqueue([&queue, &queue_mutex, this, i]() {
while(!quit) {
std::unique_lock<std::mutex> qlk(queue_mutex.mcv);
queue_mutex.cv.wait(qlk, [&] { return quit || !queue.empty(); });
if(quit) {
break;
}
uint64_t req_id = queue.front();
queue.pop_front();
qlk.unlock();
std::unique_lock mlk(mutex);
auto req_res_map_it = req_res_map.find(req_id);
if(req_res_map_it == req_res_map.end()) {
LOG(ERROR) << "Req ID " << req_id << " not found in req_res_map.";
continue;
}
req_res_t& orig_req_res = req_res_map_it->second;
mlk.unlock();
// scan db for all logs associated with request
const std::string& req_key_prefix = get_req_prefix_key(req_id);
/* Format of the key: $RL_reqId_chunkId
                   NOTE: we use an explicit `next_chunk_index` so that reads can resume from a partially processed request.
*/
const std::string& req_key_start_prefix = req_key_prefix + StringUtils::serialize_uint32_t(
orig_req_res.next_chunk_index);
const std::string& req_key_upper_bound = get_req_suffix_key(req_id); // cannot inline this
rocksdb::Slice upper_bound(req_key_upper_bound);
rocksdb::Iterator* iter = store->scan(req_key_start_prefix, &upper_bound);
// used to handle partial JSON documents caused by chunking
std::string& prev_body = orig_req_res.prev_req_body;
const std::shared_ptr<http_req>& orig_req = orig_req_res.req;
const std::shared_ptr<http_res>& orig_res = orig_req_res.res;
bool is_live_req = orig_res->is_alive;
route_path* found_rpath = nullptr;
bool route_found = server->get_route(orig_req->route_hash, &found_rpath);
bool async_res = false;
while(iter->Valid() && iter->key().starts_with(req_key_prefix)) {
std::shared_lock slk(pause_mutex); // used for snapshot
orig_req->body = prev_body;
orig_req->load_from_json(iter->value().ToString());
// update thread local for reference during a crash
write_log_index = orig_req->log_index;
if(write_log_index == skip_index) {
LOG(ERROR) << "Skipping write log index " << write_log_index
<< " which seems to have triggered a crash previously.";
populate_skip_index();
}
else {
//LOG(INFO) << "index req " << req_id << ", chunk index: " << orig_req_res.next_chunk_index;
auto resource_check = cached_resource_stat_t::get_instance()
.has_enough_resources(config.get_data_dir(),
config.get_disk_used_max_percentage(),
config.get_memory_used_max_percentage());
if (resource_check != cached_resource_stat_t::OK && orig_req->do_resource_check()) {
const std::string& err_msg = "Rejecting write: running out of resource type: " +
std::string(magic_enum::enum_name(resource_check));
LOG(ERROR) << err_msg;
orig_res->set_422(err_msg);
orig_res->final = true;
async_req_res_t* async_req_res = new async_req_res_t(orig_req, orig_res, true);
server->get_message_dispatcher()->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, async_req_res);
goto end;
}
else if(route_found) {
if(skip_writes && found_rpath->handler != post_config) {
orig_res->set(422, "Skipping write.");
orig_res->final = true;
async_req_res_t* async_req_res = new async_req_res_t(orig_req, orig_res, true);
server->get_message_dispatcher()->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, async_req_res);
goto end;
}
async_res = found_rpath->async_res;
try {
found_rpath->handler(orig_req, orig_res);
} catch(const std::exception& e) {
const std::string& api_action = found_rpath->_get_action();
LOG(ERROR) << "Exception while calling handler " << api_action;
LOG(ERROR) << "Raw error: " << e.what();
// bad request gets a response immediately
orig_res->set_400("Bad request.");
orig_res->final = true;
async_res = false;
}
prev_body = orig_req->body;
} else {
orig_res->set_404();
}
if(is_live_req && (!route_found ||!async_res)) {
// sync request gets a response immediately
async_req_res_t* async_req_res = new async_req_res_t(orig_req, orig_res, true);
server->get_message_dispatcher()->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, async_req_res);
}
if(!route_found) {
goto end;
}
}
end:
queued_writes--;
orig_req_res.next_chunk_index++;
iter->Next();
if(quit) {
break;
}
}
delete iter;
//LOG(INFO) << "Erasing request data from disk and memory for request " << req_id;
// we can delete the buffered request content
store->delete_range(req_key_prefix, req_key_prefix + StringUtils::serialize_uint32_t(UINT32_MAX));
std::unique_lock lk(mutex);
req_res_map.erase(req_id);
lk.unlock();
refq_wait.cv.notify_one();
}
});
}
std::thread ref_sequence_thread([&]() {
// Waits for dependent requests that are ahead to finish before pushing a request onto main indexing queue.
while(!quit) {
std::unique_lock ref_qlk(refq_wait.mcv);
refq_wait.cv.wait(ref_qlk, [&] {
return quit || !reference_q.empty();
});
if(quit) {
break;
}
std::lock_guard lock(mutex);
// We will iterate on the reference queue and check if there are any ongoing requests that have been
// sent prior to this request.
auto reference_q_it = reference_q.begin();
while(reference_q_it != reference_q.end()) {
bool found_ref_coll = false;
auto req_res_it = req_res_map.find(reference_q_it->start_ts);
if(req_res_it == req_res_map.end()) {
reference_q_it = reference_q.erase(reference_q_it);
continue;
}
auto const& coll_name = req_res_it->second.req->params["collection"];
auto ref_colls_it = coll_to_references.find(coll_name);
const auto& ref_collections = (ref_colls_it != coll_to_references.end()) ? ref_colls_it->second :
CollectionManager::get_instance().get_collection_references(coll_name);
if(ref_collections.empty()) {
// This request is not dependent on any other request. Push this request onto main processing queue
// and remove node from queue.
std::unique_lock qlk(qmutuxes[reference_q_it->queue_id].mcv);
queues[reference_q_it->queue_id].emplace_back(reference_q_it->start_ts);
qlk.unlock();
qmutuxes[reference_q_it->queue_id].cv.notify_one();
reference_q_it = reference_q.erase(reference_q_it);
continue;
}
for (auto it = req_res_map.begin(); it != req_res_it; it++) {
auto const& req_coll_name = it->second.req->params["collection"];
if(ref_collections.count(req_coll_name) != 0) {
found_ref_coll = true;
break;
}
}
if(!found_ref_coll) {
// All the dependent requests have been completed. Push this request onto main processing queue and
// remove node from queue.
std::unique_lock qlk(qmutuxes[reference_q_it->queue_id].mcv);
queues[reference_q_it->queue_id].emplace_back(reference_q_it->start_ts);
qlk.unlock();
qmutuxes[reference_q_it->queue_id].cv.notify_one();
reference_q_it = reference_q.erase(reference_q_it);
} else {
reference_q_it++;
}
}
}
});
uint64_t stuck_counter = 0;
uint64_t prev_count = 0;
while(!quit) {
std::this_thread::sleep_for(std::chrono::milliseconds (1000));
// do gc, if we are due for one
uint64_t seconds_elapsed = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::high_resolution_clock::now() - last_gc_run).count();
if(seconds_elapsed > GC_INTERVAL_SECONDS) {
std::unique_lock lk(mutex);
LOG(INFO) << "Running GC for aborted requests, req map size: " << req_res_map.size();
if(req_res_map.size() > 0 && prev_count == req_res_map.size()) {
stuck_counter++;
if(stuck_counter > 3) {
size_t max_loop = 0;
for(const auto& it : req_res_map) {
max_loop++;
LOG(INFO) << "Stuck req_key: " << it.first;
if(max_loop == 5) {
break;
}
}
stuck_counter = 0;
}
} else {
stuck_counter = 0;
}
prev_count = req_res_map.size();
// iterate through all map entries and delete ones which are not complete but > GC_PRUNE_MAX_SECONDS
for (auto it = req_res_map.cbegin(); it != req_res_map.cend();) {
uint64_t seconds_since_batch_update = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now().time_since_epoch()).count() - it->second.last_updated;
//LOG(INFO) << "GC checking on req id: " << it->first;
//LOG(INFO) << "Seconds since last batch update: " << seconds_since_batch_update;
if(!it->second.is_complete && seconds_since_batch_update > GC_PRUNE_MAX_SECONDS) {
LOG(INFO) << "Deleting partial upload for req id " << it->second.start_ts;
const std::string& req_key_prefix = get_req_prefix_key(it->second.start_ts);
store->delete_range(req_key_prefix, req_key_prefix + StringUtils::serialize_uint32_t(UINT32_MAX));
if(it->second.res->is_alive) {
it->second.res->final = true;
async_req_res_t* async_req_res = new async_req_res_t(it->second.req, it->second.res, true);
server->get_message_dispatcher()->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, async_req_res);
}
it = req_res_map.erase(it);
} else {
it++;
}
}
last_gc_run = std::chrono::high_resolution_clock::now();
}
}
LOG(INFO) << "Notifying batch indexer threads about shutdown...";
for(size_t i = 0; i < num_threads; i++) {
await_t& queue_mutex = qmutuxes[i];
queue_mutex.cv.notify_one();
}
LOG(INFO) << "Notifying reference sequence thread about shutdown...";
refq_wait.cv.notify_one();
ref_sequence_thread.join();
LOG(INFO) << "Batched indexer threadpool shutdown...";
thread_pool->shutdown();
delete thread_pool;
}
std::string BatchedIndexer::get_req_prefix_key(uint64_t req_id) {
const std::string& req_key_prefix = RAFT_REQ_LOG_PREFIX + StringUtils::serialize_uint64_t(req_id) + "_";
return req_key_prefix;
}
std::string BatchedIndexer::get_req_suffix_key(uint64_t req_id) {
const std::string& req_key_prefix = RAFT_REQ_LOG_PREFIX + StringUtils::serialize_uint64_t(req_id) + "`";
return req_key_prefix;
}
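
// --- Hedged note (editor's addition, not part of the upstream file) ---
// The suffix key differs from the prefix key only in its final character: '`' (0x60) is the
// ASCII character immediately after '_' (0x5F), so the half-open range [prefix, suffix) covers
// every "$RL_<reqId>_<chunkId>" chunk key of a single request and nothing else when handed to
// RocksDB as a scan upper bound. (The same trick appears in the MODEL_KEY_PREFIX scan of
// personalization_model_manager.cpp.)
static_assert('`' == '_' + 1, "request suffix key relies on '`' sorting immediately after '_'");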
BatchedIndexer::~BatchedIndexer() {
delete [] qmutuxes;
delete skip_index_iter_upper_bound;
delete skip_index_iter;
}
void BatchedIndexer::stop() {
quit = true;
}
int64_t BatchedIndexer::get_queued_writes() {
return queued_writes;
}
void BatchedIndexer::populate_skip_index() {
if(skip_index_iter->Valid() && skip_index_iter->key().starts_with(SKIP_INDICES_PREFIX)) {
const std::string& index_value = skip_index_iter->value().ToString();
if(StringUtils::is_int64_t(index_value)) {
skip_index = std::stoll(index_value);
}
skip_index_iter->Next();
} else {
skip_index = UNSET_SKIP_INDEX;
}
}
void BatchedIndexer::persist_applying_index() {
LOG(INFO) << "Saving currently applying index: " << write_log_index;
std::string key = SKIP_INDICES_PREFIX + std::to_string(write_log_index);
meta_store->insert(key, std::to_string(write_log_index));
}
void BatchedIndexer::serialize_state(nlohmann::json& state) {
// requires external synchronization!
state["queued_writes"] = queued_writes.load();
state["req_res_map"] = nlohmann::json();
size_t num_reqs_stored = 0;
std::unique_lock lk(mutex);
for(auto& kv: req_res_map) {
std::string req_key = std::to_string(kv.first);
state["req_res_map"].emplace(req_key, nlohmann::json());
nlohmann::json& req_res = state["req_res_map"][req_key];
req_res["start_ts"] = kv.second.start_ts;
req_res["last_updated"] = kv.second.last_updated;
req_res["num_chunks"] = kv.second.num_chunks;
req_res["next_chunk_index"] = kv.second.next_chunk_index;
req_res["is_complete"] = kv.second.is_complete;
req_res["req"] = kv.second.req->to_json();
req_res["prev_req_body"] = kv.second.prev_req_body;
num_reqs_stored++;
//LOG(INFO) << "req_key: " << req_key << ", next_chunk_index: " << kv.second.next_chunk_index;
}
state["reference_q"] = nlohmann::json::array();
for(auto& ref_req: reference_q) {
nlohmann::json ref_req_obj;
ref_req_obj["queue_id"] = ref_req.queue_id;
ref_req_obj["start_ts"] = ref_req.start_ts;
state["reference_q"].push_back(ref_req_obj);
}
LOG(INFO) << "Serialized " << num_reqs_stored << " in-flight requests for snapshot.";
}
void BatchedIndexer::load_state(const nlohmann::json& state) {
queued_writes = state["queued_writes"].get<int64_t>();
size_t num_reqs_restored = 0;
std::set<uint64_t> queue_ids;
for(auto& kv: state["req_res_map"].items()) {
std::shared_ptr<http_req> req = std::make_shared<http_req>();
req->load_from_json(kv.value()["req"].get<std::string>());
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
req_res_t req_res(kv.value()["start_ts"].get<uint64_t>(),
kv.value()["prev_req_body"].get<std::string>(), req, res,
kv.value()["last_updated"].get<uint64_t>(),
kv.value()["num_chunks"].get<uint32_t>(),
kv.value()["next_chunk_index"].get<uint32_t>(),
kv.value()["is_complete"].get<bool>());
{
std::unique_lock mlk(mutex);
req_res_map.emplace(std::stoull(kv.key()), req_res);
}
// add only completed requests to their respective collection-based queues
// the rest will be added by enqueue() when raft log is completely read
if(req_res.is_complete) {
LOG(INFO) << "req_res.start_ts: " << req_res.start_ts
<< ", req_res.next_chunk_index: " << req_res.next_chunk_index;
const std::string& coll_name = get_collection_name(req);
uint64_t queue_id = StringUtils::hash_wy(coll_name.c_str(), coll_name.size()) % num_threads;
queue_ids.insert(queue_id);
std::unique_lock qlk(qmutuxes[queue_id].mcv);
queues[queue_id].emplace_back(req->start_ts);
}
num_reqs_restored++;
}
if(state.contains("reference_q")) {
for(const auto& item: state["reference_q"].items()) {
const nlohmann::json& ref_entry = item.value();
reference_q.emplace_back(ref_entry["queue_id"], ref_entry["start_ts"]);
}
refq_wait.cv.notify_one();
}
// need to sort on `start_ts` to preserve original order before notifying queues
for(auto queue_id: queue_ids) {
std::unique_lock lk(qmutuxes[queue_id].mcv);
std::sort(queues[queue_id].begin(), queues[queue_id].end());
qmutuxes[queue_id].cv.notify_one();
}
LOG(INFO) << "Restored " << num_reqs_restored << " in-flight requests from snapshot.";
}
std::shared_mutex& BatchedIndexer::get_pause_mutex() {
return pause_mutex;
}
void BatchedIndexer::clear_skip_indices() {
delete skip_index_iter;
skip_index_iter = meta_store->scan(SKIP_INDICES_PREFIX, skip_index_iter_upper_bound);
while(skip_index_iter->Valid() && skip_index_iter->key().starts_with(SKIP_INDICES_PREFIX)) {
meta_store->remove(skip_index_iter->key().ToString());
skip_index_iter->Next();
}
meta_store->flush();
}
| size: 27,533 | language: C++ | extension: .cpp | total_lines: 517 | avg_line_length: 38.437137 | max_line_length: 131 | alphanum_fraction: 0.532798 | repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (redpajama, githubcode, stackv1, stackv2): all false |

| id: 3,758 | file_name: cached_resource_stat.cpp | file_path: typesense_typesense/src/cached_resource_stat.cpp |
#include "cached_resource_stat.h"
#include <fstream>
#include "logger.h"
cached_resource_stat_t::resource_check_t
cached_resource_stat_t::has_enough_resources(const std::string& data_dir_path,
const int disk_used_max_percentage,
const int memory_used_max_percentage) {
if(disk_used_max_percentage == 100 && memory_used_max_percentage == 100) {
return cached_resource_stat_t::OK;
}
std::unique_lock lk(m);
uint64_t now = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
if((now - last_checked_ts) < REFRESH_INTERVAL_SECS) {
return resource_status;
}
resource_status = get_resource_status(data_dir_path, disk_used_max_percentage, memory_used_max_percentage);
last_checked_ts = now;
return resource_status;
}
cached_resource_stat_t::resource_check_t
cached_resource_stat_t::get_resource_status(const std::string& data_dir_path, const int disk_used_max_percentage,
const int memory_used_max_percentage) {
uint64_t disk_total_bytes = 0;
uint64_t disk_used_bytes = 0;
uint64_t memory_total_bytes = 0;
uint64_t memory_available_bytes = 0;
uint64_t swap_total_bytes = 0;
uint64_t swap_free_bytes = 0;
// get disk usage
struct statvfs st{};
statvfs(data_dir_path.c_str(), &st);
disk_total_bytes = st.f_blocks * st.f_frsize;
disk_used_bytes = (st.f_blocks - st.f_bavail) * st.f_frsize;
// get memory and swap usage
std::string token;
std::ifstream file("/proc/meminfo");
while(file >> token) {
if(token == "MemTotal:") {
uint64_t value_kb;
if(file >> value_kb) {
memory_total_bytes = value_kb * 1024;
}
}
else if(token == "MemAvailable:") {
uint64_t value_kb;
if(file >> value_kb) {
memory_available_bytes = value_kb * 1024;
}
}
else if(token == "SwapTotal:") {
uint64_t value_kb;
if(file >> value_kb) {
swap_total_bytes = value_kb * 1024;
}
}
else if(token == "SwapFree:") {
uint64_t value_kb;
if(file >> value_kb) {
swap_free_bytes = value_kb * 1024;
}
// since "SwapFree" appears last in the file
break;
}
}
if(memory_total_bytes == 0) {
// if there is an error in fetching the stat, we will return `OK`
return cached_resource_stat_t::OK;
}
double disk_used_percentage = (double(disk_used_bytes)/double(disk_total_bytes)) * 100;
if(disk_used_percentage > disk_used_max_percentage) {
LOG(INFO) << "disk_total_bytes: " << disk_total_bytes << ", disk_used_bytes: " << disk_used_bytes
<< ", disk_used_percentage: " << disk_used_percentage;
return cached_resource_stat_t::OUT_OF_DISK;
}
// Calculate sum of RAM + SWAP used as all_memory_used
uint64_t all_memory_used = (memory_total_bytes - memory_available_bytes) + (swap_total_bytes - swap_free_bytes);
if(all_memory_used >= memory_total_bytes) {
return cached_resource_stat_t::OUT_OF_MEMORY;
}
// compare with 500M or `100 - memory_used_max_percentage` of total memory, whichever is lower
uint64_t memory_free_min_bytes = std::min<uint64_t>(500ULL * 1024 * 1024,
((100ULL - memory_used_max_percentage) * memory_total_bytes) / 100);
uint64_t free_mem = (memory_total_bytes - all_memory_used);
if(free_mem < memory_free_min_bytes) {
LOG(INFO) << "memory_total: " << memory_total_bytes << ", memory_available: " << memory_available_bytes
<< ", all_memory_used: " << all_memory_used << ", free_mem: " << free_mem
<< ", memory_free_min: " << memory_free_min_bytes;
return cached_resource_stat_t::OUT_OF_MEMORY;
}
return cached_resource_stat_t::OK;
}
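
// --- Hedged worked example (editor's addition, not part of the upstream file) ---
// The free-memory floor above is the smaller of 500 MiB and (100 - memory_used_max_percentage)%
// of total RAM. On a hypothetical 16 GiB machine: with a 95% cap the 5% slice (~819 MiB) exceeds
// the 500 MiB floor, so 500 MiB wins; with a 99% cap the 1% slice (~164 MiB) is smaller and wins.
namespace {
    constexpr uint64_t k_sketch_total_ram = 16ULL * 1024 * 1024 * 1024;   // hypothetical 16 GiB box
    constexpr uint64_t k_sketch_floor = 500ULL * 1024 * 1024;             // same constant as above
    constexpr uint64_t sketch_min_free(int memory_used_max_percentage) {
        const uint64_t pct_slice = ((100ULL - memory_used_max_percentage) * k_sketch_total_ram) / 100;
        return pct_slice < k_sketch_floor ? pct_slice : k_sketch_floor;
    }
    static_assert(sketch_min_free(95) == k_sketch_floor, "5% of 16 GiB exceeds the 500 MiB floor");
    static_assert(sketch_min_free(99) < k_sketch_floor, "1% of 16 GiB is below the 500 MiB floor");
}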
| size: 4,157 | language: C++ | extension: .cpp | total_lines: 92 | avg_line_length: 35.402174 | max_line_length: 124 | alphanum_fraction: 0.583519 | repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (redpajama, githubcode, stackv1, stackv2): all false |

| id: 3,759 | file_name: stemmer_manager.cpp | file_path: typesense_typesense/src/stemmer_manager.cpp |
#include "stemmer_manager.h"
Stemmer::Stemmer(const char * language) {
this->stemmer = sb_stemmer_new(language, nullptr);
this->cache = LRU::Cache<std::string, std::string>(20);
}
Stemmer::~Stemmer() {
sb_stemmer_delete(stemmer);
}
std::string Stemmer::stem(const std::string & word) {
std::unique_lock<std::mutex> lock(mutex);
std::string stemmed_word;
if (cache.contains(word)) {
return cache.lookup(word);
}
auto stemmed = sb_stemmer_stem(stemmer, reinterpret_cast<const sb_symbol*>(word.c_str()), word.length());
stemmed_word = std::string(reinterpret_cast<const char*>(stemmed));
cache.insert(word, stemmed_word);
return stemmed_word;
}
StemmerManager::~StemmerManager() {
delete_all_stemmers();
}
std::shared_ptr<Stemmer> StemmerManager::get_stemmer(const std::string& language) {
std::unique_lock<std::mutex> lock(mutex);
// use english as default language
const std::string language_ = language.empty() ? "english" : language;
    if (stemmers.find(language_) == stemmers.end()) {
        stemmers[language_] = std::make_shared<Stemmer>(language_.c_str());
    }
    return stemmers[language_];
}
void StemmerManager::delete_stemmer(const std::string& language) {
std::unique_lock<std::mutex> lock(mutex);
if (stemmers.find(language) != stemmers.end()) {
stemmers.erase(language);
}
}
void StemmerManager::delete_all_stemmers() {
std::unique_lock<std::mutex> lock(mutex);
stemmers.clear();
}
const bool StemmerManager::validate_language(const std::string& language) {
const std::string language_ = language.empty() ? "english" : language;
auto stemmer = sb_stemmer_new(language_.c_str(), nullptr);
if (stemmer == nullptr) {
return false;
}
sb_stemmer_delete(stemmer);
return true;
}
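
// --- Hedged usage sketch (editor's addition, not part of the upstream file) ---
// A Stemmer wraps one Snowball (libstemmer) stemmer plus a 20-entry LRU cache, so repeated
// calls for the same word are served from the cache. Minimal illustration:
[[maybe_unused]] static void stemmer_usage_sketch() {
    Stemmer english_stemmer("english");
    std::string first = english_stemmer.stem("running");    // stemmed via sb_stemmer_stem (-> "run")
    std::string second = english_stemmer.stem("running");   // same word again: served from the cache
    (void) first; (void) second;
}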
| size: 1,825 | language: C++ | extension: .cpp | total_lines: 50 | avg_line_length: 32.38 | max_line_length: 109 | alphanum_fraction: 0.684776 | repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (redpajama, githubcode, stackv1, stackv2): all false |

| id: 3,760 | file_name: conversation_manager.cpp | file_path: typesense_typesense/src/conversation_manager.cpp |
#include "conversation_manager.h"
#include "logger.h"
#include <chrono>
#include "http_client.h"
#include "core_api.h"
Option<std::string> ConversationManager::add_conversation(const nlohmann::json& conversation, const nlohmann::json& model, const std::string& id) {
std::unique_lock lock(conversations_mutex);
if(!conversation.is_array()) {
return Option<std::string>(400, "Conversation is not an array");
}
if(!id.empty()) {
auto conversation_exists = check_conversation_exists(id);
if(!conversation_exists.ok()) {
return Option<std::string>(conversation_exists.code(), conversation_exists.error());
}
}
std::string conversation_id = id.empty() ? sole::uuid4().str() : id;
std::string history_collection = model["history_collection"].get<std::string>();
auto collection = CollectionManager::get_instance().get_collection(history_collection).get();
if(!collection) {
return Option<std::string>(404, "Conversation store collection not found");
}
std::string body;
for(const auto& message : conversation) {
if(!message.is_object()) {
return Option<std::string>(400, "Message is not an object");
}
// key is role, value is message
const auto& message_it = message.items().begin();
if(message_it == message.items().end()) {
return Option<std::string>(400, "Message is empty");
}
if(!message_it.value().is_string()) {
return Option<std::string>(400, "Role and message must be strings");
}
nlohmann::json message_json;
message_json["conversation_id"] = conversation_id;
message_json["role"] = message_it.key();
message_json["message"] = message_it.value();
message_json["timestamp"] = std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count();
message_json["model_id"] = model["id"];
body += message_json.dump(-1) + "\n";
}
if(!raft_server) {
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
req->params["action"] = "emplace";
req->params["collection"] = history_collection;
req->body = body;
auto api_res = post_import_documents(req, resp);
if(!api_res) {
return Option<std::string>(resp->status_code, resp->body);
}
return Option<std::string>(conversation_id);
}
std::string leader_url = raft_server->get_leader_url();
if(!leader_url.empty()) {
std::string base_url = leader_url + "collections/" + history_collection;
std::string res;
std::string url = base_url + "/documents/import?action=emplace";
std::map<std::string, std::string> res_headers;
long status = HttpClient::post_response(url, body, res, res_headers, {}, 10*1000, true);
if(status != 200) {
LOG(ERROR) << "Error while creating conversation: " << res;
LOG(ERROR) << "Status: " << status;
return Option<std::string>(400, "Error while creating conversation");
} else {
return Option<std::string>(conversation_id);
}
} else {
return Option<std::string>(500, "Leader URL is empty");
}
}
Option<nlohmann::json> ConversationManager::get_conversation(const std::string& conversation_id) {
auto collection_op = get_history_collection(conversation_id);
if(!collection_op.ok()) {
return Option<nlohmann::json>(collection_op.code(), collection_op.error());
}
auto collection = collection_op.get();
nlohmann::json res;
size_t total = 0;
std::vector<sort_by> sort_by_vec = {{"timestamp", sort_field_const::asc}};
auto search_res = collection->search("*", {}, "conversation_id:" + conversation_id, {}, sort_by_vec, {}, 250);
if(!search_res.ok()) {
return Option<nlohmann::json>(400, "Error while searching conversation store: " + search_res.error());
}
auto search_res_json = search_res.get();
total = search_res_json["found"].get<uint32_t>();
if(total == 0) {
return Option<nlohmann::json>(404, "Conversation not found");
}
res["conversation"] = nlohmann::json::array();
for(auto& hit : search_res_json["hits"]) {
nlohmann::json message;
message[hit["document"]["role"]] = hit["document"]["message"];
res["conversation"].push_back(message);
}
// swap every two elements
for(size_t i = 0; i < res["conversation"].size() - 1; i += 2) {
res["conversation"].at(i).swap(res["conversation"].at(i + 1));
}
res["id"] = conversation_id;
res["last_updated"] = (search_res_json["hits"].size() > 0) ? search_res_json["hits"][search_res_json["hits"].size() - 1]["document"]["timestamp"].get<uint32_t>() : 0;
if(total > 250) {
while(total > 0) {
search_res = collection->search("*", {}, "conversation_id:" + conversation_id, {}, sort_by_vec, {}, 250, search_res_json["page"].get<uint32_t>() + 1);
if(!search_res.ok()) {
return Option<nlohmann::json>(400, "Error while searching conversation store: " + search_res.error());
}
search_res_json = search_res.get();
for(auto& hit : search_res_json["hits"]) {
nlohmann::json message;
message[hit["document"]["role"]] = hit["document"]["message"];
res["conversation"].push_back(message);
}
res["last_updated"] = search_res_json["hits"][search_res_json["hits"].size() - 1]["document"]["timestamp"];
total -= search_res_json["hits"].size();
}
}
return Option<nlohmann::json>(res);
}
// pop front elements until the serialized conversation fits within the requested limit
Option<nlohmann::json> ConversationManager::truncate_conversation(nlohmann::json conversation, size_t limit) {
if(!conversation.is_array()) {
return Option<nlohmann::json>(400, "Conversation history is not an array");
}
    if(limit == 0) {
        return Option<nlohmann::json>(400, "Limit must be a positive integer");
    }
while(conversation.dump(0).size() > limit) {
// pop front element from json array
try {
conversation.erase(0);
} catch (std::exception& e) {
return Option<nlohmann::json>(400, "Conversation history is not an array");
}
}
return Option<nlohmann::json>(conversation);
}
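
// --- Hedged sketch (editor's addition, not part of the upstream file) ---
// truncate_conversation() above keeps erasing the oldest (front) message until the serialized
// history fits within `limit` bytes. The same idea in isolation, on a plain JSON array:
[[maybe_unused]] static nlohmann::json truncate_front_sketch(nlohmann::json history, size_t byte_limit) {
    while(!history.empty() && history.dump().size() > byte_limit) {
        history.erase(0);   // drop the oldest message first
    }
    return history;
}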
Option<nlohmann::json> ConversationManager::delete_conversation_unsafe(const std::string& conversation_id) {
auto conversation_exists = check_conversation_exists(conversation_id);
if(!conversation_exists.ok()) {
return Option<nlohmann::json>(conversation_exists.code(), conversation_exists.error());
}
auto history_collection_op = get_history_collection(conversation_id);
if(!history_collection_op.ok()) {
return Option<nlohmann::json>(history_collection_op.code(), history_collection_op.error());
}
auto history_collection = history_collection_op.get()->get_name();
if(!raft_server) {
auto req = std::make_shared<http_req>();
auto resp = std::make_shared<http_res>(nullptr);
req->params["filter_by"] = "conversation_id:" + conversation_id;
req->params["collection"] = history_collection;
auto api_res = del_remove_documents(req, resp);
if(!api_res) {
return Option<nlohmann::json>(resp->status_code, resp->body);
}
nlohmann::json res_json;
res_json["id"] = conversation_id;
return Option<nlohmann::json>(res_json);
}
auto leader_url = raft_server->get_leader_url();
if(leader_url.empty()) {
return Option<nlohmann::json>(500, "Leader URL is empty");
}
std::string base_url = leader_url + "collections/" + history_collection;
std::string res;
std::string url = base_url + "/documents?filter_by=conversation_id:" + conversation_id;
std::map<std::string, std::string> res_headers;
long status = HttpClient::delete_response(url, res, res_headers, 10*1000, true);
if(status != 200) {
LOG(ERROR) << "Error while deleting conversation: " << res;
LOG(ERROR) << "Status: " << status;
return Option<nlohmann::json>(400, "Error while deleting conversation");
} else {
nlohmann::json res_json;
res_json["conversation_id"] = conversation_id;
return Option<nlohmann::json>(res_json);
}
}
Option<nlohmann::json> ConversationManager::delete_conversation(const std::string& conversation_id) {
std::unique_lock lock(conversations_mutex);
return delete_conversation_unsafe(conversation_id);
}
Option<bool> ConversationManager::init(ReplicationState* raft_server) {
if(raft_server == nullptr) {
return Option<bool>(400, "Raft server is null");
}
this->raft_server = raft_server;
return Option<bool>(true);
}
void ConversationManager::clear_expired_conversations() {
std::unique_lock lock(conversations_mutex);
// Only leader can delete expired conversations
if(raft_server && !raft_server->is_leader()) {
return;
}
auto models_op = ConversationModelManager::get_all_models();
if(!models_op.ok()) {
LOG(ERROR) << "Error while getting conversation models: " << models_op.error();
return;
}
auto models = models_op.get();
for(auto& model : models) {
if(model.count("history_collection") == 0) {
continue;
}
auto history_collection = model["history_collection"].get<std::string>();
auto ttl = model["ttl"].get<uint64_t>();
auto collection = CollectionManager::get_instance().get_collection(history_collection).get();
std::string filter_by_str = "timestamp:<" + std::to_string(std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now().time_since_epoch()).count() - ttl + TTL_OFFSET) + "&&model_id:=" + model["id"].get<std::string>();
if(raft_server) {
std::string res;
std::map<std::string, std::string> res_headers;
std::string url = raft_server->get_leader_url() + "collections/" + history_collection + "/documents?filter_by=" + filter_by_str;
auto res_code = HttpClient::get_instance().delete_response(url, res, res_headers, 10*1000, true);
if(res_code != 200) {
LOG(ERROR) << "Error while deleting expired conversations: " << res;
LOG(ERROR) << "Status: " << res_code;
}
} else {
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> resp = std::make_shared<http_res>(nullptr);
req->params["collection"] = history_collection;
req->params["filter_by"] = filter_by_str;
auto api_res = del_remove_documents(req, resp);
if(!api_res) {
LOG(ERROR) << "Error while deleting expired conversations: " << resp->body;
}
}
}
}
void ConversationManager::run() {
while(!quit) {
std::unique_lock lock(conversations_mutex);
cv.wait_for(lock, std::chrono::seconds(60), [&] { return quit.load(); });
if(quit) {
return;
}
lock.unlock();
clear_expired_conversations();
}
}
void ConversationManager::stop() {
quit = true;
cv.notify_all();
}
Option<bool> ConversationManager::validate_conversation_store_schema(Collection* collection) {
const auto& schema = collection->get_schema();
if(schema.count("conversation_id") == 0) {
return Option<bool>(400, "Schema is missing `conversation_id` field");
}
if(schema.count("role") == 0) {
return Option<bool>(400, "Schema is missing `role` field");
}
if(schema.count("message") == 0) {
return Option<bool>(400, "Schema is missing `message` field");
}
if(schema.count("timestamp") == 0) {
return Option<bool>(400, "Schema is missing `timestamp` field");
}
if(schema.at("conversation_id").type != field_types::STRING) {
return Option<bool>(400, "`conversation_id` field must be a string");
}
if(schema.at("role").type != field_types::STRING) {
return Option<bool>(400, "`role` field must be a string");
}
if(schema.at("message").type != field_types::STRING) {
return Option<bool>(400, "`message` field must be a string");
}
if(schema.at("timestamp").type != field_types::INT32) {
return Option<bool>(400, "`timestamp` field must be an integer");
}
if(!schema.at("timestamp").sort) {
return Option<bool>(400, "`timestamp` field must be a sort field");
}
if(schema.count("model_id") == 0) {
return Option<bool>(400, "Schema is missing `model_id` field");
}
if(schema.at("model_id").type != field_types::STRING) {
return Option<bool>(400, "`model_id` field must be a string");
}
return Option<bool>(true);
}
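
// --- Hedged sketch (editor's addition, not part of the upstream file) ---
// A history collection schema that would pass the checks above. The required field names and
// types come from this function; the surrounding payload shape ("name" + "fields") and the
// literal type strings are assumptions based on the usual collection-creation format:
/*
{
  "name": "conversation_store",
  "fields": [
    {"name": "conversation_id", "type": "string"},
    {"name": "model_id",        "type": "string"},
    {"name": "role",            "type": "string"},
    {"name": "message",         "type": "string"},
    {"name": "timestamp",       "type": "int32", "sort": true}
  ]
}
*/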
Option<bool> ConversationManager::check_conversation_exists(const std::string& conversation_id) {
auto collection_op = get_history_collection(conversation_id);
if(!collection_op.ok()) {
return Option<bool>(collection_op.code(), collection_op.error());
}
auto collection = collection_op.get();
nlohmann::json res;
size_t total = 0;
auto search_res = collection->search("*", {}, "conversation_id:" + conversation_id, {}, {}, {}, 250);
if(!search_res.ok()) {
return Option<bool>(400, "Error while searching conversation store: " + search_res.error());
}
auto search_res_json = search_res.get();
total = search_res_json["found"].get<uint32_t>();
if(total == 0) {
return Option<bool>(404, "Conversation not found");
}
return Option<bool>(true);
}
Option<Collection*> ConversationManager::get_history_collection(const std::string& conversation_id) {
auto history_collections = ConversationModelManager::get_history_collections();
for(auto& collection : history_collections) {
auto collection_ptr = CollectionManager::get_instance().get_collection(collection).get();
if(!collection_ptr) {
continue;
}
auto search_res = collection_ptr->search("*", {}, "conversation_id:" + conversation_id, {}, {}, {}, 1);
if(!search_res.ok()) {
continue;
}
auto search_res_json = search_res.get();
if(search_res_json["found"].get<uint32_t>() > 0) {
return Option<Collection*>(collection_ptr);
}
}
return Option<Collection*>(404, "Conversation not found");
}
Option<bool> ConversationManager::validate_conversation_store_collection(const std::string& collection) {
auto collection_ptr = CollectionManager::get_instance().get_collection(collection).get();
if(!collection_ptr) {
return Option<bool>(404, "Collection not found");
}
auto validate_op = validate_conversation_store_schema(collection_ptr);
if(!validate_op.ok()) {
return Option<bool>(validate_op.code(), validate_op.error());
}
return Option<bool>(true);
}
| size: 15,327 | language: C++ | extension: .cpp | total_lines: 333 | avg_line_length: 38.498498 | max_line_length: 247 | alphanum_fraction: 0.624345 | repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (redpajama, githubcode, stackv1, stackv2): all false |

| id: 3,761 | file_name: override.cpp | file_path: typesense_typesense/src/override.cpp |
#include <string_utils.h>
#include "override.h"
#include "tokenizer.h"
Option<bool> override_t::parse(const nlohmann::json& override_json, const std::string& id,
override_t& override,
const std::string& locale,
const std::vector<char>& symbols_to_index,
const std::vector<char>& token_separators) {
if(!override_json.is_object()) {
return Option<bool>(400, "Bad JSON.");
}
if(override_json.count("rule") == 0 || !override_json["rule"].is_object()) {
return Option<bool>(400, "Missing `rule` definition.");
}
if (override_json["rule"].count("filter_by") == 0 && override_json["rule"].count("tags") == 0 &&
(override_json["rule"].count("query") == 0 || override_json["rule"].count("match") == 0)) {
return Option<bool>(400, "The `rule` definition must contain either a `tags` or a `query` and `match`.");
}
if(override_json.count("includes") == 0 && override_json.count("excludes") == 0 &&
override_json.count("filter_by") == 0 && override_json.count("sort_by") == 0 &&
override_json.count("remove_matched_tokens") == 0 && override_json.count("metadata") == 0 &&
override_json.count("replace_query") == 0) {
return Option<bool>(400, "Must contain one of: `includes`, `excludes`, `metadata`, "
"`filter_by`, `sort_by`, `remove_matched_tokens`, `replace_query`.");
}
if(override_json["rule"].count("tags") != 0) {
if(!override_json["rule"]["tags"].is_array()) {
return Option<bool>(400, "The `tags` value must be an array of strings.");
}
for(const auto& tag: override_json["rule"]["tags"]) {
if(!tag.is_string()) {
return Option<bool>(400, "The `tags` value must be an array of strings.");
}
override.rule.tags.insert(tag.get<std::string>());
}
}
if(override_json.count("includes") != 0) {
if(!override_json["includes"].is_array()) {
return Option<bool>(400, "The `includes` value must be an array.");
}
for(const auto & include_obj: override_json["includes"]) {
if(!include_obj.is_object()) {
return Option<bool>(400, "The `includes` value must be an array of objects.");
}
if(include_obj.count("id") == 0 || include_obj.count("position") == 0) {
return Option<bool>(400, "Inclusion definition must define both `id` and `position` keys.");
}
if(!include_obj["id"].is_string()) {
return Option<bool>(400, "Inclusion `id` must be a string.");
}
if(!include_obj["position"].is_number_integer()) {
return Option<bool>(400, "Inclusion `position` must be an integer.");
}
}
}
if(override_json.count("excludes") != 0) {
if(!override_json["excludes"].is_array()) {
return Option<bool>(400, "The `excludes` value must be an array.");
}
for(const auto & exclude_obj: override_json["excludes"]) {
if(!exclude_obj.is_object()) {
return Option<bool>(400, "The `excludes` value must be an array of objects.");
}
if(exclude_obj.count("id") == 0) {
return Option<bool>(400, "Exclusion definition must define an `id`.");
}
if(!exclude_obj["id"].is_string()) {
return Option<bool>(400, "Exclusion `id` must be a string.");
}
}
}
if(override_json.count("filter_by") != 0) {
if(!override_json["filter_by"].is_string()) {
return Option<bool>(400, "The `filter_by` must be a string.");
}
if(override_json["filter_by"].get<std::string>().empty()) {
return Option<bool>(400, "The `filter_by` must be a non-empty string.");
}
}
if(override_json.count("remove_matched_tokens") != 0) {
if (!override_json["remove_matched_tokens"].is_boolean()) {
return Option<bool>(400, "The `remove_matched_tokens` must be a boolean.");
}
}
if(override_json.count("filter_curated_hits") != 0) {
if (!override_json["filter_curated_hits"].is_boolean()) {
return Option<bool>(400, "The `filter_curated_hits` must be a boolean.");
}
}
if(override_json.count("stop_processing") != 0) {
if (!override_json["stop_processing"].is_boolean()) {
return Option<bool>(400, "The `stop_processing` must be a boolean.");
}
}
if(!id.empty()) {
override.id = id;
} else if(override_json.count("id") != 0) {
override.id = override_json["id"].get<std::string>();
} else {
return Option<bool>(400, "Override `id` not provided.");
}
const auto& json_rule = override_json["rule"];
override.rule.query = json_rule.count("query") == 0 ? "" : json_rule["query"].get<std::string>();
override.rule.match = json_rule.count("match") == 0 ? "" : json_rule["match"].get<std::string>();
if(!override.rule.query.empty()) {
auto symbols = symbols_to_index;
symbols.push_back('{');
symbols.push_back('}');
symbols.push_back('*');
Tokenizer tokenizer(override.rule.query, true, false, locale, symbols, token_separators);
std::vector<std::string> tokens;
tokenizer.tokenize(tokens);
override.rule.normalized_query = StringUtils::join(tokens, " ");
}
if(json_rule.count("filter_by") != 0) {
if(!override_json["rule"]["filter_by"].is_string()) {
return Option<bool>(400, "Override `rule.filter_by` must be a string.");
}
override.rule.filter_by = override_json["rule"]["filter_by"].get<std::string>();
}
if (override_json.count("includes") != 0) {
for(const auto & include: override_json["includes"]) {
add_hit_t add_hit;
add_hit.doc_id = include["id"].get<std::string>();
add_hit.position = include["position"].get<uint32_t>();
override.add_hits.push_back(add_hit);
}
}
if (override_json.count("excludes") != 0) {
for(const auto & exclude: override_json["excludes"]) {
drop_hit_t drop_hit;
drop_hit.doc_id = exclude["id"].get<std::string>();
override.drop_hits.push_back(drop_hit);
}
}
if (override_json.count("filter_by") != 0) {
override.filter_by = override_json["filter_by"].get<std::string>();
}
if (override_json.count("sort_by") != 0) {
override.sort_by = override_json["sort_by"].get<std::string>();
}
if (override_json.count("replace_query") != 0) {
if(override_json.count("remove_matched_tokens") != 0 && override_json["remove_matched_tokens"].get<bool>()) {
return Option<bool>(400, "Only one of `replace_query` or `remove_matched_tokens` can be specified.");
}
override.replace_query = override_json["replace_query"].get<std::string>();
}
if (override_json.count("metadata") != 0) {
if(!override_json["metadata"].is_object()) {
return Option<bool>(400, "The `metadata` must be a JSON object.");
}
override.metadata = override_json["metadata"];
}
if(override_json.count("remove_matched_tokens") != 0) {
override.remove_matched_tokens = override_json["remove_matched_tokens"].get<bool>();
} else {
override.remove_matched_tokens = (override_json.count("filter_by") != 0);
}
if(override_json.count("filter_curated_hits") != 0) {
override.filter_curated_hits = override_json["filter_curated_hits"].get<bool>();
}
if(override_json.count("stop_processing") != 0) {
override.stop_processing = override_json["stop_processing"].get<bool>();
}
if(override_json.count("effective_from_ts") != 0) {
override.effective_from_ts = override_json["effective_from_ts"].get<int64_t>();
}
if(override_json.count("effective_to_ts") != 0) {
override.effective_to_ts = override_json["effective_to_ts"].get<int64_t>();
}
// we have to also detect if it is a dynamic query rule
size_t i = 0;
while(i < override.rule.normalized_query.size()) {
if(override.rule.normalized_query[i] == '{') {
// look for closing curly
i++;
while(i < override.rule.normalized_query.size()) {
if(override.rule.normalized_query[i] == '}') {
override.rule.dynamic_query = true;
// remove spaces around curlies
override.rule.normalized_query = StringUtils::trim_curly_spaces(override.rule.normalized_query);
break;
}
i++;
}
}
i++;
}
return Option<bool>(true);
}
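// Serializes the override back to JSON, emitting optional fields only when they are set.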
nlohmann::json override_t::to_json() const {
nlohmann::json override;
override["id"] = id;
if(!rule.query.empty()) {
override["rule"]["query"] = rule.query;
}
if(!rule.match.empty()) {
override["rule"]["match"] = rule.match;
}
if(!rule.filter_by.empty()) {
override["rule"]["filter_by"] = rule.filter_by;
}
if(!rule.tags.empty()) {
override["rule"]["tags"] = rule.tags;
}
override["includes"] = nlohmann::json::array();
for(const auto & add_hit: add_hits) {
nlohmann::json include;
include["id"] = add_hit.doc_id;
include["position"] = add_hit.position;
override["includes"].push_back(include);
}
override["excludes"] = nlohmann::json::array();
for(const auto & drop_hit: drop_hits) {
nlohmann::json exclude;
exclude["id"] = drop_hit.doc_id;
override["excludes"].push_back(exclude);
}
if(!filter_by.empty()) {
override["filter_by"] = filter_by;
}
if(!sort_by.empty()) {
override["sort_by"] = sort_by;
}
if(!replace_query.empty()) {
override["replace_query"] = replace_query;
}
if(effective_from_ts != -1) {
override["effective_from_ts"] = effective_from_ts;
}
if(effective_to_ts != -1) {
override["effective_to_ts"] = effective_to_ts;
}
override["remove_matched_tokens"] = remove_matched_tokens;
override["filter_curated_hits"] = filter_curated_hits;
override["stop_processing"] = stop_processing;
if(!metadata.empty()) {
override["metadata"] = metadata;
}
return override;
}
| 10,679 | C++ | .cpp | 241 | 35.170124 | 117 | 0.568966 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,762 | core_api.cpp | typesense_typesense/src/core_api.cpp |
#include <chrono>
#include <thread>
#include <app_metrics.h>
#include <regex>
#include <analytics_manager.h>
#include <housekeeper.h>
#include "typesense_server_utils.h"
#include "core_api.h"
#include "string_utils.h"
#include "collection.h"
#include "collection_manager.h"
#include "system_metrics.h"
#include "logger.h"
#include "core_api_utils.h"
#include "lru/lru.hpp"
#include "ratelimit_manager.h"
#include "event_manager.h"
#include "http_proxy.h"
#include "include/stopwords_manager.h"
#include "conversation_manager.h"
#include "conversation_model_manager.h"
#include "conversation_model.h"
#include "personalization_model_manager.h"
using namespace std::chrono_literals;
std::shared_mutex mutex;
LRU::Cache<uint64_t, cached_res_t> res_cache;
std::shared_mutex alter_mutex;
std::set<std::string> alters_in_progress;
class alter_guard_t {
std::string collection_name;
public:
alter_guard_t(const std::string& collection) {
std::unique_lock ulock(alter_mutex);
collection_name = collection;
alters_in_progress.insert(collection_name);
}
~alter_guard_t() {
std::unique_lock ulock(alter_mutex);
alters_in_progress.erase(collection_name);
}
};
class in_flight_req_guard_t {
uint64_t req_id;
public:
in_flight_req_guard_t(const std::shared_ptr<http_req>& req) {
req_id = req->start_ts;
HouseKeeper::get_instance().add_req(req);
}
~in_flight_req_guard_t() {
HouseKeeper::get_instance().remove_req(req_id);
}
};
void init_api(uint32_t cache_num_entries) {
std::unique_lock lock(mutex);
res_cache.capacity(cache_num_entries);
}
bool get_alter_in_progress(const std::string& collection) {
std::shared_lock lock(alter_mutex);
return alters_in_progress.count(collection) != 0;
}
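// Authenticates a request: the health endpoint is open, health-with-resource-usage
// accepts only the pre-configured keys, and every other route is validated against
// the API key scopes of the collections it targets.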
bool handle_authentication(std::map<std::string, std::string>& req_params,
std::vector<nlohmann::json>& embedded_params_vec,
const std::string& body,
const route_path& rpath,
const std::string& req_auth_key) {
if(rpath.handler == get_health) {
// health endpoint requires no authentication
return true;
}
if(rpath.handler == get_health_with_resource_usage) {
// health_rusage end-point will be authenticated via pre-determined keys
return !req_auth_key.empty() && (
req_auth_key == Config::get_instance().get_api_key() ||
req_auth_key == Config::get_instance().get_health_rusage_api_key()
);
}
CollectionManager & collectionManager = CollectionManager::get_instance();
std::vector<collection_key_t> collections;
get_collections_for_auth(req_params, body, rpath, req_auth_key, collections, embedded_params_vec);
if(collections.size() != embedded_params_vec.size()) {
LOG(ERROR) << "Impossible error: size of collections and embedded_params_vec don't match, "
<< "collections.size: " << collections.size()
<< ", embedded_params_vec.size: " << embedded_params_vec.size();
return false;
}
return collectionManager.auth_key_matches(req_auth_key, rpath.action, collections, req_params, embedded_params_vec);
}
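// Queues the current response chunk for delivery on the server's event loop, after
// waiting for any previous chunk of the same response to finish streaming.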
void stream_response(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
if(!res->is_alive) {
// underlying request is dead or this is a raft log playback
return ;
}
// wait for previous chunk to finish (if any)
res->wait();
auto req_res = new async_req_res_t(req, res, true);
server->get_message_dispatcher()->send_message(HttpServer::STREAM_RESPONSE_MESSAGE, req_res);
}
void defer_processing(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res, size_t timeout_ms) {
defer_processing_t* defer = new defer_processing_t(req, res, timeout_ms, server);
//LOG(INFO) << "core_api req " << req.get() << ", use count: " << req.use_count();
server->get_message_dispatcher()->send_message(HttpServer::DEFER_PROCESSING_MESSAGE, defer);
}
// we cannot return errors here because that will end up as auth failure and won't convey
// bad schema errors
void get_collections_for_auth(std::map<std::string, std::string>& req_params,
const string& body,
const route_path& rpath, const string& req_auth_key,
std::vector<collection_key_t>& collections,
std::vector<nlohmann::json>& embedded_params_vec) {
if(rpath.handler == post_multi_search) {
nlohmann::json req_obj;
// If a `preset` parameter is present, we've to only load a pre-existing search configuration
// and ignore the actual request body.
auto preset_it = req_params.find("preset");
if(preset_it != req_params.end()) {
CollectionManager::get_instance().get_preset(preset_it->second, req_obj);
} else {
req_obj = nlohmann::json::parse(body, nullptr, false);
}
if(!req_obj.is_discarded() && req_obj.count("searches") != 0 && req_obj["searches"].is_array()) {
for(auto& el : req_obj["searches"]) {
if(el.is_object()) {
std::string coll_name;
if(el.count("collection") != 0 && el["collection"].is_string()) {
coll_name = el["collection"].get<std::string>();
} else if(req_params.count("collection") != 0) {
coll_name = req_params["collection"];
} else {
// if preset exists, that should be the lowest priority
if(el.count("preset") != 0) {
nlohmann::json preset_obj;
auto preset_op = CollectionManager::get_instance().
get_preset(el["preset"].get<std::string>(), preset_obj);
if(preset_op.ok() && preset_obj.count("collection") != 0 &&
preset_obj["collection"].is_string()) {
coll_name = preset_obj["collection"].get<std::string>();
}
}
}
const std::string& access_key = (el.count("x-typesense-api-key") != 0 &&
el["x-typesense-api-key"].is_string()) ?
el["x-typesense-api-key"].get<std::string>() :
req_auth_key;
collections.emplace_back(coll_name, access_key);
embedded_params_vec.emplace_back(nlohmann::json::object());
} else {
collections.emplace_back("", req_auth_key);
embedded_params_vec.emplace_back(nlohmann::json::object());
}
}
} else {
//LOG(ERROR) << "Multi search request body is malformed, body: " << body;
}
} else {
if(rpath.handler == post_create_collection) {
nlohmann::json obj = nlohmann::json::parse(body, nullptr, false);
if(obj.is_discarded()) {
LOG(ERROR) << "Create collection request body is malformed.";
}
else if(obj.count("name") != 0 && obj["name"].is_string()) {
collections.emplace_back(obj["name"].get<std::string>(), req_auth_key);
embedded_params_vec.emplace_back(nlohmann::json::object());
}
} else if(req_params.count("collection") != 0) {
collections.emplace_back(req_params.at("collection"), req_auth_key);
embedded_params_vec.emplace_back(nlohmann::json::object());
}
}
if(collections.empty()) {
collections.emplace_back("", req_auth_key);
embedded_params_vec.emplace_back(nlohmann::json::object());
}
}
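// Maps the `action` request parameter to an index operation; unrecognized values default to CREATE.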
index_operation_t get_index_operation(const std::string& action) {
if(action == "create") {
return CREATE;
} else if(action == "update") {
return UPDATE;
} else if(action == "upsert") {
return UPSERT;
} else if(action == "emplace") {
return EMPLACE;
}
return CREATE;
}
bool get_collections(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
uint32_t offset = 0, limit = 0;
std::vector<std::string> exclude_fields;
if(req->params.count("offset") != 0) {
const auto &offset_str = req->params["offset"];
if(!StringUtils::is_uint32_t(offset_str)) {
res->set(400, "Offset param should be unsigned integer.");
return false;
}
offset = std::stoi(offset_str);
}
if(req->params.count("limit") != 0) {
const auto &limit_str = req->params["limit"];
if(!StringUtils::is_uint32_t(limit_str)) {
res->set(400, "Limit param should be unsigned integer.");
return false;
}
limit = std::stoi(limit_str);
}
if(req->params.count("exclude_fields") != 0) {
const auto& exclude_fields_str = req->params["exclude_fields"];
StringUtils::split(exclude_fields_str, exclude_fields, ",");
}
AuthManager &auth_manager = collectionManager.getAuthManager();
auto api_key_collections = auth_manager.get_api_key_collections(req->api_auth_key);
auto collections_summaries_op = collectionManager.get_collection_summaries(limit, offset, exclude_fields,
api_key_collections);
if(!collections_summaries_op.ok()) {
res->set(collections_summaries_op.code(), collections_summaries_op.error());
return false;
}
nlohmann::json json_response = collections_summaries_op.get();
res->set_200(json_response.dump());
return true;
}
bool post_create_collection(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
//LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
const std::string SRC_COLL_NAME = "src_name";
/*if(res->is_alive && req_json.is_object() && req_json.count("enable_nested_fields") == 0) {
// This patch ensures that nested fields are only enabled for collections created on Typesense versions
// which support nested fields. This ensures that ".*" schema does not end up duplicating fields on
// manually flattened collection schemas that also contain nested versions for convenience.
// TO BE ENABLED WHEN READY!
// req_json["enable_nested_fields"] = true;
}*/
CollectionManager& collectionManager = CollectionManager::get_instance();
const Option<Collection*> &collection_op = req->params.count(SRC_COLL_NAME) != 0 ?
collectionManager.clone_collection(req->params[SRC_COLL_NAME], req_json) :
CollectionManager::create_collection(req_json);
if(collection_op.ok()) {
nlohmann::json json_response = collection_op.get()->get_summary_json();
res->set_201(json_response.dump());
return true;
}
res->set(collection_op.code(), collection_op.error());
return false;
}
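// Alters a collection schema and/or metadata. Only `fields` and `metadata` keys are
// accepted, and an alter_guard_t marks the collection as having an alter in progress
// for the lifetime of the request.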
bool patch_update_collection(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
std::set<std::string> allowed_keys = {"metadata", "fields"};
// Ensures that only one alter can run per collection.
// The actual check for this, happens in `ReplicationState::write` which is called only during live writes.
alter_guard_t alter_guard(req->params["collection"]);
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
//LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
if(req_json.empty()) {
res->set_400("Alter payload is empty.");
return false;
}
for(auto it : req_json.items()) {
if(allowed_keys.count(it.key()) == 0) {
res->set_400("Only `fields` and `metadata` can be updated at the moment.");
return false;
}
}
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
if(req_json.contains("metadata")) {
if(!req_json["metadata"].is_object()) {
res->set_400("The `metadata` value should be an object.");
return false;
}
//update in collection metadata and store in db
auto op = collectionManager.update_collection_metadata(req->params["collection"], req_json["metadata"]);
if(!op.ok()) {
res->set(op.code(), op.error());
return false;
}
}
if(req_json.contains("fields")) {
nlohmann::json alter_payload;
alter_payload["fields"] = req_json["fields"];
auto alter_op = collection->alter(alter_payload);
if(!alter_op.ok()) {
res->set(alter_op.code(), alter_op.error());
return false;
}
// without this line, response will return full api key without being masked
req_json["fields"] = alter_payload["fields"];
}
res->set_200(req_json.dump());
return true;
}
bool del_drop_collection(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
bool compact_store = false;
if(req->params.count("compact_store") != 0) {
compact_store = (req->params["compact_store"] == "true");
}
CollectionManager & collectionManager = CollectionManager::get_instance();
Option<nlohmann::json> drop_op = collectionManager.drop_collection(req->params["collection"], true, compact_store);
if(!drop_op.ok()) {
res->set(drop_op.code(), drop_op.error());
return false;
}
res->set_200(drop_op.get().dump());
return true;
}
bool get_debug(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
bool log_inflight_queries = false;
if(req->params.count("log_inflight_queries") != 0) {
log_inflight_queries = (req->params["log_inflight_queries"] == "true");
}
if(log_inflight_queries) {
HouseKeeper::get_instance().log_running_queries();
}
nlohmann::json result;
result["version"] = server->get_version();
uint64_t state = server->node_state();
result["state"] = state;
res->set_200(result.dump());
return true;
}
bool get_health_with_resource_usage(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json result;
bool alive = server->is_alive();
auto resource_check = cached_resource_stat_t::get_instance().has_enough_resources(
Config::get_instance().get_data_dir(),
Config::get_instance().get_disk_used_max_percentage(),
Config::get_instance().get_memory_used_max_percentage()
);
if (resource_check != cached_resource_stat_t::resource_check_t::OK) {
result["resource_error"] = std::string(magic_enum::enum_name(resource_check));
}
if(req->params.count("cpu_threshold") != 0 && StringUtils::is_float(req->params["cpu_threshold"])) {
float cpu_threshold = std::stof(req->params["cpu_threshold"]);
SystemMetrics sys_metrics;
std::vector<cpu_stat_t> cpu_stats = sys_metrics.get_cpu_stats();
if(!cpu_stats.empty() && StringUtils::is_float(cpu_stats[0].active)) {
alive = alive && (std::stof(cpu_stats[0].active) < cpu_threshold);
}
}
result["ok"] = alive;
if(alive) {
res->set_body(200, result.dump());
} else {
res->set_body(503, result.dump());
}
return alive;
}
bool get_health(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json result;
bool alive = server->is_alive();
result["ok"] = alive;
auto resource_check = cached_resource_stat_t::get_instance().has_enough_resources(
Config::get_instance().get_data_dir(),
Config::get_instance().get_disk_used_max_percentage(),
Config::get_instance().get_memory_used_max_percentage()
);
if (resource_check != cached_resource_stat_t::resource_check_t::OK) {
result["resource_error"] = std::string(magic_enum::enum_name(resource_check));
}
if(alive) {
res->set_body(200, result.dump());
} else {
res->set_body(503, result.dump());
}
return alive;
}
bool post_health(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json result;
bool alive = server->is_alive();
result["ok"] = alive;
if(alive) {
res->set_body(200, result.dump());
} else {
res->set_body(503, result.dump());
}
return alive;
}
bool get_metrics_json(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json result;
CollectionManager & collectionManager = CollectionManager::get_instance();
const std::string & data_dir_path = collectionManager.get_store()->get_state_dir_path();
SystemMetrics sys_metrics;
sys_metrics.get(data_dir_path, result);
res->set_body(200, result.dump(2));
return true;
}
bool get_stats_json(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json result;
AppMetrics::get_instance().get("requests_per_second", "latency_ms", result);
result["pending_write_batches"] = server->get_num_queued_writes();
res->set_body(200, result.dump(2));
return true;
}
bool get_status(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json status = server->node_status();
res->set_body(200, status.dump());
return true;
}
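// Builds a cache key from the route hash, request body and all parameter values
// except `use_cache`, hashed via StringUtils::hash_wy.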
uint64_t hash_request(const std::shared_ptr<http_req>& req) {
std::stringstream ss;
ss << req->route_hash << req->body;
for(auto& kv: req->params) {
if(kv.first != "use_cache") {
ss << kv.second;
}
}
const std::string& req_str = ss.str();
return StringUtils::hash_wy(req_str.c_str(), req_str.size());
}
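// Single-collection search handler. When `use_cache` is enabled, serves responses
// from the LRU cache (respecting per-entry TTL) and caches successful results.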
bool get_search(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const auto use_cache_it = req->params.find("use_cache");
bool use_cache = (use_cache_it != req->params.end()) && (use_cache_it->second == "1" || use_cache_it->second == "true");
uint64_t req_hash = 0;
in_flight_req_guard_t in_flight_req_guard(req);
if(use_cache) {
// cache enabled, let's check if request is already in the cache
req_hash = hash_request(req);
//LOG(INFO) << "req_hash = " << req_hash;
std::unique_lock lock(mutex);
auto hit_it = res_cache.find(req_hash);
if(hit_it != res_cache.end()) {
//LOG(INFO) << "Result found in cache.";
const auto& cached_value = hit_it.value();
// we still need to check that TTL has not expired
uint32_t ttl = cached_value.ttl;
uint64_t seconds_elapsed = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::high_resolution_clock::now() - cached_value.created_at).count();
if(seconds_elapsed < cached_value.ttl) {
res->set_content(cached_value.status_code, cached_value.content_type_header, cached_value.body, true);
return true;
}
// Result found in cache but ttl has lapsed.
res_cache.erase(req_hash);
}
}
if(req->embedded_params_vec.empty()) {
res->set_500("Embedded params is empty.");
return false;
}
std::string results_json_str;
Option<bool> search_op = CollectionManager::do_search(req->params, req->embedded_params_vec[0],
results_json_str, req->conn_ts);
if(!search_op.ok()) {
res->set(search_op.code(), search_op.error());
if(search_op.code() == 408) {
req->overloaded = true;
}
return false;
}
res->set_200(results_json_str);
// we will cache only successful requests
if(use_cache) {
//LOG(INFO) << "Adding to cache, key = " << req_hash;
auto now = std::chrono::high_resolution_clock::now();
const auto cache_ttl_it = req->params.find("cache_ttl");
uint32_t cache_ttl = 60;
if(cache_ttl_it != req->params.end() && StringUtils::is_int32_t(cache_ttl_it->second)) {
cache_ttl = std::stoul(cache_ttl_it->second);
}
cached_res_t cached_res;
cached_res.load(res->status_code, res->content_type_header, res->body, now, cache_ttl, req_hash);
std::unique_lock lock(mutex);
res_cache.insert(req_hash, cached_res);
}
return true;
}
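// Multi-search handler: validates the `searches` array, applies rate limiting and the
// `limit_multi_searches` cap, merges per-search bodies with common request params, and
// when conversational search is enabled, builds the context from result documents and
// attaches the model's answer plus conversation history to the response.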
bool post_multi_search(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const auto use_cache_it = req->params.find("use_cache");
bool use_cache = (use_cache_it != req->params.end()) && (use_cache_it->second == "1" || use_cache_it->second == "true");
uint64_t req_hash = 0;
in_flight_req_guard_t in_flight_req_guard(req);
if(use_cache) {
// cache enabled, let's check if request is already in the cache
req_hash = hash_request(req);
//LOG(INFO) << "req_hash = " << req_hash;
std::unique_lock lock(mutex);
auto hit_it = res_cache.find(req_hash);
if(hit_it != res_cache.end()) {
//LOG(INFO) << "Result found in cache.";
const auto& cached_value = hit_it.value();
// we still need to check that TTL has not expired
uint32_t ttl = cached_value.ttl;
uint64_t seconds_elapsed = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::high_resolution_clock::now() - cached_value.created_at).count();
if(seconds_elapsed < cached_value.ttl) {
res->set_content(cached_value.status_code, cached_value.content_type_header, cached_value.body, true);
return true;
}
// Result found in cache but ttl has lapsed.
res_cache.erase(req_hash);
}
}
nlohmann::json req_json;
const auto preset_it = req->params.find("preset");
if(preset_it != req->params.end()) {
CollectionManager::get_instance().get_preset(preset_it->second, req_json);
} else {
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
}
if(req_json.count("searches") == 0) {
res->set_400("Missing `searches` array.");
return false;
}
if(!req_json["searches"].is_array()) {
res->set_400("Missing `searches` array.");
return false;
}
if(req->embedded_params_vec.empty()) {
res->set_400("Missing embedded params array.");
return false;
}
auto orig_req_params = req->params;
const char* LIMIT_MULTI_SEARCHES = "limit_multi_searches";
size_t limit_multi_searches = 50;
if(orig_req_params.count(LIMIT_MULTI_SEARCHES) != 0 &&
StringUtils::is_uint32_t(orig_req_params[LIMIT_MULTI_SEARCHES])) {
limit_multi_searches = std::stoi(orig_req_params[LIMIT_MULTI_SEARCHES]);
}
const auto& first_embedded_param = req->embedded_params_vec[0];
if(first_embedded_param.count(LIMIT_MULTI_SEARCHES) != 0 && first_embedded_param[LIMIT_MULTI_SEARCHES].is_number_integer()) {
limit_multi_searches = first_embedded_param[LIMIT_MULTI_SEARCHES].get<size_t>();
}
if(req_json["searches"].size() > limit_multi_searches) {
res->set_400(std::string("Number of multi searches exceeds `") + LIMIT_MULTI_SEARCHES + "` parameter.");
return false;
}
nlohmann::json response;
response["results"] = nlohmann::json::array();
nlohmann::json& searches = req_json["searches"];
if(searches.size() != req->embedded_params_vec.size()) {
LOG(ERROR) << "Embedded params parsing error: length does not match multi search array, searches.size(): "
<< searches.size() << ", embedded_params_vec.size: " << req->embedded_params_vec.size()
<< ", req_body: " << req->body;
res->set_500("Embedded params parsing error.");
return false;
}
// Get API key and IP
if(!req->metadata.empty()) {
auto api_key_ip_op = get_api_key_and_ip(req->metadata);
if(!api_key_ip_op.ok()) {
res->set(api_key_ip_op.code(), api_key_ip_op.error());
return false;
}
const auto& api_key_ip = api_key_ip_op.get();
auto rate_limit_manager = RateLimitManager::getInstance();
// Check rate limiting first before doing any search, don't want to waste time if we're rate limited
for(size_t i = 0; i < searches.size(); i++) {
if(RateLimitManager::getInstance()->is_rate_limited({RateLimitedEntityType::api_key, api_key_ip.first}, {RateLimitedEntityType::ip, api_key_ip.second})) {
res->set(429, "Rate limit exceeded or blocked");
return false;
}
}
}
bool conversation = orig_req_params["conversation"] == "true";
bool conversation_history = orig_req_params.find("conversation_id") != orig_req_params.end();
std::string common_query;
if(!conversation && conversation_history) {
res->set_400("`conversation_id` can only be used if `conversation` is enabled.");
return false;
}
if(conversation) {
if(orig_req_params.find("q") == orig_req_params.end()) {
res->set_400("`q` parameter has to be common for all searches if conversation is enabled. Please set `q` as a query parameter in the request, instead of inside the POST body");
return false;
}
if(orig_req_params.find("conversation_model_id") == orig_req_params.end()) {
res->set_400("`conversation_model_id` is needed if conversation is enabled.");
return false;
}
const std::string& conversation_model_id = orig_req_params["conversation_model_id"];
auto conversation_model = ConversationModelManager::get_model(conversation_model_id);
if(!conversation_model.ok()) {
res->set_400("`conversation_model_id` is invalid.");
return false;
}
if(conversation_history) {
std::string conversation_id = orig_req_params["conversation_id"];
auto conversation_history = ConversationManager::get_instance().get_conversation(conversation_id);
if(!conversation_history.ok()) {
res->set_400("`conversation_id` is invalid.");
return false;
}
}
common_query = orig_req_params["q"];
if(conversation_history) {
const std::string& conversation_model_id = orig_req_params["conversation_model_id"];
auto conversation_id = orig_req_params["conversation_id"];
auto conversation_model = ConversationModelManager::get_model(conversation_model_id).get();
auto conversation_history = ConversationManager::get_instance().get_conversation(conversation_id).get();
auto generate_standalone_q = ConversationModel::get_standalone_question(conversation_history, common_query, conversation_model);
if(!generate_standalone_q.ok()) {
res->set_400(generate_standalone_q.error());
return false;
}
orig_req_params["q"] = generate_standalone_q.get();
}
}
for(size_t i = 0; i < searches.size(); i++) {
auto& search_params = searches[i];
if(!search_params.is_object()) {
res->set_400("The value of `searches` must be an array of objects.");
return false;
}
req->params = orig_req_params;
for(auto& search_item: search_params.items()) {
if(search_item.key() == "cache_ttl") {
// cache ttl can be applied only from an embedded key: cannot be a multi search param
continue;
}
if(conversation && search_item.key() == "q") {
// q is common for all searches
res->set_400("`q` parameter cannot be used in POST body if `conversation` is enabled. Please set `q` as a query parameter in the request, instead of inside the POST body");
return false;
}
if(conversation && search_item.key() == "conversation_model_id") {
// conversation_model_id is common for all searches
res->set_400("`conversation_model_id` cannot be used in POST body. Please set `conversation_model_id` as a query parameter in the request, instead of inside the POST body");
return false;
}
if(conversation && search_item.key() == "conversation_id") {
// conversation_id is common for all searches
res->set_400("`conversation_id` cannot be used in POST body. Please set `conversation_id` as a query parameter in the request, instead of inside the POST body");
return false;
}
if(search_item.key() == "conversation") {
res->set_400("`conversation` cannot be used in POST body. Please set `conversation` as a query parameter in the request, instead of inside the POST body");
return false;
}
// overwrite = false since req params will contain embedded params and so has higher priority
bool populated = AuthManager::add_item_to_params(req->params, search_item, false);
if(!populated) {
res->set_400("One or more search parameters are malformed.");
return false;
}
}
if(req->params.count("conversation") != 0) {
req->params.erase("conversation");
}
if(req->params.count("conversation_id") != 0) {
req->params.erase("conversation_id");
}
if(req->params.count("conversation_model_id") != 0) {
req->params.erase("conversation_model_id");
}
std::string results_json_str;
Option<bool> search_op = CollectionManager::do_search(req->params, req->embedded_params_vec[i],
results_json_str, req->conn_ts);
if(search_op.ok()) {
auto results_json = nlohmann::json::parse(results_json_str);
if(conversation) {
results_json["request_params"]["q"] = common_query;
}
response["results"].push_back(results_json);
} else {
if(search_op.code() == 408) {
res->set(search_op.code(), search_op.error());
req->overloaded = true;
return false;
}
nlohmann::json err_res;
err_res["error"] = search_op.error();
err_res["code"] = search_op.code();
response["results"].push_back(err_res);
}
}
if(conversation) {
nlohmann::json result_docs_arr = nlohmann::json::array();
int res_index = 0;
for(const auto& result : response["results"]) {
if(result.count("code") != 0) {
continue;
}
nlohmann::json result_docs = nlohmann::json::array();
std::vector<std::string> vector_fields;
auto collection = CollectionManager::get_instance().get_collection(searches[res_index]["collection"].get<std::string>());
auto search_schema = collection->get_schema();
for(const auto& field : search_schema) {
if(field.type == field_types::FLOAT_ARRAY) {
vector_fields.push_back(field.name);
}
}
if(result.contains("grouped_hits")) {
for(const auto& grouped_hit : result["grouped_hits"]) {
for(const auto& hit : grouped_hit["hits"]) {
auto doc = hit["document"];
for(const auto& vector_field : vector_fields) {
if(doc.contains(vector_field)) {
doc.erase(vector_field);
}
}
result_docs.push_back(doc);
}
}
}
else {
for(const auto& hit : result["hits"]) {
auto doc = hit["document"];
for(const auto& vector_field : vector_fields) {
if(doc.contains(vector_field)) {
doc.erase(vector_field);
}
}
result_docs.push_back(doc);
}
}
result_docs_arr.push_back(result_docs);
}
const std::string& conversation_model_id = orig_req_params["conversation_model_id"];
auto conversation_model = ConversationModelManager::get_model(conversation_model_id).get();
auto min_required_bytes_op = ConversationModel::get_minimum_required_bytes(conversation_model);
if(!min_required_bytes_op.ok()) {
res->set_400(min_required_bytes_op.error());
return false;
}
auto min_required_bytes = min_required_bytes_op.get();
auto prompt = req->params["q"];
if(conversation_model["max_bytes"].get<size_t>() < min_required_bytes + prompt.size()) {
res->set_400("`max_bytes` of the conversation model is less than the minimum required bytes(" + std::to_string(min_required_bytes) + ").");
return false;
}
// remove document with lowest score until total tokens is less than MAX_TOKENS
while(result_docs_arr.dump(0).size() > conversation_model["max_bytes"].get<size_t>() - min_required_bytes - prompt.size()) {
// sort the result_docs_arr by size descending
std::sort(result_docs_arr.begin(), result_docs_arr.end(), [](const auto& a, const auto& b) {
return a.size() > b.size();
});
// pop the last element from first array
if(result_docs_arr.size() > 0 && result_docs_arr[0].size() > 0) {
result_docs_arr[0].erase(result_docs_arr[0].size() - 1);
}
}
// Make result_docs_arr 1D
nlohmann::json result_docs = nlohmann::json::array();
for(const auto& result_doc : result_docs_arr) {
for(const auto& doc : result_doc) {
result_docs.push_back(doc);
}
}
auto answer_op = ConversationModel::get_answer(result_docs.dump(0), prompt, conversation_model);
if(!answer_op.ok()) {
res->set_400(answer_op.error());
return false;
}
response["conversation"] = nlohmann::json::object();
response["conversation"]["query"] = common_query;
response["conversation"]["answer"] = answer_op.get();
auto formatted_question_op = ConversationModel::format_question(common_query, conversation_model);
if(!formatted_question_op.ok()) {
res->set_400(formatted_question_op.error());
return false;
}
auto formatted_answer_op = ConversationModel::format_answer(answer_op.get(), conversation_model);
if(!formatted_answer_op.ok()) {
res->set_400(formatted_answer_op.error());
return false;
}
std::vector<std::string> exclude_fields;
StringUtils::split(req->params["exclude_fields"], exclude_fields, ",");
bool exclude_conversation_history = std::find(exclude_fields.begin(), exclude_fields.end(), "conversation_history") != exclude_fields.end();
nlohmann::json new_conversation_history = nlohmann::json::array();
new_conversation_history.push_back(formatted_question_op.get());
new_conversation_history.push_back(formatted_answer_op.get());
std::string conversation_id = conversation_history ? orig_req_params["conversation_id"] : "";
auto add_conversation_op = ConversationManager::get_instance().add_conversation(new_conversation_history, conversation_model, conversation_id);
if(!add_conversation_op.ok()) {
res->set_400(add_conversation_op.error());
return false;
}
if(!exclude_conversation_history) {
auto get_conversation_op = ConversationManager::get_instance().get_conversation(add_conversation_op.get());
if(!get_conversation_op.ok()) {
res->set_400(get_conversation_op.error());
return false;
}
response["conversation"]["conversation_history"] = get_conversation_op.get();
response["conversation"]["conversation_history"].erase("id");
}
response["conversation"]["conversation_id"] = add_conversation_op.get();
}
res->set_200(response.dump());
// we will cache only successful requests
if(use_cache) {
//LOG(INFO) << "Adding to cache, key = " << req_hash;
auto now = std::chrono::high_resolution_clock::now();
const auto cache_ttl_it = req->params.find("cache_ttl");
uint32_t cache_ttl = 60;
if(cache_ttl_it != req->params.end() && StringUtils::is_int32_t(cache_ttl_it->second)) {
cache_ttl = std::stoul(cache_ttl_it->second);
}
cached_res_t cached_res;
cached_res.load(res->status_code, res->content_type_header, res->body, now, cache_ttl, req_hash);
std::unique_lock lock(mutex);
res_cache.insert(req_hash, cached_res);
}
return true;
}
bool get_collection_summary(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager& collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
nlohmann::json json_response = collection->get_summary_json();
res->set_200(json_response.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore));
return true;
}
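// Streaming export handler, invoked repeatedly until the collection (or the filtered
// subset of it) has been emitted as JSON lines; per-request state is kept in export_state_t.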
bool get_export_documents(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
// NOTE: this is a streaming response end-point so this handler will be called multiple times
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
req->last_chunk_aggregate = true;
res->final = true;
res->set_404();
stream_response(req, res);
return false;
}
const char* FILTER_BY = "filter_by";
const char* INCLUDE_FIELDS = "include_fields";
const char* EXCLUDE_FIELDS = "exclude_fields";
const char* BATCH_SIZE = "batch_size";
export_state_t* export_state = nullptr;
const std::string seq_id_prefix = collection->get_seq_id_collection_prefix();
if(req->data == nullptr) {
export_state = new export_state_t();
export_state->collection = collection.get();
// destruction of data is managed by req destructor
req->data = export_state;
std::string filter_query;
std::vector<std::string> include_fields_vec;
std::vector<std::string> exclude_fields_vec;
spp::sparse_hash_set<std::string> exclude_fields;
spp::sparse_hash_set<std::string> include_fields;
if(req->params.count(FILTER_BY) != 0) {
filter_query = req->params[FILTER_BY];
}
if(req->params.count(INCLUDE_FIELDS) != 0) {
auto op = StringUtils::split_include_exclude_fields(req->params[INCLUDE_FIELDS], include_fields_vec);
if (!op.ok()) {
res->set(op.code(), op.error());
req->last_chunk_aggregate = true;
res->final = true;
stream_response(req, res);
return false;
}
}
if(req->params.count(EXCLUDE_FIELDS) != 0) {
auto op = StringUtils::split_include_exclude_fields(req->params[EXCLUDE_FIELDS], exclude_fields_vec);
if (!op.ok()) {
res->set(op.code(), op.error());
req->last_chunk_aggregate = true;
res->final = true;
stream_response(req, res);
return false;
}
}
auto initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec, exclude_fields_vec,
export_state->ref_include_exclude_fields_vec);
if (!initialize_op.ok()) {
res->set(initialize_op.code(), initialize_op.error());
req->last_chunk_aggregate = true;
res->final = true;
stream_response(req, res);
return false;
}
include_fields.insert(include_fields_vec.begin(), include_fields_vec.end());
exclude_fields.insert(exclude_fields_vec.begin(), exclude_fields_vec.end());
collection->populate_include_exclude_fields_lk(include_fields, exclude_fields,
export_state->include_fields, export_state->exclude_fields);
if(req->params.count(BATCH_SIZE) != 0 && StringUtils::is_uint32_t(req->params[BATCH_SIZE])) {
export_state->export_batch_size = std::stoul(req->params[BATCH_SIZE]);
}
if(filter_query.empty()) {
export_state->iter_upper_bound_key = collection->get_seq_id_collection_prefix() + "`"; // cannot inline this
export_state->iter_upper_bound = new rocksdb::Slice(export_state->iter_upper_bound_key);
export_state->it = collectionManager.get_store()->scan(seq_id_prefix, export_state->iter_upper_bound);
} else {
auto filter_ids_op = collection->get_filter_ids(filter_query, export_state->filter_result, false);
if(!filter_ids_op.ok()) {
res->set(filter_ids_op.code(), filter_ids_op.error());
req->last_chunk_aggregate = true;
res->final = true;
stream_response(req, res);
return false;
}
export_state->res_body = &res->body;
}
} else {
export_state = dynamic_cast<export_state_t*>(req->data);
}
if(export_state->it != nullptr) {
rocksdb::Iterator* it = export_state->it;
size_t batch_counter = 0;
std::string().swap(res->body);
while(it->Valid() && it->key().ToString().compare(0, seq_id_prefix.size(), seq_id_prefix) == 0) {
nlohmann::json doc = nlohmann::json::parse(it->value().ToString());
Collection::remove_flat_fields(doc);
Collection::remove_reference_helper_fields(doc);
if(export_state->include_fields.empty() && export_state->exclude_fields.empty()) {
res->body += doc.dump();
} else {
if (doc.count("id") == 0 || !doc.at("id").is_string()) {
res->set(500, "Could not find `id` field in the document: " + doc.dump());
req->last_chunk_aggregate = true;
res->final = true;
stream_response(req, res);
return false;
}
auto const& coll = export_state->collection;
auto const seq_id_op = coll->doc_id_to_seq_id_with_lock(doc.at("id"));
if (!seq_id_op.ok()) {
res->set(seq_id_op.code(), seq_id_op.error());
req->last_chunk_aggregate = true;
res->final = true;
stream_response(req, res);
return false;
}
std::map<std::string, reference_filter_result_t> references = {};
coll->prune_doc_with_lock(doc, export_state->include_fields, export_state->exclude_fields,
references, seq_id_op.get(), export_state->ref_include_exclude_fields_vec);
res->body += doc.dump();
}
it->Next();
// append a new line character if there is going to be one more record to send
if(it->Valid() && it->key().ToString().compare(0, seq_id_prefix.size(), seq_id_prefix) == 0) {
res->body += "\n";
req->last_chunk_aggregate = false;
res->final = false;
} else {
req->last_chunk_aggregate = true;
res->final = true;
}
batch_counter++;
if(batch_counter == export_state->export_batch_size) {
break;
}
}
} else {
bool done;
stateful_export_docs(export_state, export_state->export_batch_size, done);
if(!done) {
req->last_chunk_aggregate = false;
res->final = false;
} else {
req->last_chunk_aggregate = true;
res->final = true;
}
}
res->content_type_header = "text/plain; charset=utf-8";
res->status_code = 200;
stream_response(req, res);
return true;
}
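// Streaming bulk import handler, invoked once per request body chunk. It validates
// import parameters, splits the chunk into JSONL records (carrying a trailing partial
// record over to the next chunk), indexes the batch and streams per-document results.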
bool post_import_documents(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
//LOG(INFO) << "Import, req->body_index=" << req->body_index << ", body size: " << req->body.size();
//LOG(INFO) << "req->first_chunk=" << req->first_chunk_aggregate << ", last_chunk=" << req->last_chunk_aggregate;
const char *BATCH_SIZE = "batch_size";
const char *ACTION = "action";
const char *DIRTY_VALUES = "dirty_values";
const char *RETURN_DOC = "return_doc";
const char *RETURN_ID = "return_id";
const char *REMOTE_EMBEDDING_BATCH_SIZE = "remote_embedding_batch_size";
const char *REMOTE_EMBEDDING_TIMEOUT_MS = "remote_embedding_timeout_ms";
const char *REMOTE_EMBEDDING_NUM_TRIES = "remote_embedding_num_tries";
if(req->params.count(BATCH_SIZE) == 0) {
req->params[BATCH_SIZE] = "40";
}
if(req->params.count(REMOTE_EMBEDDING_BATCH_SIZE) == 0) {
req->params[REMOTE_EMBEDDING_BATCH_SIZE] = "200";
}
if(req->params.count(ACTION) == 0) {
req->params[ACTION] = "create";
}
if(req->params.count(DIRTY_VALUES) == 0) {
req->params[DIRTY_VALUES] = ""; // set it empty as default will depend on `index_all_fields`
}
if(req->params.count(RETURN_DOC) == 0) {
req->params[RETURN_DOC] = "false";
}
if(req->params.count(RETURN_ID) == 0) {
req->params[RETURN_ID] = "false";
}
if(!StringUtils::is_uint32_t(req->params[BATCH_SIZE])) {
res->final = true;
res->set_400("Parameter `" + std::string(BATCH_SIZE) + "` must be a positive integer.");
stream_response(req, res);
return false;
}
if(req->params[ACTION] != "create" && req->params[ACTION] != "update" && req->params[ACTION] != "upsert" &&
req->params[ACTION] != "emplace") {
res->final = true;
res->set_400("Parameter `" + std::string(ACTION) + "` must be one of: create|update|upsert|emplace.");
stream_response(req, res);
return false;
}
if(req->params[RETURN_DOC] != "true" && req->params[RETURN_DOC] != "false") {
res->final = true;
res->set_400("Parameter `" + std::string(RETURN_DOC) + "` must be a true|false.");
stream_response(req, res);
return false;
}
if(req->params[RETURN_ID] != "true" && req->params[RETURN_ID] != "false") {
res->final = true;
res->set_400("Parameter `" + std::string(RETURN_ID) + "` must be a true|false.");
stream_response(req, res);
return false;
}
if(req->params.count(REMOTE_EMBEDDING_TIMEOUT_MS) == 0) {
req->params[REMOTE_EMBEDDING_TIMEOUT_MS] = "60000";
}
if(req->params.count(REMOTE_EMBEDDING_NUM_TRIES) == 0) {
req->params[REMOTE_EMBEDDING_NUM_TRIES] = "2";
}
const size_t IMPORT_BATCH_SIZE = std::stoi(req->params[BATCH_SIZE]);
const size_t REMOTE_EMBEDDING_BATCH_SIZE_VAL = std::stoi(req->params[REMOTE_EMBEDDING_BATCH_SIZE]);
const size_t REMOTE_EMBEDDING_TIMEOUT_MS_VAL = std::stoi(req->params[REMOTE_EMBEDDING_TIMEOUT_MS]);
const size_t REMOTE_EMBEDDING_NUM_TRIES_VAL = std::stoi(req->params[REMOTE_EMBEDDING_NUM_TRIES]);
if(IMPORT_BATCH_SIZE == 0) {
res->final = true;
res->set_400("Parameter `" + std::string(BATCH_SIZE) + "` must be a positive integer.");
stream_response(req, res);
return false;
}
if(REMOTE_EMBEDDING_BATCH_SIZE_VAL == 0) {
res->final = true;
res->set_400("Parameter `" + std::string(REMOTE_EMBEDDING_BATCH_SIZE) + "` must be a positive integer.");
stream_response(req, res);
return false;
}
if(REMOTE_EMBEDDING_TIMEOUT_MS_VAL == 0) {
res->final = true;
res->set_400("Parameter `" + std::string(REMOTE_EMBEDDING_TIMEOUT_MS) + "` must be a positive integer.");
stream_response(req, res);
return false;
}
if(REMOTE_EMBEDDING_NUM_TRIES_VAL == 0) {
res->final = true;
res->set_400("Parameter `" + std::string(REMOTE_EMBEDDING_NUM_TRIES) + "` must be a positive integer.");
stream_response(req, res);
return false;
}
if(req->body_index == 0) {
// will log for every major chunk of request body
//LOG(INFO) << "Import, req->body.size=" << req->body.size() << ", batch_size=" << IMPORT_BATCH_SIZE;
//int nminusten_pos = std::max(0, int(req->body.size())-10);
//LOG(INFO) << "Last 10 chars: " << req->body.substr(nminusten_pos);
}
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
//LOG(INFO) << "collection == nullptr, for collection: " << req->params["collection"];
res->final = true;
res->set_404();
stream_response(req, res);
return false;
}
//LOG(INFO) << "Import, " << "req->body_index=" << req->body_index << ", req->body.size: " << req->body.size();
//LOG(INFO) << "req body %: " << (float(req->body_index)/req->body.size())*100;
std::vector<std::string> json_lines;
StringUtils::split(req->body, json_lines, "\n", false, false);
//LOG(INFO) << "json_lines.size before: " << json_lines.size() << ", req->body_index: " << req->body_index;
if(req->last_chunk_aggregate) {
//LOG(INFO) << "req->last_chunk_aggregate is true";
req->body = "";
} else {
if(!json_lines.empty()) {
// check if req->body had complete last record
bool complete_document;
try {
nlohmann::json document = nlohmann::json::parse(json_lines.back());
complete_document = document.is_object();
} catch(const std::exception& e) {
complete_document = false;
}
if(!complete_document) {
// eject partial record
req->body = json_lines.back();
json_lines.pop_back();
} else {
req->body = "";
}
}
}
//LOG(INFO) << "json_lines.size after: " << json_lines.size() << ", stream_proceed: " << stream_proceed;
//LOG(INFO) << "json_lines.size: " << json_lines.size() << ", req->res_state: " << req->res_state;
// When only one partial record arrives as a chunk, an empty body is pushed to response stream
bool single_partial_record_body = (json_lines.empty() && !req->body.empty());
std::stringstream response_stream;
//LOG(INFO) << "single_partial_record_body: " << single_partial_record_body;
const index_operation_t operation = get_index_operation(req->params[ACTION]);
if(!single_partial_record_body) {
nlohmann::json document;
const auto& dirty_values = collection->parse_dirty_values_option(req->params[DIRTY_VALUES]);
const bool& return_doc = req->params[RETURN_DOC] == "true";
const bool& return_id = req->params[RETURN_ID] == "true";
nlohmann::json json_res = collection->add_many(json_lines, document, operation, "",
dirty_values, return_doc, return_id, REMOTE_EMBEDDING_BATCH_SIZE_VAL, REMOTE_EMBEDDING_TIMEOUT_MS_VAL, REMOTE_EMBEDDING_NUM_TRIES_VAL);
//const std::string& import_summary_json = json_res->dump();
//response_stream << import_summary_json << "\n";
for (size_t i = 0; i < json_lines.size(); i++) {
bool res_start = (res->status_code == 0) && (i == 0);
if(res_start) {
// indicates first import result to be streamed
response_stream << json_lines[i];
} else {
response_stream << "\n" << json_lines[i];
}
}
// Since we use `res->status_code == 0` for flagging `res_start`, we will only set this
// when we have accumulated enough response data to stream.
// Otherwise, we will send an empty line as first response.
res->status_code = 200;
}
res->content_type_header = "text/plain; charset=utf-8";
res->body = response_stream.str();
res->final.store(req->last_chunk_aggregate);
stream_response(req, res);
return true;
}
bool post_add_document(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const char *ACTION = "action";
const char *DIRTY_VALUES_PARAM = "dirty_values";
if(req->params.count(ACTION) == 0) {
req->params[ACTION] = "create";
}
if(req->params[ACTION] != "create" && req->params[ACTION] != "update" && req->params[ACTION] != "upsert" &&
req->params[ACTION] != "emplace") {
res->set_400("Parameter `" + std::string(ACTION) + "` must be one of: create|update|upsert|emplace.");
return false;
}
if(req->params.count(DIRTY_VALUES_PARAM) == 0) {
req->params[DIRTY_VALUES_PARAM] = ""; // set it empty as default will depend on whether schema is enabled
}
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
const index_operation_t operation = get_index_operation(req->params[ACTION]);
const auto& dirty_values = collection->parse_dirty_values_option(req->params[DIRTY_VALUES_PARAM]);
size_t remote_embedding_timeout_ms = 60000;
size_t remote_embedding_num_tries = 2;
if(req->params.count("remote_embedding_timeout_ms") != 0) {
remote_embedding_timeout_ms = std::stoul(req->params["remote_embedding_timeout_ms"]);
}
if(req->params.count("remote_embedding_num_tries") != 0) {
remote_embedding_num_tries = std::stoul(req->params["remote_embedding_num_tries"]);
}
nlohmann::json document;
std::vector<std::string> json_lines = {req->body};
const nlohmann::json& inserted_doc_op = collection->add_many(json_lines, document, operation, "", dirty_values,
false, false, 200, remote_embedding_timeout_ms,
remote_embedding_num_tries);
if(!inserted_doc_op["success"].get<bool>()) {
nlohmann::json res_doc;
try {
res_doc = nlohmann::json::parse(json_lines[0]);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
res->status_code = res_doc["code"].get<size_t>();
// erase keys from res_doc except error and embedding_error
for(auto it = res_doc.begin(); it != res_doc.end(); ) {
if(it.key() != "error" && it.key() != "embedding_error") {
it = res_doc.erase(it);
} else {
++it;
}
}
// rename error to message if not empty and exists
if(res_doc.count("error") != 0 && !res_doc["error"].get<std::string>().empty()) {
res_doc["message"] = res_doc["error"];
res_doc.erase("error");
}
res->body = res_doc.dump();
return false;
}
res->set_201(document.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore));
return true;
}
bool patch_update_document(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
std::string doc_id = req->params["id"];
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
const char* DIRTY_VALUES_PARAM = "dirty_values";
if(req->params.count(DIRTY_VALUES_PARAM) == 0) {
req->params[DIRTY_VALUES_PARAM] = ""; // set it empty as default will depend on whether schema is enabled
}
const auto& dirty_values = collection->parse_dirty_values_option(req->params[DIRTY_VALUES_PARAM]);
Option<nlohmann::json> upserted_doc_op = collection->add(req->body, index_operation_t::UPDATE, doc_id, dirty_values);
if(!upserted_doc_op.ok()) {
res->set(upserted_doc_op.code(), upserted_doc_op.error());
return false;
}
res->set_200(upserted_doc_op.get().dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore));
return true;
}
bool patch_update_documents(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const char *FILTER_BY = "filter_by";
std::string filter_query;
if(req->params.count(FILTER_BY) == 0) {
res->set_400("Parameter `" + std::string(FILTER_BY) + "` must be provided.");
return false;
} else {
filter_query = req->params[FILTER_BY];
}
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
const char* DIRTY_VALUES_PARAM = "dirty_values";
if(req->params.count(DIRTY_VALUES_PARAM) == 0) {
req->params[DIRTY_VALUES_PARAM] = ""; // set it empty as default will depend on whether schema is enabled
}
search_stop_us = UINT64_MAX; // Filtering shouldn't timeout during update operation.
auto update_op = collection->update_matching_filter(filter_query, req->body, req->params[DIRTY_VALUES_PARAM]);
if(update_op.ok()) {
res->set_200(update_op.get().dump());
} else {
res->set(update_op.code(), update_op.error());
}
return update_op.ok();
}
bool get_fetch_document(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
std::string doc_id = req->params["id"];
const char* INCLUDE_FIELDS = "include_fields";
const char* EXCLUDE_FIELDS = "exclude_fields";
spp::sparse_hash_set<std::string> exclude_fields;
spp::sparse_hash_set<std::string> include_fields;
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
Option<nlohmann::json> doc_option = collection->get(doc_id);
if(!doc_option.ok()) {
res->set(doc_option.code(), doc_option.error());
return false;
}
if(req->params.count(INCLUDE_FIELDS) != 0) {
std::vector<std::string> include_fields_vec;
StringUtils::split(req->params[INCLUDE_FIELDS], include_fields_vec, ",");
include_fields = spp::sparse_hash_set<std::string>(include_fields_vec.begin(), include_fields_vec.end());
}
if(req->params.count(EXCLUDE_FIELDS) != 0) {
std::vector<std::string> exclude_fields_vec;
StringUtils::split(req->params[EXCLUDE_FIELDS], exclude_fields_vec, ",");
exclude_fields = spp::sparse_hash_set<std::string>(exclude_fields_vec.begin(), exclude_fields_vec.end());
}
nlohmann::json doc = doc_option.get();
    for(auto it = doc.begin(); it != doc.end(); ) {
        // erase() returns the next valid iterator; erasing by key and then incrementing the
        // old iterator (which pointed at the erased element) would be undefined behaviour.
        if((!include_fields.empty() && include_fields.count(it.key()) == 0) ||
           (!exclude_fields.empty() && exclude_fields.count(it.key()) != 0)) {
            it = doc.erase(it);
        } else {
            ++it;
        }
    }
res->set_200(doc.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore));
return true;
}
bool del_remove_document(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
std::string doc_id = req->params["id"];
bool ignore_not_found = false;
if((req->params.count("ignore_not_found") != 0) && (req->params["ignore_not_found"] == "true")) {
ignore_not_found = true;
}
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
Option<nlohmann::json> doc_option = collection->get(doc_id);
if (!doc_option.ok()) {
if (ignore_not_found && doc_option.code() == 404) {
nlohmann::json resp;
resp["id"] = doc_id;
res->set_200(resp.dump());
return true;
}
res->set(doc_option.code(), doc_option.error());
return false;
}
Option<std::string> deleted_id_op = collection->remove(doc_id);
if (!deleted_id_op.ok()) {
if (ignore_not_found && deleted_id_op.code() == 404) {
nlohmann::json resp;
resp["id"] = doc_id;
res->set_200(resp.dump());
return true;
}
res->set(deleted_id_op.code(), deleted_id_op.error());
return false;
}
nlohmann::json doc = doc_option.get();
res->set_200(doc.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore));
return true;
}
bool del_remove_documents(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
// defaults: will get overridden later if needed
res->content_type_header = "application/json";
res->status_code = 200;
// NOTE: this is a streaming response end-point so this handler will be called multiple times
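    // Rough flow, as implemented below: the first invocation resolves `filter_by` into a set of
    // document ids and stashes them in a deletion_state_t hung off req->data; each subsequent
    // invocation removes up to `batch_size` documents and defers itself until done, at which
    // point the final response carrying `num_deleted` is streamed back.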
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
req->last_chunk_aggregate = true;
res->final = true;
res->set_404();
stream_response(req, res);
return false;
}
const char *BATCH_SIZE = "batch_size";
const char *FILTER_BY = "filter_by";
const char *TOP_K_BY = "top_k_by";
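    // `top_k_by` has the form "<field>:<k>", e.g. "points:100" (example value for illustration).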
if(req->params.count(TOP_K_BY) != 0) {
std::vector<std::string> parts;
StringUtils::split(req->params[TOP_K_BY], parts, ":");
if(parts.size() != 2 || !StringUtils::is_uint32_t(parts[1])) {
req->last_chunk_aggregate = true;
res->final = true;
res->set_400("The `top_k_by` parameter is not valid.");
stream_response(req, res);
return false;
}
const std::string& field_name = parts[0];
const size_t k = std::stoull(parts[1]);
auto op = collection->truncate_after_top_k(field_name, k);
req->last_chunk_aggregate = true;
res->final = true;
if(!op.ok()) {
res->set_500(op.error());
stream_response(req, res);
return false;
}
res->set_200(R"({"ok": true})");
stream_response(req, res);
return true;
}
if(req->params.count(BATCH_SIZE) == 0) {
req->params[BATCH_SIZE] = "1000000000"; // 1 Billion
}
if(req->params.count(FILTER_BY) == 0) {
req->last_chunk_aggregate = true;
res->final = true;
res->set_400("Parameter `" + std::string(FILTER_BY) + "` must be provided.");
stream_response(req, res);
return false;
}
if(!StringUtils::is_uint32_t(req->params[BATCH_SIZE])) {
req->last_chunk_aggregate = true;
res->final = true;
res->set_400("Parameter `" + std::string(BATCH_SIZE) + "` must be a positive integer.");
stream_response(req, res);
return false;
}
const size_t DELETE_BATCH_SIZE = std::stoi(req->params[BATCH_SIZE]);
if(DELETE_BATCH_SIZE == 0) {
req->last_chunk_aggregate = true;
res->final = true;
res->set_400("Parameter `" + std::string(BATCH_SIZE) + "` must be a positive integer.");
stream_response(req, res);
return false;
}
std::string simple_filter_query;
if(req->params.count(FILTER_BY) != 0) {
simple_filter_query = req->params[FILTER_BY];
}
deletion_state_t* deletion_state = nullptr;
if(req->data == nullptr) {
deletion_state = new deletion_state_t{};
// destruction of data is managed by req destructor
req->data = deletion_state;
filter_result_t filter_result;
auto filter_ids_op = collection->get_filter_ids(simple_filter_query, filter_result, false);
if(!filter_ids_op.ok()) {
res->set(filter_ids_op.code(), filter_ids_op.error());
req->last_chunk_aggregate = true;
res->final = true;
stream_response(req, res);
return false;
}
deletion_state->index_ids.emplace_back(filter_result.count, filter_result.docs);
filter_result.docs = nullptr;
for(size_t i=0; i<deletion_state->index_ids.size(); i++) {
deletion_state->offsets.push_back(0);
}
deletion_state->collection = collection.get();
deletion_state->num_removed = 0;
} else {
deletion_state = dynamic_cast<deletion_state_t*>(req->data);
}
bool done = true;
Option<bool> remove_op = stateful_remove_docs(deletion_state, DELETE_BATCH_SIZE, done);
//LOG(INFO) << "Deletion batch size: " << DELETE_BATCH_SIZE << ", done: " << done;
if(!remove_op.ok()) {
res->set(remove_op.code(), remove_op.error());
req->last_chunk_aggregate = true;
res->final = true;
} else {
if(!done) {
req->last_chunk_aggregate = false;
res->final = false;
} else {
nlohmann::json response;
response["num_deleted"] = deletion_state->num_removed;
req->last_chunk_aggregate = true;
res->body = response.dump();
res->final = true;
}
}
if(res->final) {
stream_response(req, res);
} else {
defer_processing(req, res, 0);
}
return true;
}
bool get_aliases(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
const spp::sparse_hash_map<std::string, std::string> & symlinks = collectionManager.get_symlinks();
nlohmann::json res_json = nlohmann::json::object();
res_json["aliases"] = nlohmann::json::array();
for(const auto & symlink_collection: symlinks) {
nlohmann::json symlink;
symlink["name"] = symlink_collection.first;
symlink["collection_name"] = symlink_collection.second;
res_json["aliases"].push_back(symlink);
}
res->set_200(res_json.dump());
return true;
}
bool get_alias(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string & alias = req->params["alias"];
CollectionManager & collectionManager = CollectionManager::get_instance();
Option<std::string> collection_name_op = collectionManager.resolve_symlink(alias);
if(!collection_name_op.ok()) {
res->set_404();
return false;
}
nlohmann::json res_json;
res_json["name"] = alias;
res_json["collection_name"] = collection_name_op.get();
res->set_200(res_json.dump());
return true;
}
bool put_upsert_alias(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
CollectionManager & collectionManager = CollectionManager::get_instance();
const std::string & alias = req->params["alias"];
const char* COLLECTION_NAME = "collection_name";
if(req_json.count(COLLECTION_NAME) == 0) {
res->set_400(std::string("Parameter `") + COLLECTION_NAME + "` is required.");
return false;
}
Option<bool> success_op = collectionManager.upsert_symlink(alias, req_json[COLLECTION_NAME]);
if(!success_op.ok()) {
res->set_500(success_op.error());
return false;
}
req_json["name"] = alias;
res->set_200(req_json.dump());
return true;
}
bool del_alias(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string & alias = req->params["alias"];
CollectionManager & collectionManager = CollectionManager::get_instance();
Option<std::string> collection_name_op = collectionManager.resolve_symlink(alias);
if(!collection_name_op.ok()) {
res->set_404();
return false;
}
Option<bool> delete_op = collectionManager.delete_symlink(alias);
if(!delete_op.ok()) {
res->set_500(delete_op.error());
return false;
}
nlohmann::json res_json;
res_json["name"] = alias;
res_json["collection_name"] = collection_name_op.get();
res->set_200(res_json.dump());
return true;
}
bool get_overrides(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
uint32_t offset = 0, limit = 0;
if(req->params.count("offset") != 0) {
const auto &offset_str = req->params["offset"];
if(!StringUtils::is_uint32_t(offset_str)) {
res->set(400, "Offset param should be unsigned integer.");
return false;
}
offset = std::stoi(offset_str);
}
if(req->params.count("limit") != 0) {
const auto &limit_str = req->params["limit"];
if(!StringUtils::is_uint32_t(limit_str)) {
res->set(400, "Limit param should be unsigned integer.");
return false;
}
limit = std::stoi(limit_str);
}
nlohmann::json res_json;
res_json["overrides"] = nlohmann::json::array();
auto overrides_op = collection->get_overrides(limit, offset);
if(!overrides_op.ok()) {
res->set(overrides_op.code(), overrides_op.error());
return false;
}
const auto overrides = overrides_op.get();
for(const auto &kv: overrides) {
res_json["overrides"].push_back(kv.second->to_json());
}
res->set_200(res_json.dump());
return true;
}
bool get_override(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
std::string override_id = req->params["id"];
auto overrides_op = collection->get_override(override_id);
if(!overrides_op.ok()) {
res->set(overrides_op.code(), overrides_op.error());
return false;
}
res->set_200(overrides_op.get().to_json().dump());
return true;
}
bool put_override(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
std::string override_id = req->params["id"];
if(collection == nullptr) {
res->set_404();
return false;
}
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
override_t override;
Option<bool> parse_op = override_t::parse(req_json, override_id, override, "",
collection->get_symbols_to_index(),
collection->get_token_separators());
if(!parse_op.ok()) {
res->set(parse_op.code(), parse_op.error());
return false;
}
Option<uint32_t> add_op = collection->add_override(override);
if(!add_op.ok()) {
res->set(add_op.code(), add_op.error());
return false;
}
req_json["id"] = override.id;
res->set_200(req_json.dump());
return true;
}
bool del_override(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
Option<uint32_t> rem_op = collection->remove_override(req->params["id"]);
if(!rem_op.ok()) {
res->set(rem_op.code(), rem_op.error());
return false;
}
nlohmann::json res_json;
res_json["id"] = req->params["id"];
res->set_200(res_json.dump());
return true;
}
bool get_keys(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
AuthManager &auth_manager = collectionManager.getAuthManager();
const Option<std::vector<api_key_t>>& keys_op = auth_manager.list_keys();
if(!keys_op.ok()) {
res->set(keys_op.code(), keys_op.error());
return false;
}
nlohmann::json res_json;
res_json["keys"] = nlohmann::json::array();
const std::vector<api_key_t>& keys = keys_op.get();
for(const auto & key: keys) {
nlohmann::json key_obj = key.to_json();
key_obj["value_prefix"] = key_obj["value"];
key_obj.erase("value");
res_json["keys"].push_back(key_obj);
}
res->set_200(res_json.dump());
return true;
}
bool post_create_key(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
//LOG(INFO) << "post_create_key";
CollectionManager & collectionManager = CollectionManager::get_instance();
AuthManager &auth_manager = collectionManager.getAuthManager();
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
const Option<uint32_t>& validate_op = api_key_t::validate(req_json);
if(!validate_op.ok()) {
res->set(validate_op.code(), validate_op.error());
return false;
}
if(req_json.count("expires_at") == 0) {
req_json["expires_at"] = api_key_t::FAR_FUTURE_TIMESTAMP;
}
if(req_json.count("autodelete") == 0) {
req_json["autodelete"] = false;
}
const std::string &rand_key = (req_json.count("value") != 0) ?
req_json["value"].get<std::string>() : req->metadata;
api_key_t api_key(
rand_key,
req_json["description"].get<std::string>(),
req_json["actions"].get<std::vector<std::string>>(),
req_json["collections"].get<std::vector<std::string>>(),
req_json["expires_at"].get<uint64_t>(),
req_json["autodelete"].get<bool>()
);
const Option<api_key_t>& api_key_op = auth_manager.create_key(api_key);
if(!api_key_op.ok()) {
res->set(api_key_op.code(), api_key_op.error());
return false;
}
res->set_201(api_key_op.get().to_json().dump());
return true;
}
bool get_key(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
AuthManager &auth_manager = collectionManager.getAuthManager();
const std::string& key_id_str = req->params["id"];
uint32_t key_id = (uint32_t) std::stoul(key_id_str);
const Option<api_key_t>& key_op = auth_manager.get_key(key_id);
if(!key_op.ok()) {
res->set(key_op.code(), key_op.error());
return false;
}
nlohmann::json key_obj = key_op.get().to_json();
key_obj["value_prefix"] = key_obj["value"];
key_obj.erase("value");
res->set_200(key_obj.dump());
return true;
}
bool del_key(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
AuthManager &auth_manager = collectionManager.getAuthManager();
const std::string& key_id_str = req->params["id"];
uint32_t key_id = (uint32_t) std::stoul(key_id_str);
const Option<api_key_t> &del_op = auth_manager.remove_key(key_id);
if(!del_op.ok()) {
res->set(del_op.code(), del_op.error());
return false;
}
nlohmann::json res_json;
res_json["id"] = del_op.get().id;
res->set_200(res_json.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore));
return true;
}
bool post_snapshot(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string SNAPSHOT_PATH = "snapshot_path";
res->status_code = 201;
res->content_type_header = "application/json";
if(req->params.count(SNAPSHOT_PATH) == 0) {
req->params[SNAPSHOT_PATH] = "";
}
server->do_snapshot(req->params[SNAPSHOT_PATH], req, res);
return true;
}
bool post_vote(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
res->status_code = 200;
res->content_type_header = "application/json";
nlohmann::json response;
response["success"] = server->trigger_vote();
res->body = response.dump();
return true;
}
bool post_config(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
auto config_update_op = Config::get_instance().update_config(req_json);
if(!config_update_op.ok()) {
res->set(config_update_op.code(), config_update_op.error());
} else {
// for cache config, we have to resize the cache
if(req_json.count("cache-num-entries") != 0) {
std::unique_lock lock(mutex);
res_cache.capacity(Config::get_instance().get_cache_num_entries());
}
nlohmann::json response;
response["success"] = true;
res->set_201(response.dump());
}
return true;
}
bool post_clear_cache(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
{
std::unique_lock lock(mutex);
res_cache.clear();
}
nlohmann::json response;
response["success"] = true;
res->set_200(response.dump());
return true;
}
bool post_compact_db(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager& collectionManager = CollectionManager::get_instance();
rocksdb::Status status = collectionManager.get_store()->compact_all();
nlohmann::json response;
response["success"] = status.ok();
if(!status.ok()) {
response["error"] = "Error code: " + std::to_string(status.code());
res->set_500(response.dump());
} else {
res->set_200(response.dump());
}
return true;
}
bool post_reset_peers(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
res->status_code = 200;
res->content_type_header = "application/json";
nlohmann::json response;
response["success"] = server->reset_peers();
res->body = response.dump();
return true;
}
bool get_synonyms(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
uint32_t offset = 0, limit = 0;
if(req->params.count("offset") != 0) {
const auto &offset_str = req->params["offset"];
if(!StringUtils::is_uint32_t(offset_str)) {
res->set(400, "Offset param should be unsigned integer.");
return false;
}
offset = std::stoi(offset_str);
}
if(req->params.count("limit") != 0) {
const auto &limit_str = req->params["limit"];
if(!StringUtils::is_uint32_t(limit_str)) {
res->set(400, "Limit param should be unsigned integer.");
return false;
}
limit = std::stoi(limit_str);
}
nlohmann::json res_json;
res_json["synonyms"] = nlohmann::json::array();
auto synonyms_op = collection->get_synonyms(limit, offset);
if(!synonyms_op.ok()) {
res->set(synonyms_op.code(), synonyms_op.error());
return false;
}
const auto synonyms = synonyms_op.get();
for(const auto & kv: synonyms) {
res_json["synonyms"].push_back(kv.second->to_view_json());
}
res->set_200(res_json.dump());
return true;
}
bool get_synonym(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
std::string synonym_id = req->params["id"];
synonym_t synonym;
bool found = collection->get_synonym(synonym_id, synonym);
if(found) {
nlohmann::json synonym_json = synonym.to_view_json();
res->set_200(synonym_json.dump());
return true;
}
res->set_404();
return false;
}
bool put_synonym(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
std::string synonym_id = req->params["id"];
if(collection == nullptr) {
res->set_404();
return false;
}
nlohmann::json syn_json;
try {
syn_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
if(!syn_json.is_object()) {
res->set_400("Bad JSON.");
return false;
}
// These checks should be inside `add_synonym` but older versions of Typesense wrongly persisted
// `root` as an array, so we have to do it here so that on-disk synonyms are loaded properly
if(syn_json.count("root") != 0 && !syn_json["root"].is_string()) {
res->set_400("Key `root` should be a string.");
return false;
}
if(syn_json.count("synonyms") && syn_json["synonyms"].is_array()) {
if(syn_json["synonyms"].empty()) {
res->set_400("Could not find a valid string array of `synonyms`");
return false;
}
for(const auto& synonym: syn_json["synonyms"]) {
if (!synonym.is_string() || synonym.empty()) {
res->set_400("Could not find a valid string array of `synonyms`");
return false;
}
}
}
syn_json["id"] = synonym_id;
Option<bool> upsert_op = collection->add_synonym(syn_json);
if(!upsert_op.ok()) {
res->set(upsert_op.code(), upsert_op.error());
return false;
}
res->set_200(syn_json.dump());
return true;
}
bool del_synonym(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
auto collection = collectionManager.get_collection(req->params["collection"]);
if(collection == nullptr) {
res->set_404();
return false;
}
Option<bool> rem_op = collection->remove_synonym(req->params["id"]);
if(!rem_op.ok()) {
res->set(rem_op.code(), rem_op.error());
return false;
}
nlohmann::json res_json;
res_json["id"] = req->params["id"];
res->set_200(res_json.dump());
return true;
}
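// Route classification helpers: look up the route by its hash and compare its handler pointer.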
bool is_doc_import_route(uint64_t route_hash) {
route_path* rpath;
bool found = server->get_route(route_hash, &rpath);
return found && (rpath->handler == post_import_documents);
}
bool is_coll_create_route(uint64_t route_hash) {
route_path* rpath;
bool found = server->get_route(route_hash, &rpath);
return found && (rpath->handler == post_create_collection);
}
bool is_drop_collection_route(uint64_t route_hash) {
route_path* rpath;
bool found = server->get_route(route_hash, &rpath);
return found && (rpath->handler == del_drop_collection);
}
bool is_doc_write_route(uint64_t route_hash) {
route_path* rpath;
bool found = server->get_route(route_hash, &rpath);
return found && (rpath->handler == post_add_document || rpath->handler == patch_update_document);
}
bool is_doc_del_route(uint64_t route_hash) {
route_path* rpath;
bool found = server->get_route(route_hash, &rpath);
return found && (rpath->handler == del_remove_document || rpath->handler == del_remove_documents);
}
bool get_presets(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
CollectionManager & collectionManager = CollectionManager::get_instance();
const spp::sparse_hash_map<std::string, nlohmann::json> & presets = collectionManager.get_presets();
nlohmann::json res_json = nlohmann::json::object();
res_json["presets"] = nlohmann::json::array();
for(const auto& preset_kv: presets) {
nlohmann::json preset;
preset["name"] = preset_kv.first;
preset["value"] = preset_kv.second;
res_json["presets"].push_back(preset);
}
res->set_200(res_json.dump());
return true;
}
bool get_preset(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string & preset_name = req->params["name"];
CollectionManager & collectionManager = CollectionManager::get_instance();
nlohmann::json preset;
Option<bool> preset_op = collectionManager.get_preset(preset_name, preset);
if(!preset_op.ok()) {
res->set(preset_op.code(), preset_op.error());
return false;
}
nlohmann::json res_json;
res_json["name"] = preset_name;
res_json["value"] = preset;
res->set_200(res_json.dump());
return true;
}
bool put_upsert_preset(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
CollectionManager & collectionManager = CollectionManager::get_instance();
const std::string & preset_name = req->params["name"];
const char* PRESET_VALUE = "value";
if(req_json.count(PRESET_VALUE) == 0) {
res->set_400(std::string("Parameter `") + PRESET_VALUE + "` is required.");
return false;
}
Option<bool> success_op = collectionManager.upsert_preset(preset_name, req_json[PRESET_VALUE]);
if(!success_op.ok()) {
res->set_500(success_op.error());
return false;
}
req_json["name"] = preset_name;
res->set_200(req_json.dump());
return true;
}
bool del_preset(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string & preset_name = req->params["name"];
CollectionManager & collectionManager = CollectionManager::get_instance();
nlohmann::json preset;
Option<bool> preset_op = collectionManager.get_preset(preset_name, preset);
if(!preset_op.ok()) {
res->set(preset_op.code(), preset_op.error());
return false;
}
Option<bool> delete_op = collectionManager.delete_preset(preset_name);
if(!delete_op.ok()) {
res->set_500(delete_op.error());
return false;
}
nlohmann::json res_json;
res_json["name"] = preset_name;
res_json["value"] = preset;
res->set_200(res_json.dump());
return true;
}
bool get_stopwords(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
StopwordsManager& stopwordManager = StopwordsManager::get_instance();
const spp::sparse_hash_map<std::string, stopword_struct_t>& stopwords = stopwordManager.get_stopwords();
nlohmann::json res_json = nlohmann::json::object();
res_json["stopwords"] = nlohmann::json::array();
for(const auto& stopwords_kv: stopwords) {
auto stopword = stopwords_kv.second.to_json();
res_json["stopwords"].push_back(stopword);
}
res->set_200(res_json.dump());
return true;
}
bool get_stopword(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string & stopword_name = req->params["name"];
StopwordsManager& stopwordManager = StopwordsManager::get_instance();
stopword_struct_t stopwordStruct;
Option<bool> stopword_op = stopwordManager.get_stopword(stopword_name, stopwordStruct);
if(!stopword_op.ok()) {
res->set(stopword_op.code(), stopword_op.error());
return false;
}
nlohmann::json res_json;
res_json["stopwords"] = stopwordStruct.to_json();
res->set_200(res_json.dump());
return true;
}
bool put_upsert_stopword(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
StopwordsManager& stopwordManager = StopwordsManager::get_instance();
const std::string & stopword_name = req->params["name"];
Option<bool> success_op = stopwordManager.upsert_stopword(stopword_name, req_json, true);
if(!success_op.ok()) {
res->set(success_op.code(), success_op.error());
return false;
}
req_json["id"] = stopword_name;
res->set_200(req_json.dump());
return true;
}
bool del_stopword(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string & stopword_name = req->params["name"];
StopwordsManager& stopwordManager = StopwordsManager::get_instance();
Option<bool> delete_op = stopwordManager.delete_stopword(stopword_name);
if(!delete_op.ok()) {
res->set(delete_op.code(), delete_op.error());
return false;
}
nlohmann::json res_json;
res_json["id"] = stopword_name;
res->set_200(res_json.dump());
return true;
}
bool get_rate_limits(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
RateLimitManager* rateLimitManager = RateLimitManager::getInstance();
res->set_200(rateLimitManager->get_all_rules_json().dump());
return true;
}
bool get_rate_limit(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
RateLimitManager* rateLimitManager = RateLimitManager::getInstance();
// Convert param id to uint64_t
if(!StringUtils::is_uint32_t(req->params["id"])) {
res->set_400("{\"message\": \"Invalid ID\"}");
return false;
}
uint64_t id = std::stoull(req->params["id"]);
const auto& rule_option = rateLimitManager->find_rule_by_id(id);
if(!rule_option.ok()) {
res->set(rule_option.code(), rule_option.error());
return false;
}
res->set_200(rule_option.get().dump());
return true;
}
bool put_rate_limit(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
RateLimitManager* rateLimitManager = RateLimitManager::getInstance();
nlohmann::json req_json;
if(!StringUtils::is_uint32_t(req->params["id"])) {
res->set_400("{\"message\": \"Invalid ID\"}");
return false;
}
uint64_t id = std::stoull(req->params["id"]);
try {
req_json = nlohmann::json::parse(req->body);
} catch(const nlohmann::json::parse_error& e) {
res->set_400("Invalid JSON");
return false;
}
const auto& edit_rule_result = rateLimitManager->edit_rule(id, req_json);
if(!edit_rule_result.ok()) {
res->set(edit_rule_result.code(), edit_rule_result.error());
return false;
}
res->set_200(edit_rule_result.get().dump());
return true;
}
bool del_rate_limit(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
RateLimitManager* rateLimitManager = RateLimitManager::getInstance();
if(!StringUtils::is_uint32_t(req->params["id"])) {
res->set_400("{\"message\": \"Invalid ID\"}");
return false;
}
uint64_t id = std::stoull(req->params["id"]);
const auto& rule_option = rateLimitManager->find_rule_by_id(id);
if(!rule_option.ok()) {
res->set(rule_option.code(), rule_option.error());
return false;
}
rateLimitManager->delete_rule_by_id(id);
nlohmann::json res_json;
res_json["id"] = id;
res->set_200(res_json.dump());
return true;
}
bool post_rate_limit(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
RateLimitManager* rateLimitManager = RateLimitManager::getInstance();
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch (const std::exception & e) {
res->set_400("Bad JSON.");
return false;
}
auto add_rule_result = rateLimitManager->add_rule(req_json);
if(!add_rule_result.ok()) {
res->set(add_rule_result.code(), add_rule_result.error());
return false;
}
res->set_200(add_rule_result.get().dump());
return true;
}
bool get_active_throttles(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
RateLimitManager* rateLimitManager = RateLimitManager::getInstance();
res->set_200(rateLimitManager->get_throttled_entities_json().dump());
return true;
}
bool del_throttle(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
RateLimitManager* rateLimitManager = RateLimitManager::getInstance();
if(!StringUtils::is_uint32_t(req->params["id"])) {
res->set_400("{\"message\": \"Invalid ID\"}");
return false;
}
uint64_t id = std::stoull(req->params["id"]);
bool res_ = rateLimitManager->delete_ban_by_id(id);
if(!res_) {
res->set_400("{\"message\": \"Invalid ID\"}");
return false;
}
nlohmann::json res_json;
res_json["id"] = id;
res->set_200(res_json.dump());
return true;
}
bool del_exceed(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
RateLimitManager* rateLimitManager = RateLimitManager::getInstance();
if(!StringUtils::is_uint32_t(req->params["id"])) {
res->set_400("{\"message\": \"Invalid ID\"}");
return false;
}
uint64_t id = std::stoull(req->params["id"]);
bool res_ = rateLimitManager->delete_throttle_by_id(id);
if(!res_) {
res->set_400("{\"message\": \"Invalid ID\"}");
return false;
}
nlohmann::json res_json;
res_json["id"] = id;
res->set_200(res_json.dump());
return true;
}
bool get_limit_exceed_counts(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
RateLimitManager* rateLimitManager = RateLimitManager::getInstance();
res->set_200(rateLimitManager->get_exceeded_entities_json().dump());
return true;
}
Option<std::pair<std::string,std::string>> get_api_key_and_ip(const std::string& metadata) {
// format <length of api_key>:<api_key><ip>
// length of api_key is a uint32_t
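    // e.g. "6:secret10.0.0.1" -> api_key = "secret", ip = "10.0.0.1" (illustrative values);
    // the size checks below assume the trailing IPv4 address is at least 7 characters ("0.0.0.0").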
if(metadata.size() < 10) {
if(metadata.size() >= 2 && metadata[0] == '0' && metadata[1] == ':') {
// e.g. "0:0.0.0.0" (when api key is not provided at all)
std::string ip = metadata.substr(metadata.find(":") + 1);
return Option<std::pair<std::string,std::string>>(std::make_pair("", ip));
}
return Option<std::pair<std::string,std::string>>(400, "Invalid metadata");
}
if(metadata.find(":") == std::string::npos) {
return Option<std::pair<std::string,std::string>>(400, "Invalid metadata");
}
std::string key_len_str = metadata.substr(0, metadata.find(":"));
if(!StringUtils::is_uint32_t(key_len_str)) {
return Option<std::pair<std::string,std::string>>(400, "Invalid metadata");
}
uint32_t api_key_length = static_cast<uint32_t>(std::stoul(key_len_str));
if(metadata.size() < api_key_length + metadata.find(":") + 7) {
return Option<std::pair<std::string,std::string>>(400, "Invalid metadata");
}
std::string api_key = metadata.substr(metadata.find(":") + 1, api_key_length);
std::string ip = metadata.substr(metadata.find(":") + 1 + api_key_length);
// validate IP address
std::regex ip_pattern("\\b(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\b");
if(!std::regex_match(ip, ip_pattern)) {
return Option<std::pair<std::string,std::string>>(400, "Invalid metadata");
}
return Option<std::pair<std::string,std::string>>(std::make_pair(api_key, ip));
}
bool post_create_event(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
auto add_event_op = EventManager::get_instance().add_event(req_json, req->client_ip);
if(add_event_op.ok()) {
res->set_201(R"({"ok": true})");
return true;
}
res->set_400(add_event_op.error());
return false;
}
bool get_analytics_rules(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
auto rules_op = AnalyticsManager::get_instance().list_rules();
if(!rules_op.ok()) {
res->set(rules_op.code(), rules_op.error());
return false;
}
res->set_200(rules_op.get().dump());
return true;
}
bool get_analytics_rule(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
auto rules_op = AnalyticsManager::get_instance().get_rule(req->params["name"]);
if(!rules_op.ok()) {
res->set(rules_op.code(), rules_op.error());
return false;
}
res->set_200(rules_op.get().dump());
return true;
}
bool post_create_analytics_rules(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
auto op = AnalyticsManager::get_instance().create_rule(req_json, false, true);
if(!op.ok()) {
res->set(op.code(), op.error());
return false;
}
res->set_201(req_json.dump());
return true;
}
bool put_upsert_analytics_rules(const std::shared_ptr<http_req> &req, const std::shared_ptr<http_res> &res) {
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
req_json["name"] = req->params["name"];
auto op = AnalyticsManager::get_instance().create_rule(req_json, true, true);
if(!op.ok()) {
res->set(op.code(), op.error());
return false;
}
res->set_200(req_json.dump());
return true;
}
bool del_analytics_rules(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
auto op = AnalyticsManager::get_instance().remove_rule(req->params["name"]);
if(!op.ok()) {
res->set(op.code(), op.error());
return false;
}
nlohmann::json res_json;
res_json["name"] = req->params["name"];
res->set_200(res_json.dump());
return true;
}
bool post_write_analytics_to_db(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
if(!AnalyticsManager::get_instance().write_to_db(req_json)) {
res->set_500(R"({"ok": false})");
return false;
}
res->set_200(R"({"ok": true})");
return true;
}
bool post_proxy(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
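    // Expected payload, as validated below: {"url": "...", "method": "...",
    // "body": "<optional string>", "headers": {<optional string map>}}.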
HttpProxy& proxy = HttpProxy::get_instance();
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const nlohmann::json::parse_error& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
std::string body, url, method;
std::unordered_map<std::string, std::string> headers;
if(req_json.count("url") == 0 || req_json.count("method") == 0) {
res->set_400("Missing required fields.");
return false;
}
if(!req_json["url"].is_string() || !req_json["method"].is_string() || req_json["url"].get<std::string>().empty() || req_json["method"].get<std::string>().empty()) {
res->set_400("URL and method must be non-empty strings.");
return false;
}
try {
if(req_json.count("body") != 0 && !req_json["body"].is_string()) {
res->set_400("Body must be a string.");
return false;
}
if(req_json.count("headers") != 0 && !req_json["headers"].is_object()) {
res->set_400("Headers must be a JSON object.");
return false;
}
if(req_json.count("body")) {
body = req_json["body"].get<std::string>();
}
url = req_json["url"].get<std::string>();
method = req_json["method"].get<std::string>();
if(req_json.count("headers")) {
headers = req_json["headers"].get<std::unordered_map<std::string, std::string>>();
}
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
auto response = proxy.send(url, method, body, headers);
if(response.status_code != 200) {
int code = response.status_code;
res->set_body(code, response.body);
return false;
}
res->set_200(response.body);
return true;
}
bool post_conversation_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json model_json;
try {
model_json = nlohmann::json::parse(req->body);
} catch(const nlohmann::json::parse_error& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
if(!model_json.is_object()) {
res->set_400("Bad JSON.");
return false;
}
std::string model_id = req->metadata;
auto add_model_op = ConversationModelManager::add_model(model_json, model_id, true);
if(!add_model_op.ok()) {
res->set(add_model_op.code(), add_model_op.error());
return false;
}
Collection::hide_credential(model_json, "api_key");
res->set_200(model_json.dump());
return true;
}
bool get_conversation_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string& model_id = req->params["id"];
auto model_op = ConversationModelManager::get_model(model_id);
if(!model_op.ok()) {
res->set(model_op.code(), model_op.error());
return false;
}
auto model = model_op.get();
Collection::hide_credential(model, "api_key");
res->set_200(model.dump());
return true;
}
bool get_conversation_models(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
auto models_op = ConversationModelManager::get_all_models();
if(!models_op.ok()) {
res->set(models_op.code(), models_op.error());
return false;
}
auto models = models_op.get();
for(auto& model: models) {
Collection::hide_credential(model, "api_key");
}
res->set_200(models.dump());
return true;
}
bool del_conversation_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string& model_id = req->params["id"];
auto model_op = ConversationModelManager::delete_model(model_id);
if(!model_op.ok()) {
res->set(model_op.code(), model_op.error());
return false;
}
auto model = model_op.get();
Collection::hide_credential(model, "api_key");
res->set_200(model.dump());
return true;
}
bool put_conversation_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string& model_id = req->params["id"];
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const nlohmann::json::parse_error& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
if(!req_json.is_object()) {
res->set_400("Bad JSON.");
return false;
}
auto model_op = ConversationModelManager::update_model(model_id, req_json);
if(!model_op.ok()) {
res->set(model_op.code(), model_op.error());
return false;
}
auto model = model_op.get();
Collection::hide_credential(model, "api_key");
res->set_200(model.dump());
return true;
}
bool post_personalization_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
if (!req->params.count("name") || !req->params.count("collection") || !req->params.count("type")) {
res->set_400("Missing required parameters 'name', 'collection' and 'type'.");
return false;
}
req_json = {
{"name", req->params["name"]},
{"collection", req->params["collection"]},
{"type", req->params["type"]}
};
std::string model_id;
if (req->params.count("id")) {
req_json["id"] = req->params["id"];
model_id = req->params["id"];
}
const std::string model_data = req->body;
auto create_op = PersonalizationModelManager::add_model(req_json, model_id, true, model_data);
if(!create_op.ok()) {
res->set(create_op.code(), create_op.error());
return false;
}
auto model = create_op.get();
res->set_200(nlohmann::json{{"ok", true}, {"model_id", model}}.dump());
return true;
}
bool get_personalization_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string& model_id = req->params["id"];
auto model_op = PersonalizationModelManager::get_model(model_id);
if (!model_op.ok()) {
res->set(model_op.code(), model_op.error());
return false;
}
auto model = model_op.get();
if (model.contains("model_path")) {
model.erase("model_path");
}
res->set_200(model.dump());
return true;
}
bool get_personalization_models(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
auto models_op = PersonalizationModelManager::get_all_models();
if (!models_op.ok()) {
res->set(models_op.code(), models_op.error());
return false;
}
auto models = models_op.get();
for (auto& model : models) {
if (model.contains("model_path")) {
model.erase("model_path");
}
}
res->set_200(models.dump());
return true;
}
bool del_personalization_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
const std::string& model_id = req->params["id"];
auto delete_op = PersonalizationModelManager::delete_model(model_id);
if (!delete_op.ok()) {
res->set(delete_op.code(), delete_op.error());
return false;
}
auto deleted_model = delete_op.get();
if (deleted_model.contains("model_path")) {
deleted_model.erase("model_path");
}
res->set_200(deleted_model.dump());
return true;
}
bool put_personalization_model(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
if (req->params.count("name") && !req->params["name"].empty()) {
req_json["name"] = req->params["name"];
}
if (req->params.count("collection") && !req->params["collection"].empty()) {
req_json["collection"] = req->params["collection"];
}
if (req->params.count("type") && !req->params["type"].empty()) {
req_json["type"] = req->params["type"];
}
if (!req->params.count("id")) {
res->set_400("Missing required parameter 'id'.");
return false;
}
std::string model_id = req->params["id"];
const std::string model_data = req->body;
auto update_op = PersonalizationModelManager::update_model(model_id, req_json, model_data);
if(!update_op.ok()) {
res->set(update_op.code(), update_op.error());
return false;
}
nlohmann::json response = update_op.get();
if (response.contains("model_path")) {
response.erase("model_path");
}
res->set_200(response.dump());
return true;
}
| 112,214 | C++ | .cpp | 2,554 | 35.749804 | 190 | 0.603235 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
3,763 | collection_manager.cpp | typesense_typesense/src/collection_manager.cpp |
#include <string>
#include <vector>
#include <json.hpp>
#include <app_metrics.h>
#include <analytics_manager.h>
#include <event_manager.h>
#include "collection_manager.h"
#include "batched_indexer.h"
#include "logger.h"
#include "magic_enum.hpp"
#include "stopwords_manager.h"
#include "conversation_model.h"
#include "field.h"
constexpr const size_t CollectionManager::DEFAULT_NUM_MEMORY_SHARDS;
CollectionManager::CollectionManager() {
}
Collection* CollectionManager::init_collection(const nlohmann::json & collection_meta,
const uint32_t collection_next_seq_id,
Store* store,
float max_memory_ratio,
spp::sparse_hash_map<std::string, std::string>& referenced_in,
spp::sparse_hash_map<std::string, std::set<reference_pair_t>>& async_referenced_ins) {
std::string this_collection_name = collection_meta[Collection::COLLECTION_NAME_KEY].get<std::string>();
std::vector<field> fields;
nlohmann::json fields_map = collection_meta[Collection::COLLECTION_SEARCH_FIELDS_KEY];
for (nlohmann::json::iterator it = fields_map.begin(); it != fields_map.end(); ++it) {
nlohmann::json & field_obj = it.value();
// handle older records indexed before optional field introduction
if(field_obj.count(fields::optional) == 0) {
field_obj[fields::optional] = false;
}
if(field_obj.count(fields::index) == 0) {
field_obj[fields::index] = true;
}
if(field_obj.count(fields::locale) == 0) {
field_obj[fields::locale] = "";
}
if(field_obj.count(fields::infix) == 0) {
field_obj[fields::infix] = -1;
}
if(field_obj.count(fields::nested) == 0) {
field_obj[fields::nested] = false;
}
if(field_obj.count(fields::nested_array) == 0) {
field_obj[fields::nested_array] = 0;
}
if(field_obj.count(fields::num_dim) == 0) {
field_obj[fields::num_dim] = 0;
}
if (field_obj.count(fields::reference) == 0) {
field_obj[fields::reference] = "";
}
if (field_obj.count(fields::async_reference) == 0) {
field_obj[fields::async_reference] = false;
}
if(field_obj.count(fields::embed) == 0) {
field_obj[fields::embed] = nlohmann::json::object();
}
if(field_obj.count(fields::model_config) == 0) {
field_obj[fields::model_config] = nlohmann::json::object();
}
if(field_obj.count(fields::hnsw_params) == 0) {
field_obj[fields::hnsw_params] = nlohmann::json::object();
field_obj[fields::hnsw_params]["ef_construction"] = 200;
field_obj[fields::hnsw_params]["M"] = 16;
}
if(field_obj.count(fields::stem) == 0) {
field_obj[fields::stem] = false;
}
if(field_obj.count(fields::range_index) == 0) {
field_obj[fields::range_index] = false;
}
if(field_obj.count(fields::store) == 0) {
field_obj[fields::store] = true;
}
vector_distance_type_t vec_dist_type = vector_distance_type_t::cosine;
if(field_obj.count(fields::vec_dist) != 0) {
            auto vec_dist_type_op = magic_enum::enum_cast<vector_distance_type_t>(field_obj[fields::vec_dist].get<std::string>());
if(vec_dist_type_op.has_value()) {
vec_dist_type = vec_dist_type_op.value();
}
}
if(field_obj.count(fields::embed) != 0 && !field_obj[fields::embed].empty()) {
size_t num_dim = 0;
auto& model_config = field_obj[fields::embed][fields::model_config];
auto res = EmbedderManager::get_instance().validate_and_init_model(model_config, num_dim);
if(!res.ok()) {
const std::string& model_name = model_config["model_name"].get<std::string>();
LOG(ERROR) << "Error initializing model: " << model_name << ", error: " << res.error();
continue;
}
field_obj[fields::num_dim] = num_dim;
LOG(INFO) << "Model init done.";
}
field f(field_obj[fields::name], field_obj[fields::type], field_obj[fields::facet],
field_obj[fields::optional], field_obj[fields::index], field_obj[fields::locale],
-1, field_obj[fields::infix], field_obj[fields::nested], field_obj[fields::nested_array],
field_obj[fields::num_dim], vec_dist_type, field_obj[fields::reference], field_obj[fields::embed],
field_obj[fields::range_index], field_obj[fields::store], field_obj[fields::stem],
field_obj[fields::hnsw_params], field_obj[fields::async_reference]);
// value of `sort` depends on field type
if(field_obj.count(fields::sort) == 0) {
f.sort = f.is_num_sort_field();
} else {
f.sort = field_obj[fields::sort];
}
fields.push_back(f);
}
std::string default_sorting_field = collection_meta[Collection::COLLECTION_DEFAULT_SORTING_FIELD_KEY].get<std::string>();
uint64_t created_at = collection_meta.find((const char*)Collection::COLLECTION_CREATED) != collection_meta.end() ?
collection_meta[Collection::COLLECTION_CREATED].get<uint64_t>() : 0;
size_t num_memory_shards = collection_meta.count(Collection::COLLECTION_NUM_MEMORY_SHARDS) != 0 ?
collection_meta[Collection::COLLECTION_NUM_MEMORY_SHARDS].get<size_t>() :
DEFAULT_NUM_MEMORY_SHARDS;
std::string fallback_field_type = collection_meta.count(Collection::COLLECTION_FALLBACK_FIELD_TYPE) != 0 ?
collection_meta[Collection::COLLECTION_FALLBACK_FIELD_TYPE].get<std::string>() :
"";
bool enable_nested_fields = collection_meta.count(Collection::COLLECTION_ENABLE_NESTED_FIELDS) != 0 ?
collection_meta[Collection::COLLECTION_ENABLE_NESTED_FIELDS].get<bool>() :
false;
std::vector<std::string> symbols_to_index;
std::vector<std::string> token_separators;
if(collection_meta.count(Collection::COLLECTION_SYMBOLS_TO_INDEX) != 0) {
symbols_to_index = collection_meta[Collection::COLLECTION_SYMBOLS_TO_INDEX].get<std::vector<std::string>>();
}
if(collection_meta.count(Collection::COLLECTION_SEPARATORS) != 0) {
token_separators = collection_meta[Collection::COLLECTION_SEPARATORS].get<std::vector<std::string>>();
}
LOG(INFO) << "Found collection " << this_collection_name << " with " << num_memory_shards << " memory shards.";
std::shared_ptr<VQModel> model = nullptr;
if(collection_meta.count(Collection::COLLECTION_VOICE_QUERY_MODEL) != 0) {
const nlohmann::json& voice_query_model = collection_meta[Collection::COLLECTION_VOICE_QUERY_MODEL];
if(!voice_query_model.is_object()) {
LOG(ERROR) << "Parameter `voice_query_model` must be an object.";
}
if(voice_query_model.count("model_name") == 0) {
LOG(ERROR) << "Parameter `voice_query_model.model_name` is missing.";
}
if(!voice_query_model["model_name"].is_string() || voice_query_model["model_name"].get<std::string>().empty()) {
LOG(ERROR) << "Parameter `voice_query_model.model_name` is invalid.";
}
std::string model_name = voice_query_model["model_name"].get<std::string>();
auto model_res = VQModelManager::get_instance().validate_and_init_model(model_name);
if(!model_res.ok()) {
LOG(ERROR) << "Error while loading voice query model: " << model_res.error();
} else {
model = model_res.get();
}
}
nlohmann::json metadata;
if(collection_meta.count(Collection::COLLECTION_METADATA) != 0) {
metadata = collection_meta[Collection::COLLECTION_METADATA];
}
Collection* collection = new Collection(this_collection_name,
collection_meta[Collection::COLLECTION_ID_KEY].get<uint32_t>(),
created_at,
collection_next_seq_id,
store,
fields,
default_sorting_field,
max_memory_ratio,
fallback_field_type,
symbols_to_index,
token_separators,
enable_nested_fields, model, std::move(referenced_in),
metadata, std::move(async_referenced_ins));
return collection;
}
void CollectionManager::add_to_collections(Collection* collection) {
const std::string& collection_name = collection->get_name();
const uint32_t collection_id = collection->get_collection_id();
std::unique_lock lock(mutex);
collections.emplace(collection_name, collection);
collection_id_names.emplace(collection_id, collection_name);
}
void CollectionManager::init(Store *store, ThreadPool* thread_pool,
const float max_memory_ratio,
const std::string & auth_key,
std::atomic<bool>& quit,
const uint16_t& filter_by_max_operations) {
std::unique_lock lock(mutex);
this->store = store;
this->thread_pool = thread_pool;
this->bootstrap_auth_key = auth_key;
this->max_memory_ratio = max_memory_ratio;
this->quit = &quit;
this->filter_by_max_ops = filter_by_max_operations;
}
// used only in tests!
void CollectionManager::init(Store *store, const float max_memory_ratio, const std::string & auth_key,
std::atomic<bool>& quit,
const uint16_t& filter_by_max_operations) {
ThreadPool* thread_pool = new ThreadPool(8);
init(store, thread_pool, max_memory_ratio, auth_key, quit, filter_by_max_operations);
}
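// A reference field's value has the form "<ref_collection>.<ref_field>" (e.g. "products.id",
// illustrative). This pass records, for every referenced collection, which collections/fields
// point at it, tracking async references separately.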
void CollectionManager::_populate_referenced_ins(const std::string& collection_meta_json,
std::map<std::string, spp::sparse_hash_map<std::string, std::string>>& referenced_ins,
std::map<std::string, spp::sparse_hash_map<std::string, std::set<reference_pair_t>>>& async_referenced_ins) {
auto const& obj = nlohmann::json::parse(collection_meta_json, nullptr, false);
if (!obj.is_discarded() && obj.is_object() && obj.contains("name") && obj["name"].is_string() &&
obj.contains("fields")) {
auto const& collection_name = obj["name"];
for (const auto &field: obj["fields"]) {
if (!field.contains("name") || !field.contains("reference")) {
continue;
}
auto field_name = std::string(field["name"]);
auto const& reference = field["reference"].get<std::string>();
std::vector<std::string> split_result;
StringUtils::split(reference, split_result, ".");
if (split_result.size() < 2) {
LOG(ERROR) << "Invalid reference `" << reference << "`.";
continue;
}
auto ref_coll_name = split_result[0];
auto ref_field_name = reference.substr(ref_coll_name.size() + 1);
// Resolves alias if used in schema.
auto actual_ref_coll_it = CollectionManager::get_instance().collection_symlinks.find(ref_coll_name);
if (actual_ref_coll_it != CollectionManager::get_instance().collection_symlinks.end()) {
ref_coll_name = actual_ref_coll_it->second;
}
if (referenced_ins.count(ref_coll_name) == 0) {
referenced_ins.emplace(ref_coll_name, spp::sparse_hash_map<std::string, std::string>());
}
referenced_ins[ref_coll_name].emplace(collection_name, field_name);
if (field.contains(fields::async_reference) &&
field[fields::async_reference].is_boolean() && field[fields::async_reference].get<bool>()) {
async_referenced_ins[ref_coll_name][ref_field_name].emplace(collection_name, field_name);
}
}
}
}
Option<bool> CollectionManager::load(const size_t collection_batch_size, const size_t document_batch_size) {
// This function must be idempotent, i.e. when called multiple times, must produce the same state without leaks
LOG(INFO) << "CollectionManager::load()";
Option<bool> auth_init_op = auth_manager.init(store, bootstrap_auth_key);
if(!auth_init_op.ok()) {
LOG(ERROR) << "Auth manager init failed, error=" << auth_init_op.error();
}
std::string next_collection_id_str;
StoreStatus next_coll_id_status = store->get(NEXT_COLLECTION_ID_KEY, next_collection_id_str);
if(next_coll_id_status == StoreStatus::ERROR) {
return Option<bool>(500, "Error while fetching the next collection id from the disk.");
}
if(next_coll_id_status == StoreStatus::FOUND) {
next_collection_id = (uint32_t) stoi(next_collection_id_str);
} else {
next_collection_id = 0;
}
// load aliases
std::string symlink_prefix_key = std::string(SYMLINK_PREFIX) + "_";
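    // '`' is the character immediately after '_' in ASCII, so prefix + "`" acts as an exclusive
    // upper bound covering every key that starts with prefix + "_".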
std::string upper_bound_key = std::string(SYMLINK_PREFIX) + "`"; // cannot inline this
rocksdb::Slice upper_bound(upper_bound_key);
rocksdb::Iterator* iter = store->scan(symlink_prefix_key, &upper_bound);
while(iter->Valid() && iter->key().starts_with(symlink_prefix_key)) {
std::vector<std::string> parts;
StringUtils::split(iter->key().ToString(), parts, symlink_prefix_key);
LOG(INFO) << "Loading symlink " << parts[0] << " to " << iter->value().ToString();
collection_symlinks[parts[0]] = iter->value().ToString();
iter->Next();
}
delete iter;
LOG(INFO) << "Loading upto " << collection_batch_size << " collections in parallel, "
<< document_batch_size << " documents at a time.";
std::vector<std::string> collection_meta_jsons;
store->scan_fill(std::string(Collection::COLLECTION_META_PREFIX) + "_",
std::string(Collection::COLLECTION_META_PREFIX) + "`",
collection_meta_jsons);
const size_t num_collections = collection_meta_jsons.size();
LOG(INFO) << "Found " << num_collections << " collection(s) on disk.";
ThreadPool loading_pool(collection_batch_size);
// Referenced collection name -> (referencing collection name -> reference field name)
std::map<std::string, spp::sparse_hash_map<std::string, std::string>> referenced_ins;
// Referenced collection name -> referenced field name -> {referencing collection name, referencing field name}
std::map<std::string, spp::sparse_hash_map<std::string, std::set<reference_pair_t>>> async_referenced_ins;
for (const auto &collection_meta_json: collection_meta_jsons) {
_populate_referenced_ins(collection_meta_json, referenced_ins, async_referenced_ins);
}
size_t num_processed = 0;
std::mutex m_process;
std::condition_variable cv_process;
std::string collection_name;
for(size_t coll_index = 0; coll_index < num_collections; coll_index++) {
const auto& collection_meta_json = collection_meta_jsons[coll_index];
nlohmann::json collection_meta = nlohmann::json::parse(collection_meta_json, nullptr, false);
if(collection_meta.is_discarded()) {
LOG(ERROR) << "Error while parsing collection meta, json: " << collection_meta_json;
return Option<bool>(500, "Error while parsing collection meta.");
}
collection_name = collection_meta[Collection::COLLECTION_NAME_KEY].get<std::string>();
if(collection_name != "6490_searchproductsprod_7qnnmget2ttehgxfus2ski2yxa") {
//continue;
}
auto captured_store = store;
loading_pool.enqueue([captured_store, num_collections, collection_meta, document_batch_size,
&m_process, &cv_process, &num_processed, &next_coll_id_status, quit = quit,
&referenced_ins, &async_referenced_ins, collection_name]() {
spp::sparse_hash_map<std::string, std::string> referenced_in;
auto const& it = referenced_ins.find(collection_name);
if (it != referenced_ins.end()) {
referenced_in = it->second;
}
spp::sparse_hash_map<std::string, std::set<reference_pair_t>> async_referenced_in;
auto const& async_it = async_referenced_ins.find(collection_name);
if (async_it != async_referenced_ins.end()) {
async_referenced_in = async_it->second;
}
//auto begin = std::chrono::high_resolution_clock::now();
Option<bool> res = load_collection(collection_meta, document_batch_size, next_coll_id_status, *quit,
referenced_in, async_referenced_in);
/*long long int timeMillis =
std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - begin).count();
LOG(INFO) << "Time taken for indexing: " << timeMillis << "ms";*/
if(!res.ok()) {
LOG(ERROR) << "Error while loading collection. " << res.error();
LOG(ERROR) << "Typesense is quitting.";
captured_store->close();
exit(1);
}
std::unique_lock<std::mutex> lock(m_process);
num_processed++;
cv_process.notify_one();
size_t progress_modulo = std::max<size_t>(1, (num_collections / 10)); // every 10%
if(num_processed % progress_modulo == 0) {
LOG(INFO) << "Loaded " << num_processed << " collection(s) so far";
}
});
}
// wait for all collections to be loaded
std::unique_lock<std::mutex> lock_process(m_process);
cv_process.wait(lock_process, [&](){
return num_processed == num_collections;
// return num_processed == 1;
});
// load presets
std::string preset_prefix_key = std::string(PRESET_PREFIX) + "_";
std::string preset_upper_bound_key = std::string(PRESET_PREFIX) + "`"; // cannot inline this
rocksdb::Slice preset_upper_bound(preset_upper_bound_key);
iter = store->scan(preset_prefix_key, &preset_upper_bound);
while(iter->Valid() && iter->key().starts_with(preset_prefix_key)) {
std::vector<std::string> parts;
std::string preset_name = iter->key().ToString().substr(preset_prefix_key.size());
nlohmann::json preset_obj = nlohmann::json::parse(iter->value().ToString(), nullptr, false);
if(!preset_obj.is_discarded() && preset_obj.is_object()) {
preset_configs[preset_name] = preset_obj;
} else {
LOG(INFO) << "Invalid value for preset " << preset_name;
}
iter->Next();
}
delete iter;
//load stopwords
std::string stopword_prefix_key = std::string(StopwordsManager::STOPWORD_PREFIX) + "_";
std::string stopword_upper_bound_key = std::string(StopwordsManager::STOPWORD_PREFIX) + "`"; // cannot inline this
rocksdb::Slice stopword_upper_bound(stopword_upper_bound_key);
iter = store->scan(stopword_prefix_key, &stopword_upper_bound);
while(iter->Valid() && iter->key().starts_with(stopword_prefix_key)) {
std::vector<std::string> parts;
std::string stopword_name = iter->key().ToString().substr(stopword_prefix_key.size());
nlohmann::json stopword_obj = nlohmann::json::parse(iter->value().ToString(), nullptr, false);
if(!stopword_obj.is_discarded() && stopword_obj.is_object()) {
StopwordsManager::get_instance().upsert_stopword(stopword_name, stopword_obj);
} else {
LOG(INFO) << "Invalid object for stopword " << stopword_name;
}
iter->Next();
}
delete iter;
// restore query suggestions configs
std::vector<std::string> analytics_config_jsons;
store->scan_fill(AnalyticsManager::ANALYTICS_RULE_PREFIX,
std::string(AnalyticsManager::ANALYTICS_RULE_PREFIX) + "`",
analytics_config_jsons);
for(const auto& analytics_config_json: analytics_config_jsons) {
nlohmann::json analytics_config = nlohmann::json::parse(analytics_config_json, nullptr, false);
if(analytics_config.is_discarded()) {
LOG(ERROR) << "Skipping malformed analytics rule: " << analytics_config_json;
continue;
}
AnalyticsManager::get_instance().create_rule(analytics_config, false, false);
}
LOG(INFO) << "Loaded " << num_collections << " collection(s).";
loading_pool.shutdown();
return Option<bool>(true);
}
void CollectionManager::dispose() {
std::unique_lock lock(mutex);
for(auto & name_collection: collections) {
delete name_collection.second;
name_collection.second = nullptr;
}
collections.clear();
collection_symlinks.clear();
preset_configs.clear();
referenced_in_backlog.clear();
store->close();
}
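// Returns true if the request key matches the bootstrap auth key; otherwise defers to the managed API keys
// via AuthManager::authenticate().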
bool CollectionManager::auth_key_matches(const string& req_auth_key, const string& action,
const std::vector<collection_key_t>& collection_keys,
std::map<std::string, std::string>& params,
std::vector<nlohmann::json>& embedded_params_vec) const {
std::shared_lock lock(mutex);
// check with bootstrap auth key
if(bootstrap_auth_key == req_auth_key) {
return true;
}
// finally, check managed auth keys
return auth_manager.authenticate(action, collection_keys, params, embedded_params_vec);
}
Option<Collection*> CollectionManager::create_collection(const std::string& name,
const size_t num_memory_shards,
const std::vector<field> & fields,
const std::string& default_sorting_field,
const uint64_t created_at,
const std::string& fallback_field_type,
const std::vector<std::string>& symbols_to_index,
const std::vector<std::string>& token_separators,
const bool enable_nested_fields, std::shared_ptr<VQModel> model,
const nlohmann::json& metadata) {
std::unique_lock lock(mutex);
if(store->contains(Collection::get_meta_key(name))) {
return Option<Collection*>(409, std::string("A collection with name `") + name + "` already exists.");
}
// validate `fallback_field_type`
if(!fallback_field_type.empty()) {
field fallback_field_type_def("temp", fallback_field_type, false);
if(!fallback_field_type_def.has_valid_type()) {
return Option<Collection*>(400, std::string("Field `.*` has an invalid type."));
}
}
nlohmann::json fields_json = nlohmann::json::array();
Option<bool> fields_json_op = field::fields_to_json_fields(fields, default_sorting_field, fields_json);
if(!fields_json_op.ok()) {
return Option<Collection*>(fields_json_op.code(), fields_json_op.error());
}
uint32_t new_coll_id = next_collection_id;
next_collection_id++;
nlohmann::json collection_meta;
collection_meta[Collection::COLLECTION_NAME_KEY] = name;
collection_meta[Collection::COLLECTION_ID_KEY] = new_coll_id;
collection_meta[Collection::COLLECTION_SEARCH_FIELDS_KEY] = fields_json;
collection_meta[Collection::COLLECTION_DEFAULT_SORTING_FIELD_KEY] = default_sorting_field;
collection_meta[Collection::COLLECTION_CREATED] = created_at;
collection_meta[Collection::COLLECTION_NUM_MEMORY_SHARDS] = num_memory_shards;
collection_meta[Collection::COLLECTION_FALLBACK_FIELD_TYPE] = fallback_field_type;
collection_meta[Collection::COLLECTION_SYMBOLS_TO_INDEX] = symbols_to_index;
collection_meta[Collection::COLLECTION_SEPARATORS] = token_separators;
collection_meta[Collection::COLLECTION_ENABLE_NESTED_FIELDS] = enable_nested_fields;
if(model != nullptr) {
collection_meta[Collection::COLLECTION_VOICE_QUERY_MODEL] = nlohmann::json::object();
collection_meta[Collection::COLLECTION_VOICE_QUERY_MODEL]["model_name"] = model->get_model_name();
}
if(!metadata.empty()) {
collection_meta[Collection::COLLECTION_METADATA] = metadata;
}
rocksdb::WriteBatch batch;
batch.Put(Collection::get_next_seq_id_key(name), StringUtils::serialize_uint32_t(0));
batch.Put(Collection::get_meta_key(name), collection_meta.dump());
batch.Put(NEXT_COLLECTION_ID_KEY, std::to_string(next_collection_id));
bool write_ok = store->batch_write(batch);
if(!write_ok) {
return Option<Collection*>(500, "Could not write to on-disk storage.");
}
lock.unlock();
Collection* new_collection = new Collection(name, new_coll_id, created_at, 0, store, fields,
default_sorting_field,
this->max_memory_ratio, fallback_field_type,
symbols_to_index, token_separators,
enable_nested_fields, model,
spp::sparse_hash_map<std::string, std::string>(),
metadata);
add_to_collections(new_collection);
lock.lock();
auto it = referenced_in_backlog.find(name);
if (it != referenced_in_backlog.end()) {
new_collection->add_referenced_ins(it->second);
referenced_in_backlog.erase(it);
}
return Option<Collection*>(new_collection);
}
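// Looks up a collection by name without acquiring the collection manager mutex; callers must hold it.
// A real collection name takes precedence over a symlink (alias) with the same name.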
Collection* CollectionManager::get_collection_unsafe(const std::string & collection_name) const {
if(collections.count(collection_name) != 0) {
return collections.at(collection_name);
}
// a symlink name takes lower precedence than a real collection name
if(collection_symlinks.count(collection_name) != 0) {
const std::string & symlinked_name = collection_symlinks.at(collection_name);
if(collections.count(symlinked_name) != 0) {
return collections.at(symlinked_name);
}
}
return nullptr;
}
locked_resource_view_t<Collection> CollectionManager::get_collection(const std::string & collection_name) const {
std::shared_lock lock(mutex);
Collection* coll = get_collection_unsafe(collection_name);
return coll != nullptr ? locked_resource_view_t<Collection>(coll->get_lifecycle_mutex(), coll) :
locked_resource_view_t<Collection>(noop_coll_mutex, coll);
}
locked_resource_view_t<Collection> CollectionManager::get_collection_with_id(uint32_t collection_id) const {
std::shared_lock lock(mutex);
if(collection_id_names.count(collection_id) != 0) {
return get_collection(collection_id_names.at(collection_id));
}
return locked_resource_view_t<Collection>(mutex, nullptr);
}
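// Returns the collections visible to the given API key, with optional offset/limit paging. Results are sorted
// by descending collection id only for unpaginated requests.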
Option<std::vector<Collection*>> CollectionManager::get_collections(uint32_t limit, uint32_t offset,
const std::vector<std::string>& api_key_collections) const {
std::shared_lock lock(mutex);
std::vector<Collection*> collection_vec;
auto collections_it = collections.begin();
if(offset > 0) {
if(offset >= collections.size()) {
return Option<std::vector<Collection*>>(400, "Invalid offset param.");
}
std::advance(collections_it, offset);
}
auto collections_end = collections.end();
if(limit > 0 && (offset + limit < collections.size())) {
collections_end = collections_it;
std::advance(collections_end, limit);
}
for (; collections_it != collections_end; ++collections_it) {
if(is_valid_api_key_collection(api_key_collections, collections_it->second)) {
collection_vec.push_back(collections_it->second);
}
}
if(offset == 0 && limit == 0) { // don't sort results for paginated requests
std::sort(std::begin(collection_vec), std::end(collection_vec),
[](Collection *lhs, Collection *rhs) {
return lhs->get_collection_id() > rhs->get_collection_id();
});
}
return Option<std::vector<Collection*>>(collection_vec);
}
std::vector<std::string> CollectionManager::get_collection_names() const {
std::shared_lock lock(mutex);
std::vector<std::string> collection_vec;
for(const auto& kv: collections) {
collection_vec.push_back(kv.first);
}
return collection_vec;
}
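// Drops a collection (after resolving aliases): optionally deletes its documents, overrides and synonyms from
// the store, removes it from the in-memory maps, and frees the Collection object without holding any
// collection manager lock.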
Option<nlohmann::json> CollectionManager::drop_collection(const std::string& collection_name,
const bool remove_from_store,
const bool compact_store) {
std::shared_lock s_lock(mutex);
auto collection = get_collection_unsafe(collection_name);
if(collection == nullptr) {
return Option<nlohmann::json>(404, "No collection with name `" + collection_name + "` found.");
}
// to handle alias resolution
const std::string actual_coll_name = collection->get_name();
nlohmann::json collection_json = collection->get_summary_json();
if(remove_from_store) {
const std::string& del_key_prefix = std::to_string(collection->get_collection_id()) + "_";
const std::string& del_end_prefix = std::to_string(collection->get_collection_id()) + "`";
store->delete_range(del_key_prefix, del_end_prefix);
if(compact_store) {
store->flush();
store->compact_range(del_key_prefix, del_end_prefix);
}
// delete overrides
const std::string& del_override_prefix =
std::string(Collection::COLLECTION_OVERRIDE_PREFIX) + "_" + actual_coll_name + "_";
std::string upper_bound_key = std::string(Collection::COLLECTION_OVERRIDE_PREFIX) + "_" +
actual_coll_name + "`"; // cannot inline this
rocksdb::Slice upper_bound(upper_bound_key);
rocksdb::Iterator* iter = store->scan(del_override_prefix, &upper_bound);
while(iter->Valid() && iter->key().starts_with(del_override_prefix)) {
store->remove(iter->key().ToString());
iter->Next();
}
delete iter;
// delete synonyms
const std::string& del_synonym_prefix =
std::string(SynonymIndex::COLLECTION_SYNONYM_PREFIX) + "_" + actual_coll_name + "_";
std::string syn_upper_bound_key = std::string(SynonymIndex::COLLECTION_SYNONYM_PREFIX) + "_" +
actual_coll_name + "`"; // cannot inline this
rocksdb::Slice syn_upper_bound(syn_upper_bound_key);
iter = store->scan(del_synonym_prefix, &syn_upper_bound);
while(iter->Valid() && iter->key().starts_with(del_synonym_prefix)) {
store->remove(iter->key().ToString());
iter->Next();
}
delete iter;
store->remove(Collection::get_next_seq_id_key(actual_coll_name));
store->remove(Collection::get_meta_key(actual_coll_name));
}
s_lock.unlock();
std::unique_lock u_lock(mutex);
collections.erase(actual_coll_name);
collection_id_names.erase(collection->get_collection_id());
const auto& embedding_fields = collection->get_embedding_fields();
u_lock.unlock();
for(const auto& embedding_field : embedding_fields) {
const auto& model_name = embedding_field.embed[fields::model_config]["model_name"].get<std::string>();
process_embedding_field_delete(model_name);
}
// don't hold any collection manager locks here, since this can take some time
delete collection;
return Option<nlohmann::json>(collection_json);
}
uint32_t CollectionManager::get_next_collection_id() const {
return next_collection_id;
}
std::string CollectionManager::get_symlink_key(const std::string & symlink_name) {
return std::string(SYMLINK_PREFIX) + "_" + symlink_name;
}
spp::sparse_hash_map<std::string, std::string> CollectionManager::get_symlinks() const {
std::shared_lock lock(mutex);
return collection_symlinks;
}
Option<std::string> CollectionManager::resolve_symlink(const std::string & symlink_name) const {
std::shared_lock lock(mutex);
if(collection_symlinks.count(symlink_name) != 0) {
return Option<std::string>(collection_symlinks.at(symlink_name));
}
return Option<std::string>(404, "Not found.");
}
Option<bool> CollectionManager::upsert_symlink(const std::string & symlink_name, const std::string & collection_name) {
std::unique_lock lock(mutex);
if(collections.count(symlink_name) != 0) {
return Option<bool>(500, "Name `" + symlink_name + "` conflicts with an existing collection name.");
}
bool inserted = store->insert(get_symlink_key(symlink_name), collection_name);
if(!inserted) {
return Option<bool>(500, "Unable to insert into store.");
}
collection_symlinks[symlink_name] = collection_name;
return Option<bool>(true);
}
Option<bool> CollectionManager::delete_symlink(const std::string & symlink_name) {
std::unique_lock lock(mutex);
bool removed = store->remove(get_symlink_key(symlink_name));
if(!removed) {
return Option<bool>(500, "Unable to delete from store.");
}
collection_symlinks.erase(symlink_name);
return Option<bool>(true);
}
Store* CollectionManager::get_store() {
return store;
}
AuthManager& CollectionManager::getAuthManager() {
return auth_manager;
}
bool parse_multi_eval(const std::string& sort_by_str, uint32_t& index, std::vector<sort_by>& sort_fields) {
// FORMAT:
// _eval([ (<expr_1>): <score_1>, (<expr_2>): <score_2> ]):<order>
std::vector<std::string> eval_expressions;
std::vector<std::int64_t> scores;
while (true) {
if (index >= sort_by_str.size()) {
return false;
} else if (sort_by_str[index] == ']') {
break;
}
auto open_paren_pos = sort_by_str.find('(', index);
if (open_paren_pos == std::string::npos) {
return false;
}
index = open_paren_pos;
std::string eval_expr = "(";
int paren_count = 1;
while (++index < sort_by_str.size() && paren_count > 0) {
if (sort_by_str[index] == '(') {
paren_count++;
} else if (sort_by_str[index] == ')') {
paren_count--;
}
eval_expr += sort_by_str[index];
}
// Remove the outer parentheses.
eval_expr = eval_expr.substr(1, eval_expr.size() - 2);
if (paren_count != 0 || index >= sort_by_str.size()) {
return false;
}
while (sort_by_str[index] != ':' && ++index < sort_by_str.size());
if (index >= sort_by_str.size()) {
return false;
}
std::string score;
while (++index < sort_by_str.size() && !(sort_by_str[index] == ',' || sort_by_str[index] == ']')) {
score += sort_by_str[index];
}
StringUtils::trim(score);
if (!StringUtils::is_int64_t(score)) {
return false;
}
eval_expressions.emplace_back(eval_expr);
scores.emplace_back(std::stoll(score));
}
while (++index < sort_by_str.size() && sort_by_str[index] != ':');
if (index >= sort_by_str.size()) {
return false;
}
std::string order_str;
while (++index < sort_by_str.size() && sort_by_str[index] != ',') {
order_str += sort_by_str[index];
}
StringUtils::trim(order_str);
StringUtils::toupper(order_str);
sort_fields.emplace_back(eval_expressions, scores, order_str);
return true;
}
bool parse_eval(const std::string& sort_by_str, uint32_t& index, std::vector<sort_by>& sort_fields) {
// FORMAT:
// _eval(<expr>):<order>
std::string eval_expr = "(";
int paren_count = 1;
while (++index < sort_by_str.size() && paren_count > 0) {
if (sort_by_str[index] == '(') {
paren_count++;
} else if (sort_by_str[index] == ')') {
paren_count--;
}
eval_expr += sort_by_str[index];
}
// Remove the outer parentheses.
eval_expr = eval_expr.substr(1, eval_expr.size() - 2);
if (paren_count != 0 || index >= sort_by_str.size()) {
return false;
}
while (sort_by_str[index] != ':' && ++index < sort_by_str.size());
if (index >= sort_by_str.size()) {
return false;
}
std::string order_str;
while (++index < sort_by_str.size() && sort_by_str[index] != ',') {
order_str += sort_by_str[index];
}
StringUtils::trim(order_str);
StringUtils::toupper(order_str);
std::vector<std::string> eval_expressions = {eval_expr};
std::vector<int64_t> scores = {1};
sort_fields.emplace_back(eval_expressions, scores, order_str);
return true;
}
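// Parses a nested join sort_by expression of the form `$CollectionName(field_1:<order>, $Nested(field_2:<order>))`
// starting at `index`, which must point at the `$`. Emits sort_by entries for the inner expressions and records
// the chain of collection names in `nested_join_collection_names`. Returns false on malformed input.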
bool parse_nested_join_sort_by_str(const std::string& sort_by_str, uint32_t& index, const std::string& parent_coll_name,
std::vector<sort_by>& sort_fields) {
if (sort_by_str[index] != '$') {
return false;
}
std::string sort_field_expr;
char prev_non_space_char = '`';
auto open_paren_pos = sort_by_str.find('(', index);
if (open_paren_pos == std::string::npos) {
return false;
}
auto const& collection_name = sort_by_str.substr(index + 1, open_paren_pos - index - 1);
index = open_paren_pos;
int paren_count = 1;
while (++index < sort_by_str.size() && paren_count > 0) {
if (sort_by_str[index] == '(') {
paren_count++;
} else if (sort_by_str[index] == ')' && --paren_count == 0) {
break;
}
if (sort_by_str[index] == '$' && (prev_non_space_char == '`' || prev_non_space_char == ',')) {
// Nested join sort_by
// Process the sort fields provided up until now.
if (!sort_field_expr.empty()) {
sort_fields.emplace_back("$" + collection_name + "(" + sort_field_expr + ")", "");
auto& collection_names = sort_fields.back().nested_join_collection_names;
collection_names.insert(collection_names.begin(), parent_coll_name);
collection_names.emplace_back(collection_name);
sort_field_expr.clear();
}
auto prev_size = sort_fields.size();
if (!parse_nested_join_sort_by_str(sort_by_str, index, collection_name, sort_fields)) {
return false;
}
for (; prev_size < sort_fields.size(); prev_size++) {
auto& collection_names = sort_fields[prev_size].nested_join_collection_names;
collection_names.insert(collection_names.begin(), parent_coll_name);
}
continue;
}
sort_field_expr += sort_by_str[index];
if (sort_by_str[index] != ' ') {
prev_non_space_char = sort_by_str[index];
}
}
if (paren_count != 0) {
return false;
}
if (!sort_field_expr.empty()) {
sort_fields.emplace_back("$" + collection_name + "(" + sort_field_expr + ")", "");
auto& collection_names = sort_fields.back().nested_join_collection_names;
collection_names.insert(collection_names.begin(), parent_coll_name);
collection_names.emplace_back(collection_name);
}
// Skip the space in between the sort_by expressions.
while (index + 1 < sort_by_str.size() && (sort_by_str[index + 1] == ' ' || sort_by_str[index + 1] == ',')) {
index++;
}
return true;
}
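// Splits a raw sort_by string, e.g. `field_1:desc, _eval(<expr>):desc, $OtherCollection(field_2:asc)`, into
// individual sort_by entries. `$Collection(...)` and `_eval(...)` clauses are delegated to the helpers above;
// everything else is split on top-level commas into `<field>:<order>` pairs.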
bool CollectionManager::parse_sort_by_str(std::string sort_by_str, std::vector<sort_by>& sort_fields) {
std::string sort_field_expr;
char prev_non_space_char = '`';
int brace_open_count = 0;
for(uint32_t i=0; i < sort_by_str.size(); i++) {
if (sort_field_expr.empty()) {
if (sort_by_str[i] == '$') {
// Sort by reference field
auto open_paren_pos = sort_by_str.find('(', i);
if (open_paren_pos == std::string::npos) {
return false;
}
auto const& collection_name = sort_by_str.substr(i + 1, open_paren_pos - i - 1);
i = open_paren_pos;
int paren_count = 1;
while (++i < sort_by_str.size() && paren_count > 0) {
if (sort_by_str[i] == '(') {
paren_count++;
} else if (sort_by_str[i] == ')' && --paren_count == 0) {
break;
}
if (sort_by_str[i] == '$' && (prev_non_space_char == '`' || prev_non_space_char == ',')) {
// Nested join sort_by
// Process the sort fields collected so far, to preserve the order in which sort_by expressions were
// specified. E.g., `$Customers(product_price:DESC, $foo(bar:asc))` should result in
// {`$Customers(product_price:DESC)`, `$Customers($foo(bar:asc))`} and not the other way around.
if (!sort_field_expr.empty()) {
sort_fields.emplace_back("$" + collection_name + "(" + sort_field_expr + ")", "");
sort_field_expr.clear();
}
if (!parse_nested_join_sort_by_str(sort_by_str, i, collection_name, sort_fields)) {
return false;
}
continue;
}
sort_field_expr += sort_by_str[i];
if (sort_by_str[i] != ' ') {
prev_non_space_char = sort_by_str[i];
}
}
if (paren_count != 0) {
return false;
}
if (!sort_field_expr.empty()) {
sort_fields.emplace_back("$" + collection_name + "(" + sort_field_expr + ")", "");
sort_field_expr.clear();
}
// Skip the space in between the sort_by expressions.
while (i + 1 < sort_by_str.size() && (sort_by_str[i + 1] == ' ' || sort_by_str[i + 1] == ',')) {
i++;
}
continue;
} else if (sort_by_str.substr(i, 5) == sort_field_const::eval) {
// Optional filtering
auto open_paren_pos = sort_by_str.find('(', i);
if (open_paren_pos == std::string::npos) {
return false;
}
i = open_paren_pos;
while(sort_by_str[++i] == ' ');
if (sort_by_str[i] == '$' && sort_by_str.find('(', i) != std::string::npos) {
// Reference expression inside `_eval()`
return false;
}
auto result = sort_by_str[i] == '[' ? parse_multi_eval(sort_by_str, i, sort_fields) :
parse_eval(sort_by_str, --i, sort_fields);
if (!result) {
return false;
}
// Skip the space in between the sort_by expressions.
while (i + 1 < sort_by_str.size() && sort_by_str[i + 1] == ' ') {
i++;
}
continue;
}
}
if(i == sort_by_str.size()-1 || (sort_by_str[i] == ',' && !isdigit(prev_non_space_char)
&& brace_open_count == 0)) {
if(i == sort_by_str.size()-1) {
sort_field_expr += sort_by_str[i];
}
int colon_index = sort_field_expr.size()-1;
while(colon_index >= 0) {
if(sort_field_expr[colon_index] == ':') {
break;
}
colon_index--;
}
if(colon_index < 0 || colon_index+1 == sort_field_expr.size()) {
return false;
}
std::string order_str = sort_field_expr.substr(colon_index + 1);
StringUtils::trim(order_str);
StringUtils::toupper(order_str);
std::string field_name = sort_field_expr.substr(0, colon_index);
StringUtils::trim(field_name);
sort_fields.emplace_back(field_name, order_str);
sort_field_expr = "";
// Skip the space in between the sort_by expressions.
while (i + 1 < sort_by_str.size() && sort_by_str[i + 1] == ' ') {
i++;
}
} else {
if(sort_by_str[i] == '(') {
brace_open_count++;
} else if(sort_by_str[i] == ')') {
brace_open_count--;
}
sort_field_expr += sort_by_str[i];
}
if(sort_by_str[i] != ' ') {
prev_non_space_char = sort_by_str[i];
}
}
return true;
}
Option<bool> add_unsigned_int_param(const std::string& param_name, const std::string& str_val, size_t* int_val) {
if(!StringUtils::is_uint32_t(str_val)) {
return Option<bool>(400, "Parameter `" + std::string(param_name) + "` must be an unsigned integer.");
}
*int_val = std::stoul(str_val);
return Option<bool>(true);
}
Option<bool> add_unsigned_int_list_param(const std::string& param_name, const std::string& str_val,
std::vector<uint32_t>* int_vals) {
std::vector<std::string> str_vals;
StringUtils::split(str_val, str_vals, ",");
int_vals->clear();
for(auto& str : str_vals) {
if(StringUtils::is_uint32_t(str)) {
int_vals->push_back((uint32_t)std::stoul(str));
} else {
return Option<bool>(400, "Parameter `" + param_name + "` is malformed.");
}
}
return Option<bool>(true);
}
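// Runs a single search request: merges embedded (scoped API key) params and preset params into `req_params`,
// parses the supported query parameters into typed values, invokes Collection::search() and serializes the
// result into `results_json_str`.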
Option<bool> CollectionManager::do_search(std::map<std::string, std::string>& req_params,
nlohmann::json& embedded_params,
std::string& results_json_str,
uint64_t start_ts) {
auto begin = std::chrono::high_resolution_clock::now();
const char *NUM_TYPOS = "num_typos";
const char *MIN_LEN_1TYPO = "min_len_1typo";
const char *MIN_LEN_2TYPO = "min_len_2typo";
const char *PREFIX = "prefix";
const char *DROP_TOKENS_THRESHOLD = "drop_tokens_threshold";
const char *TYPO_TOKENS_THRESHOLD = "typo_tokens_threshold";
const char *FILTER = "filter_by";
const char *QUERY = "q";
const char *QUERY_BY = "query_by";
const char *QUERY_BY_WEIGHTS = "query_by_weights";
const char *SORT_BY = "sort_by";
const char *FACET_BY = "facet_by";
const char *FACET_QUERY = "facet_query";
const char *FACET_QUERY_NUM_TYPOS = "facet_query_num_typos";
const char *MAX_FACET_VALUES = "max_facet_values";
const char *FACET_STRATEGY = "facet_strategy";
const char *FACET_RETURN_PARENT = "facet_return_parent";
const char *VECTOR_QUERY = "vector_query";
const char* REMOTE_EMBEDDING_TIMEOUT_MS = "remote_embedding_timeout_ms";
const char* REMOTE_EMBEDDING_NUM_TRIES = "remote_embedding_num_tries";
const char *GROUP_BY = "group_by";
const char *GROUP_LIMIT = "group_limit";
const char *GROUP_MISSING_VALUES = "group_missing_values";
const char *LIMIT_HITS = "limit_hits";
const char *PER_PAGE = "per_page";
const char *PAGE = "page";
const char *OFFSET = "offset";
const char *LIMIT = "limit";
const char *RANK_TOKENS_BY = "rank_tokens_by";
const char *INCLUDE_FIELDS = "include_fields";
const char *EXCLUDE_FIELDS = "exclude_fields";
const char *PINNED_HITS = "pinned_hits";
const char *HIDDEN_HITS = "hidden_hits";
const char *ENABLE_OVERRIDES = "enable_overrides";
const char *FILTER_CURATED_HITS = "filter_curated_hits";
const char *ENABLE_SYNONYMS = "enable_synonyms";
const char *MAX_CANDIDATES = "max_candidates";
const char *INFIX = "infix";
const char *MAX_EXTRA_PREFIX = "max_extra_prefix";
const char *MAX_EXTRA_SUFFIX = "max_extra_suffix";
// strings under this length will be fully highlighted, instead of showing a snippet of relevant portion
const char *SNIPPET_THRESHOLD = "snippet_threshold";
// the number of tokens that should surround the highlighted text
const char *HIGHLIGHT_AFFIX_NUM_TOKENS = "highlight_affix_num_tokens";
// list of fields which will be highlighted fully without snippeting
const char *HIGHLIGHT_FULL_FIELDS = "highlight_full_fields";
const char *HIGHLIGHT_FIELDS = "highlight_fields";
const char *HIGHLIGHT_START_TAG = "highlight_start_tag";
const char *HIGHLIGHT_END_TAG = "highlight_end_tag";
const char *PRIORITIZE_EXACT_MATCH = "prioritize_exact_match";
const char *PRIORITIZE_TOKEN_POSITION = "prioritize_token_position";
const char *PRE_SEGMENTED_QUERY = "pre_segmented_query";
const char *SEARCH_CUTOFF_MS = "search_cutoff_ms";
const char *EXHAUSTIVE_SEARCH = "exhaustive_search";
const char *SPLIT_JOIN_TOKENS = "split_join_tokens";
const char *TEXT_MATCH_TYPE = "text_match_type";
const char *ENABLE_HIGHLIGHT_V1 = "enable_highlight_v1";
const char *FACET_SAMPLE_PERCENT = "facet_sample_percent";
const char *FACET_SAMPLE_THRESHOLD = "facet_sample_threshold";
const char *CONVERSATION = "conversation";
const char *CONVERSATION_ID = "conversation_id";
const char *SYSTEM_PROMPT = "system_prompt";
const char *CONVERSATION_MODEL_ID = "conversation_model_id";
const char *DROP_TOKENS_MODE = "drop_tokens_mode";
const char *PRIORITIZE_NUM_MATCHING_FIELDS = "prioritize_num_matching_fields";
const char *OVERRIDE_TAGS = "override_tags";
const char *VOICE_QUERY = "voice_query";
const char *ENABLE_TYPOS_FOR_NUMERICAL_TOKENS = "enable_typos_for_numerical_tokens";
const char *ENABLE_TYPOS_FOR_ALPHA_NUMERICAL_TOKENS = "enable_typos_for_alpha_numerical_tokens";
const char *ENABLE_LAZY_FILTER = "enable_lazy_filter";
const char *MAX_FILTER_BY_CANDIDATES = "max_filter_by_candidates";
const char *SYNONYM_PREFIX = "synonym_prefix";
const char *SYNONYM_NUM_TYPOS = "synonym_num_typos";
// query-time flag to enable analytics for this query
const char *ENABLE_ANALYTICS = "enable_analytics";
//for hybrid search, compute text_match_score for only vector search results and vector_distance for only text_match results
const char* RERANK_HYBRID_MATCHES = "rerank_hybrid_matches";
const char* VALIDATE_FIELD_NAMES = "validate_field_names";
// enrich params with values from embedded params
for(auto& item: embedded_params.items()) {
if(item.key() == "expires_at") {
continue;
}
// overwrite = true as embedded params have higher priority
if (!AuthManager::add_item_to_params(req_params, item, true)) {
return Option<bool>(400, "Error applying search parameters inside Scoped Search API key");
}
}
const auto preset_it = req_params.find("preset");
if(preset_it != req_params.end()) {
nlohmann::json preset;
const auto& preset_op = CollectionManager::get_instance().get_preset(preset_it->second, preset);
// NOTE: we merge only single preset configuration because multi ("searches") preset value replaces
// the request body directly before we reach this single search request function.
if(preset_op.ok() && !preset.contains("searches")) {
if(!preset.is_object()) {
return Option<bool>(400, "Search preset is not an object.");
}
for(const auto& search_item: preset.items()) {
// overwrite = false since req params will contain embedded params and so has higher priority
bool populated = AuthManager::add_item_to_params(req_params, search_item, false);
if(!populated) {
return Option<bool>(400, "One or more search parameters are malformed.");
}
}
}
}
//check if stopword set is supplied
const auto stopword_it = req_params.find("stopwords");
std::string stopwords_set="";
if(stopword_it != req_params.end()) {
stopwords_set = stopword_it->second;
if(!StopwordsManager::get_instance().stopword_exists(stopwords_set)) {
return Option<bool>(404, "Could not find the stopword set named `" + stopwords_set + "`.");
}
}
CollectionManager & collectionManager = CollectionManager::get_instance();
const std::string& orig_coll_name = req_params["collection"];
auto collection = collectionManager.get_collection(orig_coll_name);
if(collection == nullptr) {
return Option<bool>(404, "Not found.");
}
// check presence of mandatory params here
if(req_params.count(QUERY) == 0 && req_params.count(VOICE_QUERY) == 0) {
return Option<bool>(400, std::string("Parameter `") + QUERY + "` is required.");
}
// end check for mandatory params
const std::string& raw_query = req_params[QUERY];
std::vector<uint32_t> num_typos = {2};
size_t min_len_1typo = 4;
size_t min_len_2typo = 7;
std::vector<bool> prefixes = {true};
size_t drop_tokens_threshold = Index::DROP_TOKENS_THRESHOLD;
size_t typo_tokens_threshold = Index::TYPO_TOKENS_THRESHOLD;
std::vector<std::string> search_fields;
std::string filter_query;
std::vector<std::string> facet_fields;
std::vector<sort_by> sort_fields;
size_t per_page = 10;
size_t page = 0;
size_t offset = 0;
token_ordering token_order = NOT_SET;
std::vector<std::string> facet_return_parent;
std::string vector_query;
std::vector<std::string> include_fields_vec;
std::vector<std::string> exclude_fields_vec;
std::vector<ref_include_exclude_fields> ref_include_exclude_fields_vec;
spp::sparse_hash_set<std::string> include_fields;
spp::sparse_hash_set<std::string> exclude_fields;
size_t max_facet_values = 10;
std::string simple_facet_query;
size_t facet_query_num_typos = 2;
size_t snippet_threshold = 30;
size_t highlight_affix_num_tokens = 4;
std::string highlight_full_fields;
std::string pinned_hits_str;
std::string hidden_hits_str;
std::vector<std::string> group_by_fields;
size_t group_limit = 3;
bool group_missing_values = true;
std::string highlight_start_tag = "<mark>";
std::string highlight_end_tag = "</mark>";
std::vector<uint32_t> query_by_weights;
size_t limit_hits = 1000000;
bool prioritize_exact_match = true;
bool prioritize_token_position = false;
bool pre_segmented_query = false;
bool enable_overrides = true;
bool enable_synonyms = true;
bool synonym_prefix = false;
size_t synonym_num_typos = 0;
bool filter_curated_hits_option = false;
std::string highlight_fields;
bool exhaustive_search = false;
size_t search_cutoff_ms = 30 * 1000;
enable_t split_join_tokens = fallback;
size_t max_candidates = 0;
std::vector<enable_t> infixes;
size_t max_extra_prefix = INT16_MAX;
size_t max_extra_suffix = INT16_MAX;
bool enable_highlight_v1 = true;
text_match_type_t match_type = max_score;
bool enable_typos_for_numerical_tokens = true;
bool enable_typos_for_alpha_numerical_tokens = true;
bool enable_lazy_filter = Config::get_instance().get_enable_lazy_filter();
size_t max_filter_by_candidates = DEFAULT_FILTER_BY_CANDIDATES;
std::string facet_strategy = "automatic";
size_t remote_embedding_timeout_ms = 5000;
size_t remote_embedding_num_tries = 2;
size_t facet_sample_percent = 100;
size_t facet_sample_threshold = 0;
bool conversation = false;
std::string conversation_id;
std::string conversation_model_id;
std::string drop_tokens_mode_str = "right_to_left";
bool prioritize_num_matching_fields = true;
std::string override_tags;
std::string voice_query;
bool enable_analytics = true;
bool rerank_hybrid_matches = false;
bool validate_field_names = true;
std::unordered_map<std::string, size_t*> unsigned_int_values = {
{MIN_LEN_1TYPO, &min_len_1typo},
{MIN_LEN_2TYPO, &min_len_2typo},
{DROP_TOKENS_THRESHOLD, &drop_tokens_threshold},
{TYPO_TOKENS_THRESHOLD, &typo_tokens_threshold},
{MAX_FACET_VALUES, &max_facet_values},
{LIMIT_HITS, &limit_hits},
{SNIPPET_THRESHOLD, &snippet_threshold},
{HIGHLIGHT_AFFIX_NUM_TOKENS, &highlight_affix_num_tokens},
{PAGE, &page},
{OFFSET, &offset},
{PER_PAGE, &per_page},
{LIMIT, &per_page},
{GROUP_LIMIT, &group_limit},
{SEARCH_CUTOFF_MS, &search_cutoff_ms},
{MAX_EXTRA_PREFIX, &max_extra_prefix},
{MAX_EXTRA_SUFFIX, &max_extra_suffix},
{MAX_CANDIDATES, &max_candidates},
{FACET_QUERY_NUM_TYPOS, &facet_query_num_typos},
{FACET_SAMPLE_PERCENT, &facet_sample_percent},
{FACET_SAMPLE_THRESHOLD, &facet_sample_threshold},
{REMOTE_EMBEDDING_TIMEOUT_MS, &remote_embedding_timeout_ms},
{REMOTE_EMBEDDING_NUM_TRIES, &remote_embedding_num_tries},
{SYNONYM_NUM_TYPOS, &synonym_num_typos},
{MAX_FILTER_BY_CANDIDATES, &max_filter_by_candidates}
};
std::unordered_map<std::string, std::string*> str_values = {
{FILTER, &filter_query},
{VECTOR_QUERY, &vector_query},
{FACET_QUERY, &simple_facet_query},
{HIGHLIGHT_FIELDS, &highlight_fields},
{HIGHLIGHT_FULL_FIELDS, &highlight_full_fields},
{HIGHLIGHT_START_TAG, &highlight_start_tag},
{HIGHLIGHT_END_TAG, &highlight_end_tag},
{PINNED_HITS, &pinned_hits_str},
{HIDDEN_HITS, &hidden_hits_str},
{CONVERSATION_ID, &conversation_id},
{DROP_TOKENS_MODE, &drop_tokens_mode_str},
{OVERRIDE_TAGS, &override_tags},
{CONVERSATION_MODEL_ID, &conversation_model_id},
{VOICE_QUERY, &voice_query},
{FACET_STRATEGY, &facet_strategy},
};
std::unordered_map<std::string, bool*> bool_values = {
{PRIORITIZE_EXACT_MATCH, &prioritize_exact_match},
{PRIORITIZE_TOKEN_POSITION, &prioritize_token_position},
{PRE_SEGMENTED_QUERY, &pre_segmented_query},
{EXHAUSTIVE_SEARCH, &exhaustive_search},
{ENABLE_OVERRIDES, &enable_overrides},
{ENABLE_HIGHLIGHT_V1, &enable_highlight_v1},
{CONVERSATION, &conversation},
{PRIORITIZE_NUM_MATCHING_FIELDS, &prioritize_num_matching_fields},
{GROUP_MISSING_VALUES, &group_missing_values},
{ENABLE_TYPOS_FOR_NUMERICAL_TOKENS, &enable_typos_for_numerical_tokens},
{ENABLE_SYNONYMS, &enable_synonyms},
{SYNONYM_PREFIX, &synonym_prefix},
{ENABLE_LAZY_FILTER, &enable_lazy_filter},
{ENABLE_TYPOS_FOR_ALPHA_NUMERICAL_TOKENS, &enable_typos_for_alpha_numerical_tokens},
{FILTER_CURATED_HITS, &filter_curated_hits_option},
{ENABLE_ANALYTICS, &enable_analytics},
{RERANK_HYBRID_MATCHES, &rerank_hybrid_matches},
{VALIDATE_FIELD_NAMES, &validate_field_names}
};
std::unordered_map<std::string, std::vector<std::string>*> str_list_values = {
{QUERY_BY, &search_fields},
{FACET_BY, &facet_fields},
{GROUP_BY, &group_by_fields},
{INCLUDE_FIELDS, &include_fields_vec},
{EXCLUDE_FIELDS, &exclude_fields_vec},
{FACET_RETURN_PARENT, &facet_return_parent},
};
std::unordered_map<std::string, std::vector<uint32_t>*> int_list_values = {
{QUERY_BY_WEIGHTS, &query_by_weights},
{NUM_TYPOS, &num_typos},
};
for(const auto& kv: req_params) {
const std::string& key = kv.first;
const std::string& val = kv.second;
if(key == PREFIX) {
if(val == "true" || val == "false") {
prefixes = {(val == "true")};
} else {
prefixes.clear();
std::vector<std::string> prefix_str;
StringUtils::split(val, prefix_str, ",");
for(auto& prefix_s : prefix_str) {
prefixes.push_back(prefix_s == "true");
}
}
}
else if(key == SPLIT_JOIN_TOKENS) {
if(val == "false") {
split_join_tokens = off;
} else if(val == "true") {
split_join_tokens = fallback;
} else {
auto enable_op = magic_enum::enum_cast<enable_t>(val);
if(enable_op.has_value()) {
split_join_tokens = enable_op.value();
}
}
}
else if(key == TEXT_MATCH_TYPE) {
auto match_op = magic_enum::enum_cast<text_match_type_t>(val);
if(match_op.has_value()) {
match_type = match_op.value();
}
}
else {
auto find_int_it = unsigned_int_values.find(key);
if(find_int_it != unsigned_int_values.end()) {
const auto& op = add_unsigned_int_param(key, val, find_int_it->second);
if(!op.ok()) {
return op;
}
continue;
}
auto find_str_it = str_values.find(key);
if(find_str_it != str_values.end()) {
*find_str_it->second = val;
continue;
}
auto find_bool_it = bool_values.find(key);
if(find_bool_it != bool_values.end()) {
*find_bool_it->second = (val == "true");
continue;
}
auto find_str_list_it = str_list_values.find(key);
if(find_str_list_it != str_list_values.end()) {
if(key == FACET_BY){
StringUtils::split_facet(val, *find_str_list_it->second);
}
else if(key == INCLUDE_FIELDS || key == EXCLUDE_FIELDS){
auto op = StringUtils::split_include_exclude_fields(val, *find_str_list_it->second);
if (!op.ok()) {
return op;
}
}
else{
StringUtils::split(val, *find_str_list_it->second, ",");
}
continue;
}
auto find_int_list_it = int_list_values.find(key);
if(find_int_list_it != int_list_values.end()) {
const auto& int_list_op = add_unsigned_int_list_param(key, val, find_int_list_it->second);
if(!int_list_op.ok()) {
return int_list_op;
}
continue;
}
}
}
// special defaults
if(!req_params[FACET_QUERY].empty() && req_params.count(PER_PAGE) == 0) {
// for facet query we will set per_page to zero if it is not explicitly overridden
per_page = 0;
}
auto initialize_op = Join::initialize_ref_include_exclude_fields_vec(filter_query, include_fields_vec, exclude_fields_vec,
ref_include_exclude_fields_vec);
if (!initialize_op.ok()) {
return initialize_op;
}
include_fields.insert(include_fields_vec.begin(), include_fields_vec.end());
exclude_fields.insert(exclude_fields_vec.begin(), exclude_fields_vec.end());
bool parsed_sort_by = parse_sort_by_str(req_params[SORT_BY], sort_fields);
if(!parsed_sort_by) {
return Option<bool>(400,std::string("Parameter `") + SORT_BY + "` is malformed.");
}
if(sort_fields.size() > 3) {
return Option<bool>(400, "Only upto 3 sort fields are allowed.");
}
if(req_params.count(INFIX) != 0) {
std::vector<std::string> infix_strs;
StringUtils::split(req_params[INFIX], infix_strs, ",");
for(auto& infix_str: infix_strs) {
auto infix_op = magic_enum::enum_cast<enable_t>(infix_str);
if(infix_op.has_value()) {
infixes.push_back(infix_op.value());
}
}
} else {
infixes.push_back(off);
}
if(req_params.count(RANK_TOKENS_BY) != 0) {
StringUtils::toupper(req_params[RANK_TOKENS_BY]);
if (req_params[RANK_TOKENS_BY] == "DEFAULT_SORTING_FIELD") {
token_order = MAX_SCORE;
} else if(req_params[RANK_TOKENS_BY] == "FREQUENCY") {
token_order = FREQUENCY;
}
}
if(!max_candidates) {
max_candidates = exhaustive_search ? Index::COMBINATION_MAX_LIMIT :
(collection->get_num_documents() < 500000 ? Index::NUM_CANDIDATES_DEFAULT_MAX :
Index::NUM_CANDIDATES_DEFAULT_MIN);
}
Option<nlohmann::json> result_op = collection->search(raw_query, search_fields, filter_query, facet_fields,
sort_fields, num_typos,
per_page,
page,
token_order, prefixes, drop_tokens_threshold,
include_fields, exclude_fields,
max_facet_values,
simple_facet_query,
snippet_threshold,
highlight_affix_num_tokens,
highlight_full_fields,
typo_tokens_threshold,
pinned_hits_str,
hidden_hits_str,
group_by_fields,
group_limit,
highlight_start_tag,
highlight_end_tag,
query_by_weights,
limit_hits,
prioritize_exact_match,
pre_segmented_query,
enable_overrides,
highlight_fields,
exhaustive_search,
search_cutoff_ms,
min_len_1typo,
min_len_2typo,
split_join_tokens,
max_candidates,
infixes,
max_extra_prefix,
max_extra_suffix,
facet_query_num_typos,
filter_curated_hits_option,
prioritize_token_position,
vector_query,
enable_highlight_v1,
start_ts,
match_type,
facet_sample_percent,
facet_sample_threshold,
offset,
facet_strategy,
remote_embedding_timeout_ms,
remote_embedding_num_tries,
stopwords_set,
facet_return_parent,
ref_include_exclude_fields_vec,
drop_tokens_mode_str,
prioritize_num_matching_fields,
group_missing_values,
conversation,
conversation_model_id,
conversation_id,
override_tags,
voice_query,
enable_typos_for_numerical_tokens,
enable_synonyms,
synonym_prefix,
synonym_num_typos,
enable_lazy_filter,
enable_typos_for_alpha_numerical_tokens,
max_filter_by_candidates,
rerank_hybrid_matches,
validate_field_names);
uint64_t timeMillis = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - begin).count();
AppMetrics::get_instance().increment_count(AppMetrics::SEARCH_LABEL, 1);
AppMetrics::get_instance().increment_duration(AppMetrics::SEARCH_LABEL, timeMillis);
if(!result_op.ok()) {
return Option<bool>(result_op.code(), result_op.error());
}
nlohmann::json result = result_op.get();
if(Config::get_instance().get_enable_search_analytics()) {
if(enable_analytics && result.contains("found")) {
std::string analytics_query = Tokenizer::normalize_ascii_no_spaces(raw_query);
if(result["found"].get<size_t>() != 0) {
const std::string& expanded_query = Tokenizer::normalize_ascii_no_spaces(
result["request_params"]["first_q"].get<std::string>());
AnalyticsManager::get_instance().add_suggestion(orig_coll_name, analytics_query, expanded_query,
true, req_params["x-typesense-user-id"]);
} else {
AnalyticsManager::get_instance().add_nohits_query(orig_coll_name, analytics_query,
true, req_params["x-typesense-user-id"]);
}
}
}
if(exclude_fields.count("search_time_ms") == 0) {
result["search_time_ms"] = timeMillis;
}
if(page == 0 && offset != 0) {
result["offset"] = offset;
} else {
result["page"] = (page == 0) ? 1 : page;
}
results_json_str = result.dump(-1, ' ', false, nlohmann::detail::error_handler_t::ignore);
//LOG(INFO) << "Time taken: " << timeMillis << "ms";
return Option<bool>(true);
}
ThreadPool* CollectionManager::get_thread_pool() const {
return thread_pool;
}
Option<nlohmann::json> CollectionManager::get_collection_summaries(uint32_t limit, uint32_t offset,
const std::vector<std::string>& exclude_fields,
const std::vector<std::string>& api_key_collections) const {
std::shared_lock lock(mutex);
auto collections_op = get_collections(limit, offset, api_key_collections);
if(!collections_op.ok()) {
return Option<nlohmann::json>(collections_op.code(), collections_op.error());
}
std::vector<Collection*> colls = collections_op.get();
nlohmann::json json_summaries = nlohmann::json::array();
for(Collection* collection: colls) {
nlohmann::json collection_json = collection->get_summary_json();
for(const auto& exclude_field: exclude_fields) {
collection_json.erase(exclude_field);
}
json_summaries.push_back(collection_json);
}
return Option<nlohmann::json>(json_summaries);
}
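// Validates the raw create-collection request body (name, fields, num_memory_shards, symbols_to_index,
// token_separators, enable_nested_fields, optional voice query model and metadata), fills in defaults for the
// optional keys, and delegates to the typed create_collection() overload.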
Option<Collection*> CollectionManager::create_collection(nlohmann::json& req_json) {
const char* NUM_MEMORY_SHARDS = "num_memory_shards";
const char* SYMBOLS_TO_INDEX = "symbols_to_index";
const char* TOKEN_SEPARATORS = "token_separators";
const char* ENABLE_NESTED_FIELDS = "enable_nested_fields";
const char* DEFAULT_SORTING_FIELD = "default_sorting_field";
const char* METADATA = "metadata";
// validate presence of mandatory fields
if(req_json.count("name") == 0) {
return Option<Collection*>(400, "Parameter `name` is required.");
}
if(!req_json["name"].is_string() || req_json["name"].get<std::string>().empty()) {
return Option<Collection*>(400, "Parameter `name` must be a non-empty string.");
}
if(req_json.count(NUM_MEMORY_SHARDS) == 0) {
req_json[NUM_MEMORY_SHARDS] = CollectionManager::DEFAULT_NUM_MEMORY_SHARDS;
}
if(req_json.count(SYMBOLS_TO_INDEX) == 0) {
req_json[SYMBOLS_TO_INDEX] = std::vector<std::string>();
}
if(req_json.count(TOKEN_SEPARATORS) == 0) {
req_json[TOKEN_SEPARATORS] = std::vector<std::string>();
}
if(req_json.count(ENABLE_NESTED_FIELDS) == 0) {
req_json[ENABLE_NESTED_FIELDS] = false;
}
if(req_json.count("fields") == 0) {
return Option<Collection*>(400, "Parameter `fields` is required.");
}
if(req_json.count(DEFAULT_SORTING_FIELD) == 0) {
req_json[DEFAULT_SORTING_FIELD] = "";
}
if(!req_json[DEFAULT_SORTING_FIELD].is_string()) {
return Option<Collection*>(400, std::string("`") + DEFAULT_SORTING_FIELD +
"` should be a string. It should be the name of an int32/float field.");
}
if(!req_json[NUM_MEMORY_SHARDS].is_number_unsigned()) {
return Option<Collection*>(400, std::string("`") + NUM_MEMORY_SHARDS + "` should be a positive integer.");
}
if(!req_json[SYMBOLS_TO_INDEX].is_array()) {
return Option<Collection*>(400, std::string("`") + SYMBOLS_TO_INDEX + "` should be an array of character symbols.");
}
if(!req_json[TOKEN_SEPARATORS].is_array()) {
return Option<Collection*>(400, std::string("`") + TOKEN_SEPARATORS + "` should be an array of character symbols.");
}
if(!req_json[ENABLE_NESTED_FIELDS].is_boolean()) {
return Option<Collection*>(400, std::string("`") + ENABLE_NESTED_FIELDS + "` should be a boolean.");
}
for (auto it = req_json[SYMBOLS_TO_INDEX].begin(); it != req_json[SYMBOLS_TO_INDEX].end(); ++it) {
if(!it->is_string() || it->get<std::string>().size() != 1 ) {
return Option<Collection*>(400, std::string("`") + SYMBOLS_TO_INDEX + "` should be an array of character symbols.");
}
}
for (auto it = req_json[TOKEN_SEPARATORS].begin(); it != req_json[TOKEN_SEPARATORS].end(); ++it) {
if(!it->is_string() || it->get<std::string>().size() != 1 ) {
return Option<Collection*>(400, std::string("`") + TOKEN_SEPARATORS + "` should be an array of character symbols.");
}
}
size_t num_memory_shards = req_json[NUM_MEMORY_SHARDS].get<size_t>();
if(num_memory_shards == 0) {
return Option<Collection*>(400, std::string("`") + NUM_MEMORY_SHARDS + "` should be a positive integer.");
}
// field specific validation
if(!req_json["fields"].is_array() || req_json["fields"].empty()) {
return Option<Collection *>(400, "The `fields` value should be an array of objects containing "
"`name`, `type` and optionally, `facet` properties.");
}
if(req_json.count(METADATA) != 0) {
if(!req_json[METADATA].is_object()) {
return Option<Collection *>(400, "The `metadata` value should be an object.");
}
} else {
req_json[METADATA] = {};
}
const std::string& default_sorting_field = req_json[DEFAULT_SORTING_FIELD].get<std::string>();
if(default_sorting_field == "id") {
return Option<Collection *>(400, "Invalid `default_sorting_field` value: cannot be `id`.");
}
std::string fallback_field_type;
std::vector<field> fields;
auto parse_op = field::json_fields_to_fields(req_json[ENABLE_NESTED_FIELDS].get<bool>(),
req_json["fields"], fallback_field_type, fields);
if(!parse_op.ok()) {
return Option<Collection*>(parse_op.code(), parse_op.error());
}
std::shared_ptr<VQModel> model = nullptr;
if(req_json.count(Collection::COLLECTION_VOICE_QUERY_MODEL) != 0) {
const nlohmann::json& voice_query_model = req_json[Collection::COLLECTION_VOICE_QUERY_MODEL];
if(!voice_query_model.is_object()) {
return Option<Collection*>(400, "Parameter `voice_query_model` must be an object.");
}
if(voice_query_model.count("model_name") == 0) {
return Option<Collection*>(400, "Parameter `voice_query_model.model_name` is required.");
}
if(!voice_query_model["model_name"].is_string() || voice_query_model["model_name"].get<std::string>().empty()) {
return Option<Collection*>(400, "Parameter `voice_query_model.model_name` must be a non-empty string.");
}
const std::string& model_name = voice_query_model["model_name"].get<std::string>();
auto model_res = VQModelManager::get_instance().validate_and_init_model(model_name);
if(!model_res.ok()) {
LOG(ERROR) << "Error while loading voice query model: " << model_res.error();
return Option<Collection*>(model_res.code(), model_res.error());
} else {
model = model_res.get();
}
}
const auto created_at = static_cast<uint64_t>(std::time(nullptr));
return CollectionManager::get_instance().create_collection(req_json["name"], num_memory_shards,
fields, default_sorting_field, created_at,
fallback_field_type,
req_json[SYMBOLS_TO_INDEX],
req_json[TOKEN_SEPARATORS],
req_json[ENABLE_NESTED_FIELDS],
model, req_json[METADATA]);
}
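// Restores a single collection from disk: rebuilds the in-memory index from the stored meta, replays overrides
// and synonyms, and re-indexes documents in batches of `batch_size` (flushing earlier if the estimated memory
// for the pending batch grows too large). Individual document indexing errors are currently ignored.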
Option<bool> CollectionManager::load_collection(const nlohmann::json &collection_meta,
const size_t batch_size,
const StoreStatus& next_coll_id_status,
const std::atomic<bool>& quit,
spp::sparse_hash_map<std::string, std::string>& referenced_in,
spp::sparse_hash_map<std::string, std::set<reference_pair_t>>& async_referenced_ins) {
auto& cm = CollectionManager::get_instance();
if(!collection_meta.contains(Collection::COLLECTION_NAME_KEY)) {
return Option<bool>(500, "No collection name in collection meta: " + collection_meta.dump());
}
if(!collection_meta[Collection::COLLECTION_NAME_KEY].is_string()) {
LOG(ERROR) << "Collection name in meta is not a string, key: " << Collection::COLLECTION_NAME_KEY
<< ", value: " << collection_meta[Collection::COLLECTION_NAME_KEY];
}
const std::string & this_collection_name = collection_meta[Collection::COLLECTION_NAME_KEY].get<std::string>();
std::string collection_next_seq_id_str;
StoreStatus next_seq_id_status = cm.store->get(Collection::get_next_seq_id_key(this_collection_name),
collection_next_seq_id_str);
if(next_seq_id_status == StoreStatus::ERROR) {
LOG(ERROR) << "Error while fetching next sequence ID for " << this_collection_name;
return Option<bool>(500, "Error while fetching collection's next sequence ID from the disk for "
"`" + this_collection_name + "`");
}
if(next_seq_id_status == StoreStatus::NOT_FOUND && next_coll_id_status == StoreStatus::FOUND) {
LOG(ERROR) << "collection's next sequence ID is missing";
return Option<bool>(500, "Next collection id was found, but collection's next sequence ID is missing for "
"`" + this_collection_name + "`");
}
uint32_t collection_next_seq_id = next_seq_id_status == StoreStatus::NOT_FOUND ? 0 :
StringUtils::deserialize_uint32_t(collection_next_seq_id_str);
{
std::shared_lock lock(cm.mutex);
Collection *existing_collection = cm.get_collection_unsafe(this_collection_name);
if(existing_collection != nullptr) {
// To maintain idempotency, if the collection already exists in-memory, drop it from memory
LOG(WARNING) << "Dropping duplicate collection " << this_collection_name << " before loading it again.";
lock.unlock();
cm.drop_collection(this_collection_name, false);
}
}
Collection* collection = init_collection(collection_meta, collection_next_seq_id, cm.store, 1.0f, referenced_in,
async_referenced_ins);
LOG(INFO) << "Loading collection " << collection->get_name();
// initialize overrides
std::vector<std::string> collection_override_jsons;
cm.store->scan_fill(Collection::get_override_key(this_collection_name, ""),
std::string(Collection::COLLECTION_OVERRIDE_PREFIX) + "_" + this_collection_name + "`",
collection_override_jsons);
for(const auto & collection_override_json: collection_override_jsons) {
nlohmann::json collection_override = nlohmann::json::parse(collection_override_json);
override_t override;
auto parse_op = override_t::parse(collection_override, "", override, "", collection->get_symbols_to_index(),
collection->get_token_separators());
if(parse_op.ok()) {
collection->add_override(override, false);
} else {
LOG(ERROR) << "Skipping loading of override: " << parse_op.error();
}
}
// initialize synonyms
std::vector<std::string> collection_synonym_jsons;
cm.store->scan_fill(SynonymIndex::get_synonym_key(this_collection_name, ""),
std::string(SynonymIndex::COLLECTION_SYNONYM_PREFIX) + "_" + this_collection_name + "`",
collection_synonym_jsons);
for(const auto & collection_synonym_json: collection_synonym_jsons) {
nlohmann::json collection_synonym = nlohmann::json::parse(collection_synonym_json);
collection->add_synonym(collection_synonym, false);
}
// Fetch records from the store and re-create memory index
const std::string seq_id_prefix = collection->get_seq_id_collection_prefix();
std::string upper_bound_key = collection->get_seq_id_collection_prefix() + "`"; // cannot inline this
rocksdb::Slice upper_bound(upper_bound_key);
rocksdb::Iterator* iter = cm.store->scan(seq_id_prefix, &upper_bound);
std::unique_ptr<rocksdb::Iterator> iter_guard(iter);
std::vector<index_record> index_records;
size_t num_found_docs = 0;
size_t num_valid_docs = 0;
size_t num_indexed_docs = 0;
size_t batch_doc_str_size = 0;
auto begin = std::chrono::high_resolution_clock::now();
while(iter->Valid() && iter->key().starts_with(seq_id_prefix)) {
num_found_docs++;
const uint32_t seq_id = Collection::get_seq_id_from_key(iter->key().ToString());
nlohmann::json document;
const std::string& doc_string = iter->value().ToString();
try {
document = nlohmann::json::parse(doc_string);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
return Option<bool>(400, "Bad JSON.");
}
batch_doc_str_size += doc_string.size();
if(collection->get_enable_nested_fields()) {
std::vector<field> flattened_fields;
field::flatten_doc(document, collection->get_nested_fields(), {}, true, flattened_fields);
}
auto dirty_values = DIRTY_VALUES::COERCE_OR_DROP;
num_valid_docs++;
index_records.emplace_back(index_record(0, seq_id, document, CREATE, dirty_values));
// Peek and check for last record right here so that we handle batched indexing correctly
// Without doing this, the "last batch" would have to be indexed outside the loop.
iter->Next();
bool last_record = !(iter->Valid() && iter->key().starts_with(seq_id_prefix));
// if expected memory usage exceeds 250M, we index the accumulated set without caring about batch size
bool exceeds_batch_mem_threshold = ((batch_doc_str_size * 7) > (250 * 1024 * 1024));
// batch must match at least the number of shards
if(exceeds_batch_mem_threshold || (num_valid_docs % batch_size == 0) || last_record) {
size_t num_records = index_records.size();
size_t num_indexed = collection->batch_index_in_memory(index_records, 200, 60000, 2, false);
batch_doc_str_size = 0;
if(num_indexed != num_records) {
const std::string& index_error = get_first_index_error(index_records);
if(!index_error.empty()) {
// for now, we will just ignore errors during loading of collection
//return Option<bool>(400, index_error);
}
}
index_records.clear();
num_indexed_docs += num_indexed;
}
if(num_found_docs % ((1 << 14)) == 0) {
// cheap modulo check so that we don't read the clock for every document
auto time_elapsed = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::high_resolution_clock::now() - begin).count();
if(time_elapsed > 30) {
begin = std::chrono::high_resolution_clock::now();
LOG(INFO) << "Loaded " << num_found_docs << " documents from " << collection->get_name() << " so far.";
}
}
if(quit) {
break;
}
}
cm.add_to_collections(collection);
LOG(INFO) << "Indexed " << num_indexed_docs << "/" << num_found_docs
<< " documents into collection " << collection->get_name();
return Option<bool>(true);
}
spp::sparse_hash_map<std::string, nlohmann::json> CollectionManager::get_presets() const {
std::shared_lock lock(mutex);
return preset_configs;
}
Option<bool> CollectionManager::get_preset(const string& preset_name, nlohmann::json& preset) const {
std::shared_lock lock(mutex);
const auto& it = preset_configs.find(preset_name);
if(it != preset_configs.end()) {
preset = it->second;
return Option<bool>(true);
}
return Option<bool>(404, "Not found.");
}
Option<bool> CollectionManager::upsert_preset(const string& preset_name, const nlohmann::json& preset_config) {
std::unique_lock lock(mutex);
bool inserted = store->insert(get_preset_key(preset_name), preset_config.dump());
if(!inserted) {
return Option<bool>(500, "Unable to insert into store.");
}
preset_configs[preset_name] = preset_config;
return Option<bool>(true);
}
std::string CollectionManager::get_preset_key(const string& preset_name) {
return std::string(PRESET_PREFIX) + "_" + preset_name;
}
Option<bool> CollectionManager::delete_preset(const string& preset_name) {
std::unique_lock lock(mutex);
bool removed = store->remove(get_preset_key(preset_name));
if(!removed) {
return Option<bool>(500, "Unable to delete from store.");
}
preset_configs.erase(preset_name);
return Option<bool>(true);
}
Option<Collection*> CollectionManager::clone_collection(const string& existing_name, const nlohmann::json& req_json) {
std::shared_lock lock(mutex);
if(collections.count(existing_name) == 0) {
return Option<Collection*>(400, "Collection with name `" + existing_name + "` not found.");
}
if(req_json.count("name") == 0 || !req_json["name"].is_string()) {
return Option<Collection*>(400, "Collection name must be provided.");
}
const std::string& new_name = req_json["name"].get<std::string>();
if(collections.count(new_name) != 0) {
return Option<Collection*>(400, "Collection with name `" + new_name + "` already exists.");
}
Collection* existing_coll = collections[existing_name];
std::vector<std::string> symbols_to_index;
std::vector<std::string> token_separators;
for(auto c: existing_coll->get_symbols_to_index()) {
symbols_to_index.emplace_back(1, c);
}
for(auto c: existing_coll->get_token_separators()) {
token_separators.emplace_back(1, c);
}
lock.unlock();
auto coll_create_op = create_collection(new_name, DEFAULT_NUM_MEMORY_SHARDS, existing_coll->get_fields(),
existing_coll->get_default_sorting_field(), static_cast<uint64_t>(std::time(nullptr)),
existing_coll->get_fallback_field_type(), symbols_to_index, token_separators,
existing_coll->get_enable_nested_fields(), existing_coll->get_vq_model());
lock.lock();
if(!coll_create_op.ok()) {
return Option<Collection*>(coll_create_op.code(), coll_create_op.error());
}
Collection* new_coll = coll_create_op.get();
// copy synonyms
auto synonyms = existing_coll->get_synonyms().get();
for(const auto& synonym: synonyms) {
new_coll->get_synonym_index()->add_synonym(new_name, *synonym.second);
}
// copy overrides
auto overrides = existing_coll->get_overrides().get();
for(const auto& kv: overrides) {
new_coll->add_override(*kv.second);
}
return Option<Collection*>(new_coll);
}
void CollectionManager::add_referenced_in_backlog(const std::string& collection_name, reference_info_t&& ref_info) {
std::shared_lock lock(mutex);
referenced_in_backlog[collection_name].insert(ref_info);
}
std::map<std::string, std::set<reference_info_t>> CollectionManager::_get_referenced_in_backlog() const {
std::shared_lock lock(mutex);
return referenced_in_backlog;
}
void CollectionManager::process_embedding_field_delete(const std::string& model_name) {
std::shared_lock lock(mutex);
bool found = false;
for(const auto& collection: collections) {
// acquiring the collection lock here would deadlock:
// the caller of this function must already hold the lock on the collection
const auto& embedding_fields = collection.second->get_embedding_fields_unsafe();
for(const auto& embedding_field: embedding_fields) {
if(embedding_field.embed.count(fields::model_config) != 0) {
const auto& model_config = embedding_field.embed[fields::model_config];
if(model_config["model_name"].get<std::string>() == model_name) {
found = true;
break;
}
}
}
}
if(!found) {
LOG(INFO) << "Deleting text embedder: " << model_name;
EmbedderManager::get_instance().delete_text_embedder(model_name);
EmbedderManager::get_instance().delete_image_embedder(model_name);
}
}
std::unordered_set<std::string> CollectionManager::get_collection_references(const std::string& coll_name) {
std::shared_lock lock(mutex);
std::unordered_set<std::string> references;
auto it = collections.find(coll_name);
if (it == collections.end()) {
return references;
}
for (const auto& item: it->second->get_reference_fields()) {
const auto& ref_pair = item.second;
references.insert(ref_pair.collection);
}
return references;
}
bool CollectionManager::is_valid_api_key_collection(const std::vector<std::string>& api_collections, Collection* coll) const {
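// An API key can scope access by collection name: "*" allows every collection, any other entry
// is treated as a regex over the collection name (e.g. a hypothetical scope "org_.*" matches a
// collection named "org_users"), and an empty scope list allows all collections.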
for(const auto& api_collection : api_collections) {
if(api_collection == "*") {
return true;
}
const std::regex pattern(api_collection);
if(std::regex_match(coll->get_name(), pattern)) {
return true;
}
}
return api_collections.empty();
}
Option<bool> CollectionManager::update_collection_metadata(const std::string& collection, const nlohmann::json& metadata) {
auto collection_ptr = get_collection(collection);
if (collection_ptr == nullptr) {
return Option<bool>(400, "failed to get collection.");
}
collection_ptr->update_metadata(metadata);
std::string collection_meta_str;
auto collection_metakey = Collection::get_meta_key(collection);
store->get(collection_metakey, collection_meta_str);
auto collection_meta_json = nlohmann::json::parse(collection_meta_str);
collection_meta_json[Collection::COLLECTION_METADATA] = metadata;
if(store->insert(collection_metakey, collection_meta_json.dump())) {
return Option<bool>(true);
}
return Option<bool>(400, "failed to insert into store.");
}
| 95,697
|
C++
|
.cpp
| 1,858
| 39.198062
| 158
| 0.57404
|
typesense/typesense
| 20,571
| 633
| 548
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
3,764
|
index.cpp
|
typesense_typesense/src/index.cpp
|
#include "index.h"
#include <memory>
#include <numeric>
#include <chrono>
#include <set>
#include <unordered_map>
#include <random>
#include <art.h>
#include <array_utils.h>
#include <match_score.h>
#include <string_utils.h>
#include <tokenizer.h>
#include <s2/s2point.h>
#include <s2/s2latlng.h>
#include <s2/s2region_term_indexer.h>
#include <s2/s2cap.h>
#include <s2/s2loop.h>
#include <posting.h>
#include <thread_local_vars.h>
#include <unordered_set>
#include <or_iterator.h>
#include <timsort.hpp>
#include "logger.h"
#include "validator.h"
#include <collection_manager.h>
#define RETURN_CIRCUIT_BREAKER if((std::chrono::duration_cast<std::chrono::microseconds>( \
std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > search_stop_us) { \
search_cutoff = true; \
return ;\
}
#define RETURN_CIRCUIT_BREAKER_OP if((std::chrono::duration_cast<std::chrono::microseconds>( \
std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > search_stop_us) { \
search_cutoff = true; \
return Option<bool>(true);\
}
#define BREAK_CIRCUIT_BREAKER if((std::chrono::duration_cast<std::chrono::microseconds>( \
std::chrono::system_clock::now().time_since_epoch()).count() - search_begin_us) > search_stop_us) { \
search_cutoff = true; \
break;\
}
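// The circuit-breaker macros above compare the elapsed search time (in microseconds since the
// thread-local search_begin_us) against the search_stop_us budget; on timeout they set
// search_cutoff and respectively return, return a successful Option, or break out of the loop.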
#define FACET_INDEX_THRESHOLD 1000000000
spp::sparse_hash_map<uint32_t, int64_t, Hasher32> Index::text_match_sentinel_value;
spp::sparse_hash_map<uint32_t, int64_t, Hasher32> Index::seq_id_sentinel_value;
spp::sparse_hash_map<uint32_t, int64_t, Hasher32> Index::eval_sentinel_value;
spp::sparse_hash_map<uint32_t, int64_t, Hasher32> Index::geo_sentinel_value;
spp::sparse_hash_map<uint32_t, int64_t, Hasher32> Index::str_sentinel_value;
spp::sparse_hash_map<uint32_t, int64_t, Hasher32> Index::vector_distance_sentinel_value;
spp::sparse_hash_map<uint32_t, int64_t, Hasher32> Index::vector_query_sentinel_value;
Index::Index(const std::string& name, const uint32_t collection_id, const Store* store,
SynonymIndex* synonym_index, ThreadPool* thread_pool,
const tsl::htrie_map<char, field> & search_schema,
const std::vector<char>& symbols_to_index, const std::vector<char>& token_separators):
name(name), collection_id(collection_id), store(store), synonym_index(synonym_index), thread_pool(thread_pool),
search_schema(search_schema),
seq_ids(new id_list_t(256)), symbols_to_index(symbols_to_index), token_separators(token_separators) {
facet_index_v4 = new facet_index_t();
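// Each indexed field gets its own in-memory structure below: an HNSW index for vector fields,
// an ART tree for strings, a NumericTrie (geo/range) or num_tree_t for numeric fields, plus
// optional sort, facet, infix and reference indexes depending on the field's flags.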
for(const auto& a_field: search_schema) {
if(!a_field.index) {
continue;
}
if(a_field.num_dim > 0) {
auto hnsw_index = new hnsw_index_t(a_field.num_dim, 16, a_field.vec_dist, a_field.hnsw_params["M"].get<uint32_t>(), a_field.hnsw_params["ef_construction"].get<uint32_t>());
vector_index.emplace(a_field.name, hnsw_index);
continue;
}
if(a_field.is_string()) {
art_tree *t = new art_tree;
art_tree_init(t);
search_index.emplace(a_field.name, t);
} else if(a_field.is_geopoint()) {
geo_range_index.emplace(a_field.name, new NumericTrie(32));
if(!a_field.is_single_geopoint()) {
spp::sparse_hash_map<uint32_t, int64_t*> * doc_to_geos = new spp::sparse_hash_map<uint32_t, int64_t*>();
geo_array_index.emplace(a_field.name, doc_to_geos);
}
} else {
if (a_field.range_index) {
auto trie = a_field.is_bool() ? new NumericTrie(8) :
a_field.is_int32() ? new NumericTrie(32) : new NumericTrie(64);
range_index.emplace(a_field.name, trie);
} else {
num_tree_t* num_tree = new num_tree_t;
numerical_index.emplace(a_field.name, num_tree);
}
}
if(a_field.sort) {
if(a_field.type == field_types::STRING) {
adi_tree_t* tree = new adi_tree_t();
str_sort_index.emplace(a_field.name, tree);
} else if(a_field.type != field_types::GEOPOINT_ARRAY) {
auto doc_to_score = new spp::sparse_hash_map<uint32_t, int64_t, Hasher32>();
sort_index.emplace(a_field.name, doc_to_score);
}
}
if(a_field.facet) {
initialize_facet_indexes(a_field);
}
// initialize for non-string facet fields
if(a_field.facet && !a_field.is_string()) {
art_tree *ft = new art_tree;
art_tree_init(ft);
search_index.emplace(a_field.faceted_name(), ft);
}
if(a_field.infix) {
array_mapped_infix_t infix_sets(ARRAY_INFIX_DIM);
for(auto& infix_set: infix_sets) {
infix_set = new tsl::htrie_set<char>();
}
infix_index.emplace(a_field.name, infix_sets);
}
if (a_field.is_reference_helper && a_field.is_array()) {
auto num_tree = new num_tree_t;
reference_index.emplace(a_field.name, num_tree);
if (a_field.nested) {
std::vector<std::string> keys;
StringUtils::split(a_field.name, keys, ".");
// `object_array_reference_index` only includes the reference fields that are part of an object array.
if (search_schema.count(keys[0]) != 0 && search_schema.at(keys[0]).is_array()) {
auto index = new spp::sparse_hash_map<std::pair<uint32_t, uint32_t>, uint32_t, pair_hash>();
object_array_reference_index.emplace(a_field.name, index);
}
}
}
}
num_documents = 0;
}
Index::~Index() {
std::unique_lock lock(mutex);
for(auto & name_tree: search_index) {
art_tree_destroy(name_tree.second);
delete name_tree.second;
name_tree.second = nullptr;
}
search_index.clear();
for(auto & name_index: geo_range_index) {
delete name_index.second;
name_index.second = nullptr;
}
geo_range_index.clear();
for(auto& name_index: geo_array_index) {
for(auto& kv: *name_index.second) {
delete [] kv.second;
}
delete name_index.second;
name_index.second = nullptr;
}
geo_array_index.clear();
for(auto & name_tree: numerical_index) {
delete name_tree.second;
name_tree.second = nullptr;
}
numerical_index.clear();
for(auto & name_tree: range_index) {
delete name_tree.second;
name_tree.second = nullptr;
}
range_index.clear();
for(auto & name_map: sort_index) {
delete name_map.second;
name_map.second = nullptr;
}
sort_index.clear();
for(auto& kv: infix_index) {
for(auto& infix_set: kv.second) {
delete infix_set;
infix_set = nullptr;
}
}
infix_index.clear();
for(auto& name_tree: str_sort_index) {
delete name_tree.second;
name_tree.second = nullptr;
}
str_sort_index.clear();
delete facet_index_v4;
delete seq_ids;
for(auto& vec_index_kv: vector_index) {
delete vec_index_kv.second;
}
for(auto & name_tree: reference_index) {
delete name_tree.second;
name_tree.second = nullptr;
}
reference_index.clear();
for(auto & name_tree: object_array_reference_index) {
delete name_tree.second;
name_tree.second = nullptr;
}
object_array_reference_index.clear();
}
int64_t Index::get_points_from_doc(const nlohmann::json &document, const std::string & default_sorting_field) {
int64_t points = 0;
if(document[default_sorting_field].is_number_float()) {
// serialize float to an integer and reverse the inverted range
float n = document[default_sorting_field];
memcpy(&points, &n, sizeof(int32_t));
points ^= ((points >> (std::numeric_limits<int32_t>::digits - 1)) | INT32_MIN);
points = -1 * (INT32_MAX - points);
} else if(document[default_sorting_field].is_string()) {
// not much value in supporting default sorting field as string, so we will just dummy it out
points = 0;
} else {
points = document[default_sorting_field].is_boolean() ? int64_t(document[default_sorting_field].get<bool>()) :
document[default_sorting_field].get<int64_t>();
}
return points;
}
int64_t Index::float_to_int64_t(float f) {
// https://stackoverflow.com/questions/60530255/convert-float-to-int64-t-while-preserving-ordering
int32_t i;
memcpy(&i, &f, sizeof i);
if (i < 0) {
i ^= INT32_MAX;
}
return i;
}
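// Illustration: the mapping above preserves float ordering when results are compared as signed
// integers, e.g. float_to_int64_t(-1.5f) < float_to_int64_t(0.0f) < float_to_int64_t(2.25f);
// int64_t_to_float() below applies the inverse transform.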
float Index::int64_t_to_float(int64_t n) {
int32_t i = (int32_t) n;
if(i < 0) {
i ^= INT32_MAX;
}
float f;
memcpy(&f, &i, sizeof f);
return f;
}
void Index::compute_token_offsets_facets(index_record& record,
const tsl::htrie_map<char, field>& search_schema,
const std::vector<char>& local_token_separators,
const std::vector<char>& local_symbols_to_index) {
const auto& document = record.doc;
for(const auto& the_field: search_schema) {
const std::string& field_name = the_field.name;
if(document.count(field_name) == 0 || !the_field.index) {
continue;
}
offsets_facet_hashes_t offset_facet_hashes;
bool is_facet = search_schema.at(field_name).facet;
// non-string, non-geo faceted field should be indexed as faceted string field as well
if(the_field.facet && !the_field.is_string() && !the_field.is_geopoint()) {
if(the_field.is_array()) {
std::vector<std::string> strings;
if(the_field.type == field_types::INT32_ARRAY) {
for(int32_t value: document[field_name]){
auto str = std::to_string(value);
strings.emplace_back(std::move(str));
}
} else if(the_field.type == field_types::INT64_ARRAY) {
for(int64_t value: document[field_name]){
auto str = std::to_string(value);
strings.emplace_back(std::move(str));
}
} else if(the_field.type == field_types::FLOAT_ARRAY) {
for(float value: document[field_name]){
auto str = StringUtils::float_to_str(value);
strings.emplace_back(std::move(str));
}
} else if(the_field.type == field_types::BOOL_ARRAY) {
for(bool value: document[field_name]){
auto str = std::to_string(value);
strings.emplace_back(std::move(str));
}
}
tokenize_string_array(strings, the_field,
local_symbols_to_index, local_token_separators,
offset_facet_hashes.offsets);
} else {
std::string text;
if(the_field.type == field_types::INT32) {
auto val = document[field_name].get<int32_t>();
text = std::to_string(val);
} else if(the_field.type == field_types::INT64) {
auto val = document[field_name].get<int64_t>();
text = std::to_string(val);
} else if(the_field.type == field_types::FLOAT) {
auto val = document[field_name].get<float>();
text = StringUtils::float_to_str(val);
} else if(the_field.type == field_types::BOOL) {
auto val = document[field_name].get<bool>();
text = std::to_string(val);
}
tokenize_string(text, the_field,
local_symbols_to_index, local_token_separators,
offset_facet_hashes.offsets);
}
}
if(the_field.is_string()) {
if(the_field.type == field_types::STRING) {
tokenize_string(document[field_name], the_field,
local_symbols_to_index, local_token_separators,
offset_facet_hashes.offsets);
} else {
tokenize_string_array(document[field_name], the_field,
local_symbols_to_index, local_token_separators,
offset_facet_hashes.offsets);
}
}
if(!offset_facet_hashes.offsets.empty()) {
record.field_index.emplace(field_name, std::move(offset_facet_hashes));
}
}
}
bool doc_contains_field(const nlohmann::json& doc, const field& a_field,
const tsl::htrie_map<char, field> & search_schema) {
if(doc.count(a_field.name)) {
return true;
}
// check for a nested field, e.g. `foo.bar.baz` indexed but `foo.bar` present in schema
if(a_field.is_object()) {
auto prefix_it = search_schema.equal_prefix_range(a_field.name);
std::string nested_field_name;
for(auto kv = prefix_it.first; kv != prefix_it.second; kv++) {
kv.key(nested_field_name);
bool is_child_field = (nested_field_name.size() > a_field.name.size() &&
nested_field_name[a_field.name.size()] == '.');
if(is_child_field && doc.count(nested_field_name) != 0) {
return true;
}
}
}
return false;
}
bool validate_object_field(nlohmann::json& doc, const field& a_field) {
auto field_it = doc.find(a_field.name);
if(field_it != doc.end()) {
if(a_field.type == field_types::OBJECT && doc[a_field.name].is_object()) {
return true;
} else if(a_field.type == field_types::OBJECT_ARRAY && doc[a_field.name].is_array()) {
return true;
}
return false;
}
std::vector<std::string> field_parts;
StringUtils::split(a_field.name, field_parts, ".");
nlohmann::json* obj = &doc;
bool has_array = false;
for(auto& field_part: field_parts) {
if(obj->is_array()) {
has_array = true;
if(obj->empty()) {
return false;
}
obj = &obj->at(0);
if(!obj->is_object()) {
return false;
}
}
auto obj_it = obj->find(field_part);
if(obj_it == obj->end()) {
return false;
}
obj = &obj_it.value();
}
LOG(INFO) << "obj: " << *obj;
LOG(INFO) << "doc: " << doc;
if(a_field.type == field_types::OBJECT && obj->is_object()) {
return true;
} else if(a_field.type == field_types::OBJECT_ARRAY && (obj->is_array() || (has_array && obj->is_object()))) {
return true;
}
return false;
}
void Index::validate_and_preprocess(Index *index,
std::vector<index_record>& iter_batch,
const size_t batch_start_index, const size_t batch_size,
const std::string& default_sorting_field,
const tsl::htrie_map<char, field>& search_schema,
const tsl::htrie_map<char, field>& embedding_fields,
const std::string& fallback_field_type,
const std::vector<char>& token_separators,
const std::vector<char>& symbols_to_index,
const bool do_validation, const size_t remote_embedding_batch_size,
const size_t remote_embedding_timeout_ms, const size_t remote_embedding_num_tries, const bool generate_embeddings) {
// runs in a partitioned thread
std::vector<index_record*> records_to_embed;
for(size_t i = 0; i < batch_size; i++) {
index_record& index_rec = iter_batch[batch_start_index + i];
try {
if(!index_rec.indexed.ok()) {
// some records could have been invalidated upstream
continue;
}
if(index_rec.operation == DELETE) {
continue;
}
handle_doc_ops(search_schema, index_rec.doc, index_rec.old_doc);
if(do_validation) {
Option<uint32_t> validation_op = validator_t::validate_index_in_memory(index_rec.doc, index_rec.seq_id,
default_sorting_field,
search_schema,
embedding_fields,
index_rec.operation,
index_rec.is_update,
fallback_field_type,
index_rec.dirty_values, generate_embeddings);
if(!validation_op.ok()) {
index_rec.index_failure(validation_op.code(), validation_op.error());
continue;
}
}
if(index_rec.is_update) {
// scrub string fields to reduce delete ops
get_doc_changes(index_rec.operation, embedding_fields, index_rec.doc, index_rec.old_doc,
index_rec.new_doc, index_rec.del_doc);
/*if(index_rec.seq_id == 0) {
LOG(INFO) << "index_rec.doc: " << index_rec.doc;
LOG(INFO) << "index_rec.old_doc: " << index_rec.old_doc;
LOG(INFO) << "index_rec.new_doc: " << index_rec.new_doc;
LOG(INFO) << "index_rec.del_doc: " << index_rec.del_doc;
}*/
if(generate_embeddings) {
for(auto& field: index_rec.doc.items()) {
for(auto& embedding_field : embedding_fields) {
if(!embedding_field.embed[fields::from].is_null()) {
auto embed_from_vector = embedding_field.embed[fields::from].get<std::vector<std::string>>();
for(auto& embed_from: embed_from_vector) {
if(embed_from == field.key()) {
records_to_embed.push_back(&index_rec);
break;
}
}
}
}
}
}
} else {
if(generate_embeddings) {
records_to_embed.push_back(&index_rec);
}
}
compute_token_offsets_facets(index_rec, search_schema, token_separators, symbols_to_index);
int64_t points = 0;
if(index_rec.doc.count(default_sorting_field) == 0) {
auto default_sorting_field_it = index->sort_index.find(default_sorting_field);
if(default_sorting_field_it != index->sort_index.end()) {
auto seq_id_it = default_sorting_field_it->second->find(index_rec.seq_id);
if(seq_id_it != default_sorting_field_it->second->end()) {
points = seq_id_it->second;
} else {
points = INT64_MIN;
}
} else {
points = INT64_MIN;
}
} else {
points = get_points_from_doc(index_rec.doc, default_sorting_field);
}
index_rec.points = points;
index_rec.index_success();
} catch(const std::exception &e) {
LOG(INFO) << "Error while validating document: " << e.what();
index_rec.index_failure(400, e.what());
}
}
if(generate_embeddings) {
batch_embed_fields(records_to_embed, embedding_fields, search_schema, remote_embedding_batch_size, remote_embedding_timeout_ms, remote_embedding_num_tries);
}
}
size_t Index::
batch_memory_index(Index *index,
std::vector<index_record>& iter_batch,
const std::string & default_sorting_field,
const tsl::htrie_map<char, field> & actual_search_schema,
const tsl::htrie_map<char, field> & embedding_fields,
const std::string& fallback_field_type,
const std::vector<char>& token_separators,
const std::vector<char>& symbols_to_index,
const bool do_validation, const size_t remote_embedding_batch_size,
const size_t remote_embedding_timeout_ms, const size_t remote_embedding_num_tries,
const bool generate_embeddings,
const bool use_addition_fields, const tsl::htrie_map<char, field>& addition_fields,
const std::string& collection_name,
const spp::sparse_hash_map<std::string, std::set<reference_pair_t>>& async_referenced_ins) {
const size_t concurrency = 4;
const size_t num_threads = std::min(concurrency, iter_batch.size());
const size_t window_size = (num_threads == 0) ? 0 :
(iter_batch.size() + num_threads - 1) / num_threads; // rounds up
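// e.g. with 10 records and concurrency = 4: num_threads = 4 and window_size = 3, so the
// per-thread batches have sizes 3, 3, 3 and 1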
const auto& indexable_schema = use_addition_fields ? addition_fields : actual_search_schema;
size_t num_indexed = 0;
size_t num_processed = 0;
std::mutex m_process;
std::condition_variable cv_process;
size_t num_queued = 0;
size_t batch_index = 0;
// a local copy is needed to propagate the thread-local write_log_index into the threads launched below
auto local_write_log_index = write_log_index;
for(size_t thread_id = 0; thread_id < num_threads && batch_index < iter_batch.size(); thread_id++) {
size_t batch_len = window_size;
if(batch_index + window_size > iter_batch.size()) {
batch_len = iter_batch.size() - batch_index;
}
num_queued++;
index->thread_pool->enqueue([&, batch_index, batch_len]() {
write_log_index = local_write_log_index;
validate_and_preprocess(index, iter_batch, batch_index, batch_len, default_sorting_field, actual_search_schema,
embedding_fields, fallback_field_type, token_separators, symbols_to_index, do_validation, remote_embedding_batch_size, remote_embedding_timeout_ms, remote_embedding_num_tries, generate_embeddings);
std::unique_lock<std::mutex> lock(m_process);
num_processed++;
cv_process.notify_one();
});
batch_index += batch_len;
}
{
std::unique_lock<std::mutex> lock_process(m_process);
cv_process.wait(lock_process, [&](){ return num_processed == num_queued; });
}
std::unordered_set<std::string> found_fields;
for(size_t i = 0; i < iter_batch.size(); i++) {
auto& index_rec = iter_batch[i];
if(!index_rec.indexed.ok()) {
// some records could have been invalidated upstream
continue;
}
if(index_rec.is_update) {
index->remove(index_rec.seq_id, index_rec.del_doc, {}, index_rec.is_update);
} else if(index_rec.indexed.ok()) {
num_indexed++;
}
for(const auto& kv: index_rec.doc.items()) {
found_fields.insert(kv.key());
}
}
num_queued = num_processed = 0;
std::unique_lock ulock(index->mutex);
for(const auto& field_name: found_fields) {
//LOG(INFO) << "field name: " << field_name;
if(field_name != "id" && indexable_schema.count(field_name) == 0) {
continue;
}
num_queued++;
index->thread_pool->enqueue([&]() {
write_log_index = local_write_log_index;
const field& f = (field_name == "id") ?
field("id", field_types::STRING, false) : indexable_schema.at(field_name);
std::set<reference_pair_t> async_references;
auto it = async_referenced_ins.find(field_name);
if (it != async_referenced_ins.end()) {
async_references = it->second;
}
try {
index->index_field_in_memory(collection_name, f, iter_batch, async_references);
} catch(std::exception& e) {
LOG(ERROR) << "Unhandled Typesense error: " << e.what();
for(auto& record: iter_batch) {
record.index_failure(500, "Unhandled Typesense error in index batch, check logs for details.");
}
}
std::unique_lock<std::mutex> lock(m_process);
num_processed++;
cv_process.notify_one();
});
}
{
std::unique_lock<std::mutex> lock_process(m_process);
cv_process.wait(lock_process, [&](){ return num_processed == num_queued; });
}
return num_indexed;
}
void Index::index_field_in_memory(const std::string& collection_name, const field& afield,
std::vector<index_record>& iter_batch,
const std::set<reference_pair_t>& async_referenced_ins) {
// indexes a given field of all documents in the batch
if(afield.name == "id") {
for(const auto& record: iter_batch) {
if(!record.indexed.ok()) {
// some records could have been invalidated upstream
continue;
}
if(!record.is_update && record.indexed.ok()) {
// for updates, the seq_id will already exist
seq_ids->upsert(record.seq_id);
}
}
if (!async_referenced_ins.empty()) {
update_async_references(collection_name, afield, iter_batch, async_referenced_ins);
}
return;
}
if(!afield.index) {
return;
}
// We have to handle both these edge cases:
// a) `afield` might not exist in the document (optional field)
// b) `afield` value could be empty
// non-geo faceted field should be indexed as faceted string field as well
bool is_facet_field = (afield.facet && !afield.is_geopoint());
if(afield.is_string() || is_facet_field) {
std::unordered_map<std::string, std::vector<art_document>> token_to_doc_offsets;
int64_t max_score = INT64_MIN;
std::unordered_map<facet_value_id_t, std::vector<uint32_t>, facet_value_id_t::Hash> fvalue_to_seq_ids;
std::unordered_map<uint32_t, std::vector<facet_value_id_t>> seq_id_to_fvalues;
size_t total_num_docs = seq_ids->num_ids();
if(afield.facet && total_num_docs > 10*1000) {
facet_index_v4->check_for_high_cardinality(afield.name, total_num_docs);
}
for(const auto& record: iter_batch) {
if(!record.indexed.ok()) {
// some records could have been invalidated upstream
continue;
}
const auto& document = record.doc;
const auto seq_id = record.seq_id;
if(document.count(afield.name) == 0 || !record.indexed.ok()) {
continue;
}
auto field_index_it = record.field_index.find(afield.name);
if(field_index_it == record.field_index.end()) {
continue;
}
if(afield.facet) {
if(afield.is_array()) {
const auto& field_values = document[afield.name];
for(size_t i = 0; i < field_values.size(); i++) {
if(afield.type == field_types::INT32_ARRAY) {
int32_t raw_val = field_values[i].get<int32_t>();
auto fhash = reinterpret_cast<uint32_t&>(raw_val);
facet_value_id_t facet_value_id(std::to_string(raw_val), fhash);
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
} else if(afield.type == field_types::INT64_ARRAY) {
int64_t raw_val = field_values[i].get<int64_t>();
facet_value_id_t facet_value_id(std::to_string(raw_val));
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
} else if(afield.type == field_types::STRING_ARRAY) {
const std::string& raw_val =
field_values[i].get<std::string>().substr(0, facet_index_t::MAX_FACET_VAL_LEN);
facet_value_id_t facet_value_id(raw_val);
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
} else if(afield.type == field_types::FLOAT_ARRAY) {
float raw_val = field_values[i].get<float>();
auto fhash = reinterpret_cast<uint32_t&>(raw_val);
facet_value_id_t facet_value_id(StringUtils::float_to_str(raw_val), fhash);
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
} else if(afield.type == field_types::BOOL_ARRAY) {
bool raw_val = field_values[i].get<bool>();
auto fhash = (uint32_t)raw_val;
auto str_val = (raw_val == 1) ? "true" : "false";
facet_value_id_t facet_value_id(str_val, fhash);
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
}
}
} else {
if(afield.type == field_types::INT32) {
int32_t raw_val = document[afield.name].get<int32_t>();
auto fhash = reinterpret_cast<uint32_t&>(raw_val);
facet_value_id_t facet_value_id(std::to_string(raw_val), fhash);
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
}
else if(afield.type == field_types::INT64) {
int64_t raw_val = document[afield.name].get<int64_t>();
facet_value_id_t facet_value_id(std::to_string(raw_val));
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
}
else if(afield.type == field_types::STRING) {
const std::string& raw_val =
document[afield.name].get<std::string>().substr(0, facet_index_t::MAX_FACET_VAL_LEN);
facet_value_id_t facet_value_id(raw_val);
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
}
else if(afield.type == field_types::FLOAT) {
float raw_val = document[afield.name].get<float>();
const std::string& float_str_val = StringUtils::float_to_str(raw_val);
float normalized_raw_val = std::stof(float_str_val);
auto fhash = reinterpret_cast<uint32_t&>(normalized_raw_val);
facet_value_id_t facet_value_id(float_str_val, fhash);
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
}
else if(afield.type == field_types::BOOL) {
bool raw_val = document[afield.name].get<bool>();
auto fhash = (uint32_t)raw_val;
auto str_val = (raw_val == 1) ? "true" : "false";
facet_value_id_t facet_value_id(str_val, fhash);
fvalue_to_seq_ids[facet_value_id].push_back(seq_id);
seq_id_to_fvalues[seq_id].push_back(facet_value_id);
}
}
}
if(record.points > max_score) {
max_score = record.points;
}
for(auto& token_offsets: field_index_it->second.offsets) {
token_to_doc_offsets[token_offsets.first].emplace_back(seq_id, record.points, token_offsets.second);
if(afield.infix) {
auto strhash = StringUtils::hash_wy(token_offsets.first.c_str(), token_offsets.first.size());
const auto& infix_sets = infix_index.at(afield.name);
infix_sets[strhash % 4]->insert(token_offsets.first);
}
}
}
facet_index_v4->insert(afield.name, fvalue_to_seq_ids, seq_id_to_fvalues, afield.is_string());
auto tree_it = search_index.find(afield.faceted_name());
if(tree_it == search_index.end()) {
return;
}
art_tree *t = tree_it->second;
for(auto& token_to_doc: token_to_doc_offsets) {
const std::string& token = token_to_doc.first;
std::vector<art_document>& documents = token_to_doc.second;
const auto *key = (const unsigned char *) token.c_str();
int key_len = (int) token.length() + 1; // for the terminating \0 char
//LOG(INFO) << "key: " << key << ", art_doc.id: " << art_doc.id;
art_inserts(t, key, key_len, max_score, documents);
}
}
if(!afield.is_string()) {
if (afield.type == field_types::INT32) {
auto num_tree = afield.range_index ? nullptr : numerical_index.at(afield.name);
auto trie = afield.range_index ? range_index.at(afield.name) : nullptr;
iterate_and_index_numerical_field(iter_batch, afield, [&afield, num_tree, trie]
(const index_record& record, uint32_t seq_id) {
int32_t value = record.doc[afield.name].get<int32_t>();
if (afield.range_index) {
trie->insert(value, seq_id);
} else {
num_tree->insert(value, seq_id);
}
});
}
else if(afield.type == field_types::INT64) {
auto num_tree = afield.range_index ? nullptr : numerical_index.at(afield.name);
auto trie = afield.range_index ? range_index.at(afield.name) : nullptr;
iterate_and_index_numerical_field(iter_batch, afield, [&afield, num_tree, trie]
(const index_record& record, uint32_t seq_id) {
int64_t value = record.doc[afield.name].get<int64_t>();
if (afield.range_index) {
trie->insert(value, seq_id);
} else {
num_tree->insert(value, seq_id);
}
});
}
else if(afield.type == field_types::FLOAT) {
auto num_tree = afield.range_index ? nullptr : numerical_index.at(afield.name);
auto trie = afield.range_index ? range_index.at(afield.name) : nullptr;
iterate_and_index_numerical_field(iter_batch, afield, [&afield, num_tree, trie]
(const index_record& record, uint32_t seq_id) {
float fvalue = record.doc[afield.name].get<float>();
int64_t value = float_to_int64_t(fvalue);
if (afield.range_index) {
trie->insert(value, seq_id);
} else {
num_tree->insert(value, seq_id);
}
});
} else if(afield.type == field_types::BOOL) {
auto num_tree = afield.range_index ? nullptr : numerical_index.at(afield.name);
auto trie = afield.range_index ? range_index.at(afield.name) : nullptr;
iterate_and_index_numerical_field(iter_batch, afield, [&afield, num_tree, trie]
(const index_record& record, uint32_t seq_id) {
bool value = record.doc[afield.name].get<bool>();
if (afield.range_index) {
trie->insert(value, seq_id);
} else {
num_tree->insert(value, seq_id);
}
});
} else if(afield.type == field_types::GEOPOINT || afield.type == field_types::GEOPOINT_ARRAY) {
auto geopoint_range_index = geo_range_index.at(afield.name);
iterate_and_index_numerical_field(iter_batch, afield,
[&afield, &geo_array_index=geo_array_index, geopoint_range_index](const index_record& record, uint32_t seq_id) {
// a nested geopoint value inside an array of objects will be a simple array, so it must be treated as a geopoint
bool nested_obj_arr_geopoint = (afield.nested && afield.type == field_types::GEOPOINT_ARRAY &&
!record.doc[afield.name].empty() && record.doc[afield.name][0].is_number());
if(afield.type == field_types::GEOPOINT || nested_obj_arr_geopoint) {
// this could be a nested geopoint array, so it can have more than 2 array values
const std::vector<double>& latlongs = record.doc[afield.name];
for(size_t li = 0; li < latlongs.size(); li+=2) {
S2RegionTermIndexer::Options options;
options.set_index_contains_points_only(true);
S2RegionTermIndexer indexer(options);
S2Point point = S2LatLng::FromDegrees(latlongs[li], latlongs[li+1]).ToPoint();
auto cell = S2CellId(point);
geopoint_range_index->insert_geopoint(cell.id(), seq_id);
}
if(nested_obj_arr_geopoint) {
int64_t* packed_latlongs = new int64_t[(latlongs.size()/2) + 1];
packed_latlongs[0] = latlongs.size()/2;
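// packed_latlongs layout: [number of lat/lng pairs, packed(lat0, lng0), packed(lat1, lng1), ...]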
size_t j_packed_latlongs = 0;
for(size_t li = 0; li < latlongs.size(); li+=2) {
int64_t packed_latlong = GeoPoint::pack_lat_lng(latlongs[li], latlongs[li+1]);
packed_latlongs[j_packed_latlongs + 1] = packed_latlong;
j_packed_latlongs++;
}
geo_array_index.at(afield.name)->emplace(seq_id, packed_latlongs);
}
} else {
const std::vector<std::vector<double>>& latlongs = record.doc[afield.name];
S2RegionTermIndexer::Options options;
options.set_index_contains_points_only(true);
S2RegionTermIndexer indexer(options);
int64_t* packed_latlongs = new int64_t[latlongs.size() + 1];
packed_latlongs[0] = latlongs.size();
for(size_t li = 0; li < latlongs.size(); li++) {
auto& latlong = latlongs[li];
S2Point point = S2LatLng::FromDegrees(latlong[0], latlong[1]).ToPoint();
auto cell = S2CellId(point);
geopoint_range_index->insert_geopoint(cell.id(), seq_id);
int64_t packed_latlong = GeoPoint::pack_lat_lng(latlong[0], latlong[1]);
packed_latlongs[li + 1] = packed_latlong;
}
geo_array_index.at(afield.name)->emplace(seq_id, packed_latlongs);
}
});
} else if(afield.is_array()) {
// handle vector index first
if(afield.type == field_types::FLOAT_ARRAY && afield.num_dim > 0) {
auto vec_index = vector_index[afield.name]->vecdex;
size_t curr_ele_count = vec_index->getCurrentElementCount();
if(curr_ele_count + iter_batch.size() > vec_index->getMaxElements()) {
vec_index->resizeIndex((curr_ele_count + iter_batch.size()) * 1.3);
}
const size_t num_threads = std::min<size_t>(4, iter_batch.size());
const size_t window_size = (num_threads == 0) ? 0 :
(iter_batch.size() + num_threads - 1) / num_threads; // rounds up
size_t num_processed = 0;
std::mutex m_process;
std::condition_variable cv_process;
size_t num_queued = 0;
size_t result_index = 0;
for(size_t thread_id = 0; thread_id < num_threads && result_index < iter_batch.size(); thread_id++) {
size_t batch_len = window_size;
if(result_index + window_size > iter_batch.size()) {
batch_len = iter_batch.size() - result_index;
}
num_queued++;
thread_pool->enqueue([thread_id, &afield, &vec_index, &records = iter_batch,
result_index, batch_len, &num_processed, &m_process, &cv_process]() {
size_t batch_counter = 0;
while(batch_counter < batch_len) {
auto& record = records[result_index + batch_counter];
if(record.doc.count(afield.name) == 0 || !record.indexed.ok()) {
batch_counter++;
continue;
}
try {
const std::vector<float>& float_vals = record.doc[afield.name].get<std::vector<float>>();
if(float_vals.size() != afield.num_dim) {
record.index_failure(400, "Vector size mismatch.");
} else {
if(afield.vec_dist == cosine) {
std::vector<float> normalized_vals(afield.num_dim);
hnsw_index_t::normalize_vector(float_vals, normalized_vals);
vec_index->addPoint(normalized_vals.data(), (size_t)record.seq_id, true);
} else {
vec_index->addPoint(float_vals.data(), (size_t)record.seq_id, true);
}
}
} catch(const std::exception &e) {
record.index_failure(400, e.what());
}
batch_counter++;
}
std::unique_lock<std::mutex> lock(m_process);
num_processed++;
cv_process.notify_one();
});
result_index += batch_len;
}
std::unique_lock<std::mutex> lock_process(m_process);
cv_process.wait(lock_process, [&](){ return num_processed == num_queued; });
return;
}
// all other numerical arrays
auto num_tree = afield.range_index ? nullptr : numerical_index.at(afield.name);
auto trie = afield.range_index ? range_index.at(afield.name) : nullptr;
auto reference = reference_index.count(afield.name) != 0 ? reference_index.at(afield.name) : nullptr;
auto object_array_reference = object_array_reference_index.count(afield.name) != 0 ?
object_array_reference_index.at(afield.name) : nullptr;
iterate_and_index_numerical_field(iter_batch, afield, [&afield, num_tree, trie, reference, object_array_reference]
(const index_record& record, uint32_t seq_id) {
for(size_t arr_i = 0; arr_i < record.doc[afield.name].size(); arr_i++) {
const auto& arr_value = record.doc[afield.name][arr_i];
if(afield.type == field_types::INT32_ARRAY) {
const int32_t value = arr_value;
if (afield.range_index) {
trie->insert(value, seq_id);
} else {
num_tree->insert(value, seq_id);
}
}
else if(afield.type == field_types::INT64_ARRAY) {
int64_t value;
if (object_array_reference != nullptr) { // arr_value is an array [object_index, value]
value = arr_value.at(1);
} else {
value = arr_value;
}
if (afield.range_index) {
trie->insert(value, seq_id);
} else {
num_tree->insert(value, seq_id);
}
if (reference != nullptr) {
reference->insert(seq_id, value);
}
if (object_array_reference != nullptr) {
(*object_array_reference)[std::make_pair(seq_id, arr_value.at(0))] = value;
}
}
else if(afield.type == field_types::FLOAT_ARRAY) {
const float fvalue = arr_value;
int64_t value = float_to_int64_t(fvalue);
if (afield.range_index) {
trie->insert(value, seq_id);
} else {
num_tree->insert(value, seq_id);
}
}
else if(afield.type == field_types::BOOL_ARRAY) {
const bool value = record.doc[afield.name][arr_i];
if (afield.range_index) {
trie->insert(value, seq_id);
} else {
num_tree->insert(value, seq_id);
}
}
}
});
}
// add numerical values automatically into sort index if sorting is enabled
if(afield.is_num_sortable() && afield.type != field_types::GEOPOINT_ARRAY) {
auto doc_to_score = sort_index.at(afield.name);
bool is_integer = afield.is_integer();
bool is_float = afield.is_float();
bool is_bool = afield.is_bool();
bool is_geopoint = afield.is_geopoint();
for(const auto& record: iter_batch) {
if(!record.indexed.ok()) {
continue;
}
const auto& document = record.doc;
const auto seq_id = record.seq_id;
if (document.count(afield.name) == 0 || !afield.index) {
continue;
}
if(is_integer) {
doc_to_score->emplace(seq_id, document[afield.name].get<int64_t>());
} else if(is_float) {
int64_t ifloat = float_to_int64_t(document[afield.name].get<float>());
doc_to_score->emplace(seq_id, ifloat);
} else if(is_bool) {
doc_to_score->emplace(seq_id, (int64_t) document[afield.name].get<bool>());
} else if(is_geopoint) {
const std::vector<double>& latlong = document[afield.name];
int64_t lat_lng = GeoPoint::pack_lat_lng(latlong[0], latlong[1]);
doc_to_score->emplace(seq_id, lat_lng);
}
}
}
} else if(afield.is_str_sortable()) {
adi_tree_t* str_tree = str_sort_index.at(afield.name);
for(const auto& record: iter_batch) {
if(!record.indexed.ok()) {
continue;
}
const auto& document = record.doc;
const auto seq_id = record.seq_id;
if (document.count(afield.name) == 0 || !afield.index) {
continue;
}
std::string raw_str = document[afield.name].get<std::string>();
Tokenizer str_tokenizer("", true, false, "", {' '});
str_tokenizer.tokenize(raw_str);
if(!raw_str.empty()) {
str_tree->index(seq_id, raw_str.substr(0, 2000));
}
}
}
if (!async_referenced_ins.empty()) {
update_async_references(collection_name, afield, iter_batch, async_referenced_ins);
}
}
void Index::update_async_references(const std::string& collection_name, const field& afield,
std::vector<index_record>& iter_batch,
const std::set<reference_pair_t>& async_referenced_ins) {
for (auto& record: iter_batch) {
if (!record.indexed.ok() || record.is_update) {
continue;
}
auto const& document = record.doc;
auto const& is_update = record.is_update;
auto const& seq_id = record.seq_id;
for (const auto& pair: async_referenced_ins) {
auto const& reference_collection_name = pair.collection;
auto const& reference_field_name = pair.field;
auto& cm = CollectionManager::get_instance();
auto ref_coll = cm.get_collection(reference_collection_name);
if (ref_coll == nullptr) {
record.index_failure(400, "Collection `" + reference_collection_name + "` with async_reference to the"
" collection `" += collection_name + "` not found.");
continue;
}
auto const& ref_fields = ref_coll->get_reference_fields();
auto const ref_field_it = ref_fields.find(reference_field_name);
if (ref_field_it == ref_fields.end()) {
record.index_failure(400, "Field `" + reference_field_name + "` not found in the ref schema of `" +=
reference_collection_name + "` having async_reference to `" += collection_name +
"` collection.");
continue;
}
if (ref_field_it->second.collection != collection_name) {
record.index_failure(400, "`" + reference_collection_name + "." += reference_field_name +
"` does not have a reference to `" += collection_name + "` collection.");
continue;
}
auto const& ref_schema = ref_coll->get_schema();
if (ref_schema.count(reference_field_name) == 0) {
record.index_failure(400, "Field `" + reference_field_name + "` not found in the schema of `" +=
reference_collection_name + "` having async_reference to `" +=
collection_name + "` collection.");
continue;
}
auto const& field_name = ref_field_it->second.field;
if (field_name != "id" && search_schema.count(field_name) == 0) {
record.index_failure(400, "Field `" + field_name + "`, referenced by `" += reference_collection_name +
"." += reference_field_name + "`, not found in `" += collection_name +
"` collection.");
continue;
}
auto const& optional = field_name != "id" && search_schema.at(field_name).optional;
auto is_required = !is_update && !optional;
if (is_required && document.count(field_name) != 1) {
record.index_failure(400, "Missing the required field `" + field_name + "` in the document.");
continue;
} else if (document.count(field_name) != 1) {
continue;
}
// After collecting the value(s) present in the field referenced by the other collection (ref_coll), we add
// this document's seq_id as a reference wherever the value(s) match.
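// e.g. (hypothetical field names) if this document's `product_ids` field holds [1, 5] and the
// referencing field is `product_id`, the filter built below is `product_id:= [1,5]`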
std::string ref_filter_value;
std::set<std::string> values;
if (document.at(field_name).is_array()) {
ref_filter_value = "[";
for (auto const& value: document[field_name]) {
if (value.is_number_integer()) {
auto const& v = std::to_string(value.get<int64_t>());
ref_filter_value += v;
values.insert(v);
} else if (value.is_string()) {
auto const& v = value.get<std::string>();
ref_filter_value += v;
values.insert(v);
} else {
record.index_failure(400, "Field `" + field_name + "` must only have string/int32/int64 values.");
continue;
}
ref_filter_value += ",";
}
ref_filter_value[ref_filter_value.size() - 1] = ']';
} else {
auto const& value = document[field_name];
if (value.is_number_integer()) {
auto const& v = std::to_string(value.get<int64_t>());
ref_filter_value += v;
values.insert(v);
} else if (value.is_string()) {
auto const& v = value.get<std::string>();
ref_filter_value += v;
values.insert(v);
} else {
record.index_failure(400, "Field `" + field_name + "` must only have string/int32/int64 values.");
continue;
}
}
if (values.empty()) {
continue;
}
auto const ref_filter = reference_field_name + ":= " += ref_filter_value;
auto update_op = ref_coll->update_async_references_with_lock(collection_name, ref_filter, values, seq_id,
reference_field_name);
if (!update_op.ok()) {
record.index_failure(400, "Error while updating async reference field `" + reference_field_name +
"` of collection `" += reference_collection_name + "`: " += update_op.error());
break;
}
}
}
}
void Index::tokenize_string(const std::string& text, const field& a_field,
const std::vector<char>& symbols_to_index,
const std::vector<char>& token_separators,
std::unordered_map<std::string, std::vector<uint32_t>>& token_to_offsets) {
Tokenizer tokenizer(text, true, !a_field.is_string(), a_field.locale, symbols_to_index, token_separators, a_field.get_stemmer());
std::string token;
std::string last_token;
size_t token_index = 0;
while(tokenizer.next(token, token_index)) {
if(token.empty()) {
continue;
}
if(token.size() > 100) {
token.erase(100);
}
token_to_offsets[token].push_back(token_index + 1);
last_token = token;
}
if(!token_to_offsets.empty()) {
// push 0 for the last occurring token (used for exact match ranking)
token_to_offsets[last_token].push_back(0);
}
}
void Index::tokenize_string_array(const std::vector<std::string>& strings,
const field& a_field,
const std::vector<char>& symbols_to_index,
const std::vector<char>& token_separators,
std::unordered_map<std::string, std::vector<uint32_t>>& token_to_offsets) {
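// For each token, the offsets for a given array element are laid out as:
// [token positions (token_index + 1)..., last position repeated (marks the end of this element's
// positions), the element's array index]; a trailing 0 is also appended for the last occurring
// token of each element (used for exact match ranking).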
for(size_t array_index = 0; array_index < strings.size(); array_index++) {
const std::string& str = strings[array_index];
std::set<std::string> token_set; // required to deal with repeating tokens
Tokenizer tokenizer(str, true, !a_field.is_string(), a_field.locale, symbols_to_index, token_separators, a_field.get_stemmer());
std::string token, last_token;
size_t token_index = 0;
// iterate and append offset positions
while(tokenizer.next(token, token_index)) {
if(token.empty()) {
continue;
}
if(token.size() > 100) {
token.erase(100);
}
token_to_offsets[token].push_back(token_index + 1);
token_set.insert(token);
last_token = token;
}
if(token_set.empty()) {
continue;
}
for(auto& the_token: token_set) {
// repeat last element to indicate end of offsets for this array index
token_to_offsets[the_token].push_back(token_to_offsets[the_token].back());
// iterate and append this array index to all tokens
token_to_offsets[the_token].push_back(array_index);
}
// push 0 for the last occurring token (used for exact match ranking)
token_to_offsets[last_token].push_back(0);
}
}
void Index::initialize_facet_indexes(const field& facet_field) {
facet_index_v4->initialize(facet_field.name);
}
void Index::compute_facet_stats(facet &a_facet, const std::string& raw_value, const std::string & field_type,
const size_t count) {
if(field_type == field_types::INT32 || field_type == field_types::INT32_ARRAY) {
int32_t val = std::stoi(raw_value);
if (val < a_facet.stats.fvmin) {
a_facet.stats.fvmin = val;
}
if (val > a_facet.stats.fvmax) {
a_facet.stats.fvmax = val;
}
a_facet.stats.fvsum += (count * val);
a_facet.stats.fvcount += count;
} else if(field_type == field_types::INT64 || field_type == field_types::INT64_ARRAY) {
int64_t val = std::stoll(raw_value);
if(val < a_facet.stats.fvmin) {
a_facet.stats.fvmin = val;
}
if(val > a_facet.stats.fvmax) {
a_facet.stats.fvmax = val;
}
a_facet.stats.fvsum += (count * val);
a_facet.stats.fvcount += count;
} else if(field_type == field_types::FLOAT || field_type == field_types::FLOAT_ARRAY) {
float val = std::stof(raw_value);
if(val < a_facet.stats.fvmin) {
a_facet.stats.fvmin = val;
}
if(val > a_facet.stats.fvmax) {
a_facet.stats.fvmax = val;
}
a_facet.stats.fvsum += (count * val);
a_facet.stats.fvcount += count;
}
}
void Index::compute_facet_stats(facet &a_facet, int64_t raw_value, const std::string & field_type) {
if(field_type == field_types::INT32 || field_type == field_types::INT32_ARRAY) {
int32_t val = raw_value;
if (val < a_facet.stats.fvmin) {
a_facet.stats.fvmin = val;
}
if (val > a_facet.stats.fvmax) {
a_facet.stats.fvmax = val;
}
a_facet.stats.fvsum += val;
a_facet.stats.fvcount++;
} else if(field_type == field_types::INT64 || field_type == field_types::INT64_ARRAY) {
int64_t val = raw_value;
if(val < a_facet.stats.fvmin) {
a_facet.stats.fvmin = val;
}
if(val > a_facet.stats.fvmax) {
a_facet.stats.fvmax = val;
}
a_facet.stats.fvsum += val;
a_facet.stats.fvcount++;
} else if(field_type == field_types::FLOAT || field_type == field_types::FLOAT_ARRAY) {
float val = reinterpret_cast<float&>(raw_value);
if(val < a_facet.stats.fvmin) {
a_facet.stats.fvmin = val;
}
if(val > a_facet.stats.fvmax) {
a_facet.stats.fvmax = val;
}
a_facet.stats.fvsum += val;
a_facet.stats.fvcount++;
}
}
int64_t Index::get_doc_val_from_sort_index(sort_index_iterator sort_index_it, uint32_t doc_seq_id) const {
if(sort_index_it != sort_index.end()){
auto doc_id_val_map = sort_index_it->second;
auto doc_seq_id_it = doc_id_val_map->find(doc_seq_id);
if(doc_seq_id_it != doc_id_val_map->end()){
return doc_seq_id_it->second;
}
}
return INT64_MAX;
}
std::vector<group_by_field_it_t> Index::get_group_by_field_iterators(const std::vector<std::string>& group_by_fields,
bool is_reverse) const {
std::vector<group_by_field_it_t> group_by_field_it_vec;
for (const auto &field_name: group_by_fields) {
if (!facet_index_v4->has_hash_index(field_name)) {
continue;
}
auto facet_index = facet_index_v4->get_facet_hash_index(field_name);
auto facet_index_it = is_reverse ? facet_index->new_rev_iterator() : facet_index->new_iterator();
group_by_field_it_t group_by_field_it_struct {field_name, std::move(facet_index_it),
search_schema.at(field_name).is_array()};
group_by_field_it_vec.emplace_back(std::move(group_by_field_it_struct));
}
return group_by_field_it_vec;
}
void Index::do_facets(std::vector<facet> & facets, facet_query_t & facet_query,
bool estimate_facets, size_t facet_sample_percent,
const std::vector<facet_info_t>& facet_infos,
const size_t group_limit, const std::vector<std::string>& group_by_fields,
const bool group_missing_values,
const uint32_t* result_ids, size_t results_size,
int max_facet_count, bool is_wildcard_no_filter_query,
const std::vector<facet_index_type_t>& facet_index_types) const {
if(results_size == 0) {
return ;
}
std::vector<group_by_field_it_t> group_by_field_it_vec;
size_t total_docs = seq_ids->num_ids();
// assumed that facet fields have already been validated upstream
for(auto& a_facet : facets) {
auto findex = a_facet.orig_index;
const auto& facet_field = facet_infos[findex].facet_field;
const bool use_facet_query = facet_infos[findex].use_facet_query;
const auto& fquery_hashes = facet_infos[findex].hashes;
const bool should_compute_stats = facet_infos[findex].should_compute_stats;
const bool use_value_index = facet_infos[findex].use_value_index;
auto sort_index_it = sort_index.find(a_facet.field_name);
auto facet_sort_index_it = sort_index.find(a_facet.sort_field);
if(facet_sample_percent == 0) {
facet_sample_percent = 1;
}
size_t facet_sample_mod_value = 100 / facet_sample_percent;
auto num_facet_values = facet_index_v4->get_facet_count(facet_field.name);
if(num_facet_values == 0) {
continue;
}
if(use_value_index) {
// LOG(INFO) << "Using intersection to find facets";
a_facet.is_intersected = true;
std::map<std::string, docid_count_t> facet_results;
std::string sort_order = a_facet.is_sort_by_alpha ? a_facet.sort_order : "";
facet_index_v4->intersect(a_facet, facet_field,use_facet_query,
estimate_facets, facet_sample_mod_value,
facet_infos[findex].fvalue_searched_tokens,
symbols_to_index, token_separators,
result_ids, results_size, max_facet_count, facet_results,
is_wildcard_no_filter_query, sort_order);
for(const auto& kv : facet_results) {
//range facet processing
if(a_facet.is_range_query) {
int64_t doc_val = std::stoll(kv.first);
std::pair<int64_t , std::string> range_pair {};
if(facet_field.is_float()) {
float val = std::stof(kv.first);
doc_val = Index::float_to_int64_t(val);
}
if(a_facet.get_range(doc_val, range_pair)) {
const auto& range_id = range_pair.first;
facet_count_t& facet_count = a_facet.result_map[range_id];
facet_count.count += kv.second.count;
}
} else {
facet_count_t& facet_count = a_facet.value_result_map[kv.first];
facet_count.count = kv.second.count;
facet_count.doc_id = kv.second.doc_id;
}
if(should_compute_stats) {
//LOG(INFO) << "Computing facet stats for facet value" << kv.first;
compute_facet_stats(a_facet, kv.first, facet_field.type, kv.second.count);
}
}
if(should_compute_stats) {
auto numerical_index_it = numerical_index.find(a_facet.field_name);
if(numerical_index_it != numerical_index.end()) {
auto min_max_pair = numerical_index_it->second->get_min_max(result_ids,
results_size);
if(facet_field.is_float()) {
a_facet.stats.fvmin = int64_t_to_float(min_max_pair.first);
a_facet.stats.fvmax = int64_t_to_float(min_max_pair.second);
} else {
a_facet.stats.fvmin = min_max_pair.first;
a_facet.stats.fvmax = min_max_pair.second;
}
}
}
} else {
//LOG(INFO) << "Using hashing to find facets";
bool facet_hash_index_exists = facet_index_v4->has_hash_index(facet_field.name);
if(!facet_hash_index_exists) {
continue;
}
const auto& fhash_int64_map = facet_index_v4->get_fhash_int64_map(a_facet.field_name);
const auto facet_field_is_array = facet_field.is_array();
const auto facet_field_is_int64 = facet_field.is_int64();
const auto& facet_index = facet_index_v4->get_facet_hash_index(facet_field.name);
posting_list_t::iterator_t facet_index_it = facet_index->new_iterator();
std::vector<uint32_t> facet_hashes(1);
if (group_limit != 0) {
group_by_field_it_vec = get_group_by_field_iterators(group_by_fields);
}
for(size_t i = 0; i < results_size; i++) {
// if sampling is enabled, we will skip a portion of the results to speed up things
if(estimate_facets) {
if(i % facet_sample_mod_value != 0) {
continue;
}
}
uint32_t doc_seq_id = result_ids[i];
facet_index_it.skip_to(doc_seq_id);
if(!facet_index_it.valid()) {
break;
}
if(facet_index_it.id() != doc_seq_id) {
continue;
}
facet_hashes.clear();
if(facet_field_is_array) {
posting_list_t::get_offsets(facet_index_it, facet_hashes);
} else {
facet_hashes.push_back(facet_index_it.offset());
}
uint64_t distinct_id = 0;
if(group_limit) {
distinct_id = 1;
for(auto& kv : group_by_field_it_vec) {
get_distinct_id(kv.it, doc_seq_id, kv.is_array, group_missing_values, distinct_id, false);
}
}
//LOG(INFO) << "facet_hash_count " << facet_hash_count;
if(((i + 1) % 16384) == 0) {
RETURN_CIRCUIT_BREAKER
}
std::set<uint32_t> unique_facet_hashes;
for(size_t j = 0; j < facet_hashes.size(); j++) {
const auto& fhash = facet_hashes[j];
// explicitly check facet_hashes.size() to avoid a set lookup/insert for non-array faceting
if(facet_hashes.size() > 1) {
if(unique_facet_hashes.count(fhash) != 0) {
continue;
} else {
unique_facet_hashes.insert(fhash);
}
}
if(should_compute_stats) {
int64_t val = fhash;
if(facet_field_is_int64) {
if(fhash_int64_map.find(fhash) != fhash_int64_map.end()) {
val = fhash_int64_map.at(fhash);
} else {
val = INT64_MAX;
}
}
compute_facet_stats(a_facet, val, facet_field.type);
}
if(a_facet.is_range_query) {
int64_t doc_val = get_doc_val_from_sort_index(sort_index_it, doc_seq_id);
std::pair<int64_t , std::string> range_pair {};
if(a_facet.get_range(doc_val, range_pair)) {
const auto& range_id = range_pair.first;
facet_count_t& facet_count = a_facet.result_map[range_id];
facet_count.count += 1;
if(group_limit) {
a_facet.hash_groups[range_id].emplace(distinct_id);
}
}
} else if(!use_facet_query || fquery_hashes.find(fhash) != fquery_hashes.end()) {
facet_count_t& facet_count = a_facet.result_map[fhash];
//LOG(INFO) << "field: " << a_facet.field_name << ", doc id: " << doc_seq_id << ", hash: " << fhash;
facet_count.doc_id = doc_seq_id;
facet_count.array_pos = j;
if(group_limit) {
a_facet.hash_groups[fhash].emplace(distinct_id);
} else {
facet_count.count += 1;
}
if(use_facet_query) {
//LOG (INFO) << "adding hash tokens for hash " << fhash;
a_facet.hash_tokens[fhash] = fquery_hashes.at(fhash);
}
if(!a_facet.sort_field.empty()) {
facet_count.sort_field_val = get_doc_val_from_sort_index(facet_sort_index_it, doc_seq_id);
//LOG(INFO) << "found sort_field val " << facet_count.sort_field;
}
}
}
}
}
}
}
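// merges a per-index topster into the aggregate topster, flattening group topsters when grouping is enabled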
void Index::aggregate_topster(Topster* agg_topster, Topster* index_topster) {
if(index_topster->distinct) {
for(auto &group_topster_entry: index_topster->group_kv_map) {
Topster* group_topster = group_topster_entry.second;
for(const auto& map_kv: group_topster->kv_map) {
agg_topster->add(map_kv.second);
}
}
} else {
for(const auto& map_kv: index_topster->kv_map) {
agg_topster->add(map_kv.second);
}
}
}
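// Enumerates combinations of token candidates (one candidate per query token, i.e. a cartesian product),
// bounded by `combination_limit`, and evaluates each previously unseen combination via search_across_fields().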
Option<bool> Index::search_all_candidates(const size_t num_search_fields,
const text_match_type_t match_type,
const std::vector<search_field_t>& the_fields,
filter_result_iterator_t* const filter_result_iterator,
const uint32_t* excluded_result_ids, size_t excluded_result_ids_size,
const std::unordered_set<uint32_t>& excluded_group_ids,
const std::vector<sort_by>& sort_fields,
std::vector<tok_candidates>& token_candidates_vec,
std::vector<std::vector<art_leaf*>>& searched_queries,
tsl::htrie_map<char, token_leaf>& qtoken_set,
const std::vector<token_t>& dropped_tokens,
Topster*& topster,
spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed,
size_t& num_keyword_matches,
uint32_t*& all_result_ids, size_t& all_result_ids_len,
const size_t typo_tokens_threshold,
const size_t group_limit,
const std::vector<std::string>& group_by_fields,
const bool group_missing_values,
const std::vector<token_t>& query_tokens,
const std::vector<uint32_t>& num_typos,
const std::vector<bool>& prefixes,
bool prioritize_exact_match,
const bool prioritize_token_position,
const bool prioritize_num_matching_fields,
const bool exhaustive_search,
const size_t max_candidates,
int syn_orig_num_tokens,
const int* sort_order,
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values,
const std::vector<size_t>& geopoint_indices,
std::set<uint64>& query_hashes,
std::vector<uint32_t>& id_buff, const std::string& collection_name) const {
/*if(!token_candidates_vec.empty()) {
LOG(INFO) << "Prefix candidates size: " << token_candidates_vec.back().candidates.size();
LOG(INFO) << "max_candidates: " << max_candidates;
LOG(INFO) << "token_candidates_vec.size(): " << token_candidates_vec.size();
}*/
auto product = [](long long a, tok_candidates& b) { return a * b.candidates.size(); };
long long int N = std::accumulate(token_candidates_vec.begin(), token_candidates_vec.end(), 1LL, product);
// escape hatch to prevent too much looping, but subject to being overridden explicitly via `max_candidates`
long long combination_limit = (num_search_fields == 1 && prefixes[0]) ? max_candidates :
std::max<size_t>(Index::COMBINATION_MIN_LIMIT, max_candidates);
for(long long n = 0; n < N && n < combination_limit; ++n) {
RETURN_CIRCUIT_BREAKER_OP
std::vector<token_t> query_suggestion(token_candidates_vec.size());
uint64 qhash;
uint32_t total_cost = next_suggestion2(token_candidates_vec, n, query_suggestion, qhash);
/*LOG(INFO) << "n: " << n;
std::stringstream fullq;
for(const auto& qtok : query_suggestion) {
fullq << qtok.value << " ";
}
LOG(INFO) << "query: " << fullq.str() << ", total_cost: " << total_cost
<< ", all_result_ids_len: " << all_result_ids_len << ", bufsiz: " << id_buff.size();*/
if(query_hashes.find(qhash) != query_hashes.end()) {
// skip this query since it has already been processed before
//LOG(INFO) << "Skipping qhash " << qhash;
continue;
}
//LOG(INFO) << "field_num_results: " << field_num_results << ", typo_tokens_threshold: " << typo_tokens_threshold;
auto search_across_fields_op = search_across_fields(query_suggestion, num_typos, prefixes, the_fields,
num_search_fields, match_type,
sort_fields, topster, groups_processed,
searched_queries, qtoken_set, dropped_tokens,
group_limit, group_by_fields, group_missing_values,
prioritize_exact_match, prioritize_token_position,
prioritize_num_matching_fields,
filter_result_iterator,
total_cost, syn_orig_num_tokens,
excluded_result_ids, excluded_result_ids_size,
excluded_group_ids,
sort_order, field_values, geopoint_indices,
id_buff, num_keyword_matches,
all_result_ids, all_result_ids_len,
collection_name);
if (!search_across_fields_op.ok()) {
return search_across_fields_op;
}
query_hashes.insert(qhash);
filter_result_iterator->reset();
search_cutoff = search_cutoff || filter_result_iterator->validity == filter_result_iterator_t::timed_out;
}
return Option<bool>(true);
}
bool Index::field_is_indexed(const std::string& field_name) const {
return search_index.count(field_name) != 0 ||
numerical_index.count(field_name) != 0 ||
range_index.count(field_name) != 0 ||
geo_range_index.count(field_name) != 0;
}
Option<bool> Index::do_filtering_with_lock(filter_node_t* const filter_tree_root,
filter_result_t& filter_result,
const std::string& collection_name,
const bool& should_timeout) const {
std::shared_lock lock(mutex);
auto filter_result_iterator = filter_result_iterator_t(collection_name, this, filter_tree_root, false,
DEFAULT_FILTER_BY_CANDIDATES,
search_begin_us, should_timeout ? search_stop_us : UINT64_MAX);
auto filter_init_op = filter_result_iterator.init_status();
if (!filter_init_op.ok()) {
return filter_init_op;
}
filter_result_iterator.compute_iterators();
if (filter_result_iterator.approx_filter_ids_length == 0) {
return Option(true);
}
if (filter_result_iterator.reference.empty()) {
filter_result.count = filter_result_iterator.to_filter_id_array(filter_result.docs);
return Option(true);
}
uint32_t count = filter_result_iterator.approx_filter_ids_length, dummy;
auto ref_filter_result = new filter_result_t();
std::unique_ptr<filter_result_t> ref_filter_result_guard(ref_filter_result);
filter_result_iterator.get_n_ids(count, dummy, nullptr, 0, ref_filter_result);
if (filter_result_iterator.validity == filter_result_iterator_t::timed_out) {
return Option<bool>(true);
}
filter_result = std::move(*ref_filter_result);
return Option(true);
}
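// Appends a single reference doc (along with its own nested references) to an existing reference_filter_result_t.
// Both the docs array and the per-doc references array are reallocated, so each append costs O(count).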
void aggregate_nested_references(single_filter_result_t *const reference_result,
reference_filter_result_t& ref_filter_result) {
// Add the reference doc id to the result.
auto temp_docs = new uint32_t[ref_filter_result.count + 1];
std::copy(ref_filter_result.docs, ref_filter_result.docs + ref_filter_result.count, temp_docs);
temp_docs[ref_filter_result.count] = reference_result->seq_id;
delete[] ref_filter_result.docs;
ref_filter_result.docs = temp_docs;
ref_filter_result.count++;
ref_filter_result.is_reference_array_field = false;
// Add the references of the reference doc id to the result.
auto& references = ref_filter_result.coll_to_references;
auto temp_references = new std::map<std::string, reference_filter_result_t>[ref_filter_result.count] {};
for (uint32_t i = 0; i < ref_filter_result.count - 1; i++) {
temp_references[i] = std::move(references[i]);
}
temp_references[ref_filter_result.count - 1] = std::move(reference_result->reference_filter_results);
delete[] references;
references = temp_references;
}
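// Evaluates the given filter, then maps the matching reference doc ids to the doc ids that point at them via the
// reference helper field (singular via the sort index, array via the reference index), carrying nested references
// along when the filter itself produced references.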
Option<bool> Index::do_reference_filtering_with_lock(filter_node_t* const filter_tree_root,
filter_result_t& filter_result,
const std::string& ref_collection_name,
const std::string& field_name) const {
std::shared_lock lock(mutex);
auto ref_filter_result_iterator = filter_result_iterator_t(ref_collection_name, this, filter_tree_root, false,
DEFAULT_FILTER_BY_CANDIDATES,
search_begin_us, search_stop_us);
auto filter_init_op = ref_filter_result_iterator.init_status();
if (!filter_init_op.ok()) {
return filter_init_op;
}
ref_filter_result_iterator.compute_iterators();
if (ref_filter_result_iterator.approx_filter_ids_length == 0) {
return Option(true);
}
uint32_t count = ref_filter_result_iterator.approx_filter_ids_length, dummy;
auto ref_filter_result = new filter_result_t();
std::unique_ptr<filter_result_t> ref_filter_result_guard(ref_filter_result);
ref_filter_result_iterator.get_n_ids(count, dummy, nullptr, 0, ref_filter_result);
if (ref_filter_result_iterator.validity == filter_result_iterator_t::timed_out) {
return Option<bool>(true);
}
uint32_t* reference_docs = ref_filter_result->docs;
ref_filter_result->docs = nullptr;
std::unique_ptr<uint32_t[]> docs_guard(reference_docs);
auto const reference_helper_field_name = field_name + fields::REFERENCE_HELPER_FIELD_SUFFIX;
auto const is_nested_join = !ref_filter_result_iterator.reference.empty();
if (search_schema.at(reference_helper_field_name).is_singular()) { // Only one reference per doc.
if (sort_index.count(reference_helper_field_name) == 0) {
return Option<bool>(400, "`" + reference_helper_field_name + "` is not present in sort index.");
}
auto const& ref_index = *sort_index.at(reference_helper_field_name);
if (is_nested_join) {
// In case of nested join, we need to collect all the doc ids from the reference ids along with their references.
std::vector<std::pair<uint32_t, single_filter_result_t*>> id_pairs;
std::unordered_set<uint32_t> unique_doc_ids;
for (uint32_t i = 0; i < count; i++) {
auto& reference_doc_id = reference_docs[i];
auto reference_doc_references = std::move(ref_filter_result->coll_to_references[i]);
if (ref_index.count(reference_doc_id) == 0) { // Reference field might be optional.
continue;
}
auto doc_id = ref_index.at(reference_doc_id);
id_pairs.emplace_back(std::make_pair(doc_id, new single_filter_result_t(reference_doc_id,
std::move(reference_doc_references),
false)));
unique_doc_ids.insert(doc_id);
}
if (id_pairs.empty()) {
return Option(true);
}
std::sort(id_pairs.begin(), id_pairs.end(), [](auto const& left, auto const& right) {
return left.first < right.first;
});
filter_result.count = unique_doc_ids.size();
filter_result.docs = new uint32_t[unique_doc_ids.size()];
filter_result.coll_to_references = new std::map<std::string, reference_filter_result_t>[unique_doc_ids.size()] {};
reference_filter_result_t previous_doc_references;
for (uint32_t i = 0, previous_doc = id_pairs[0].first + 1, result_index = 0; i < id_pairs.size(); i++) {
auto const& current_doc = id_pairs[i].first;
auto& reference_result = id_pairs[i].second;
if (current_doc != previous_doc) {
filter_result.docs[result_index] = current_doc;
if (result_index > 0) {
std::map<std::string, reference_filter_result_t> references;
references[ref_collection_name] = std::move(previous_doc_references);
filter_result.coll_to_references[result_index - 1] = std::move(references);
}
result_index++;
previous_doc = current_doc;
aggregate_nested_references(reference_result, previous_doc_references);
} else {
aggregate_nested_references(reference_result, previous_doc_references);
}
}
if (previous_doc_references.count != 0) {
std::map<std::string, reference_filter_result_t> references;
references[ref_collection_name] = std::move(previous_doc_references);
filter_result.coll_to_references[filter_result.count - 1] = std::move(references);
}
for (auto &item: id_pairs) {
delete item.second;
}
return Option(true);
}
// Collect all the doc ids from the reference ids.
std::vector<std::pair<uint32_t, uint32_t>> id_pairs;
std::unordered_set<uint32_t> unique_doc_ids;
for (uint32_t i = 0; i < count; i++) {
auto& reference_doc_id = reference_docs[i];
if (ref_index.count(reference_doc_id) == 0) { // Reference field might be optional.
continue;
}
auto doc_id = ref_index.at(reference_doc_id);
if (doc_id == Index::reference_helper_sentinel_value) {
continue;
}
id_pairs.emplace_back(std::make_pair(doc_id, reference_doc_id));
unique_doc_ids.insert(doc_id);
}
if (id_pairs.empty()) {
return Option(true);
}
std::sort(id_pairs.begin(), id_pairs.end(), [](auto const& left, auto const& right) {
return left.first < right.first;
});
filter_result.count = unique_doc_ids.size();
filter_result.docs = new uint32_t[unique_doc_ids.size()];
filter_result.coll_to_references = new std::map<std::string, reference_filter_result_t>[unique_doc_ids.size()] {};
std::vector<uint32_t> previous_doc_references;
for (uint32_t i = 0, previous_doc = id_pairs[0].first + 1, result_index = 0; i < id_pairs.size(); i++) {
auto const& current_doc = id_pairs[i].first;
auto const& reference_doc_id = id_pairs[i].second;
if (current_doc != previous_doc) {
filter_result.docs[result_index] = current_doc;
if (result_index > 0) {
auto& reference_result = filter_result.coll_to_references[result_index - 1];
auto r = reference_filter_result_t(previous_doc_references.size(),
new uint32_t[previous_doc_references.size()],
false);
std::copy(previous_doc_references.begin(), previous_doc_references.end(), r.docs);
reference_result[ref_collection_name] = std::move(r);
previous_doc_references.clear();
}
result_index++;
previous_doc = current_doc;
previous_doc_references.push_back(reference_doc_id);
} else {
previous_doc_references.push_back(reference_doc_id);
}
}
if (!previous_doc_references.empty()) {
auto& reference_result = filter_result.coll_to_references[filter_result.count - 1];
auto r = reference_filter_result_t(previous_doc_references.size(),
new uint32_t[previous_doc_references.size()],
false);
std::copy(previous_doc_references.begin(), previous_doc_references.end(), r.docs);
reference_result[ref_collection_name] = std::move(r);
}
return Option(true);
}
// Multiple references per doc.
if (reference_index.count(reference_helper_field_name) == 0) {
return Option<bool>(400, "`" + reference_helper_field_name + "` is not present in reference index.");
}
auto& ref_index = *reference_index.at(reference_helper_field_name);
if (is_nested_join) {
// In case of nested join, we need to collect all the doc ids from the reference ids along with their references.
std::vector<std::pair<uint32_t, single_filter_result_t*>> id_pairs;
std::unordered_set<uint32_t> unique_doc_ids;
for (uint32_t i = 0; i < count; i++) {
auto& reference_doc_id = reference_docs[i];
auto reference_doc_references = std::move(ref_filter_result->coll_to_references[i]);
size_t doc_ids_len = 0;
uint32_t* doc_ids = nullptr;
ref_index.search(EQUALS, reference_doc_id, &doc_ids, doc_ids_len);
for (size_t j = 0; j < doc_ids_len; j++) {
auto doc_id = doc_ids[j];
auto reference_doc_references_copy = reference_doc_references;
id_pairs.emplace_back(std::make_pair(doc_id, new single_filter_result_t(reference_doc_id,
std::move(reference_doc_references_copy),
false)));
unique_doc_ids.insert(doc_id);
}
delete[] doc_ids;
}
if (id_pairs.empty()) {
return Option(true);
}
std::sort(id_pairs.begin(), id_pairs.end(), [](auto const& left, auto const& right) {
return left.first < right.first;
});
filter_result.count = unique_doc_ids.size();
filter_result.docs = new uint32_t[unique_doc_ids.size()];
filter_result.coll_to_references = new std::map<std::string, reference_filter_result_t>[unique_doc_ids.size()] {};
reference_filter_result_t previous_doc_references;
for (uint32_t i = 0, previous_doc = id_pairs[0].first + 1, result_index = 0; i < id_pairs.size(); i++) {
auto const& current_doc = id_pairs[i].first;
auto& reference_result = id_pairs[i].second;
if (current_doc != previous_doc) {
filter_result.docs[result_index] = current_doc;
if (result_index > 0) {
std::map<std::string, reference_filter_result_t> references;
references[ref_collection_name] = std::move(previous_doc_references);
filter_result.coll_to_references[result_index - 1] = std::move(references);
}
result_index++;
previous_doc = current_doc;
aggregate_nested_references(reference_result, previous_doc_references);
} else {
aggregate_nested_references(reference_result, previous_doc_references);
}
}
if (previous_doc_references.count != 0) {
std::map<std::string, reference_filter_result_t> references;
references[ref_collection_name] = std::move(previous_doc_references);
filter_result.coll_to_references[filter_result.count - 1] = std::move(references);
}
for (auto &item: id_pairs) {
delete item.second;
}
return Option<bool>(true);
}
std::vector<std::pair<uint32_t, uint32_t>> id_pairs;
std::unordered_set<uint32_t> unique_doc_ids;
for (uint32_t i = 0; i < count; i++) {
auto& reference_doc_id = reference_docs[i];
size_t doc_ids_len = 0;
uint32_t* doc_ids = nullptr;
ref_index.search(EQUALS, reference_doc_id, &doc_ids, doc_ids_len);
for (size_t j = 0; j < doc_ids_len; j++) {
auto doc_id = doc_ids[j];
id_pairs.emplace_back(std::make_pair(doc_id, reference_doc_id));
unique_doc_ids.insert(doc_id);
}
delete[] doc_ids;
}
if (id_pairs.empty()) {
return Option(true);
}
std::sort(id_pairs.begin(), id_pairs.end(), [](auto const& left, auto const& right) {
return left.first < right.first;
});
filter_result.count = unique_doc_ids.size();
filter_result.docs = new uint32_t[unique_doc_ids.size()];
filter_result.coll_to_references = new std::map<std::string, reference_filter_result_t>[unique_doc_ids.size()] {};
std::vector<uint32_t> previous_doc_references;
for (uint32_t i = 0, previous_doc = id_pairs[0].first + 1, result_index = 0; i < id_pairs.size(); i++) {
auto const& current_doc = id_pairs[i].first;
auto const& reference_doc_id = id_pairs[i].second;
if (current_doc != previous_doc) {
filter_result.docs[result_index] = current_doc;
if (result_index > 0) {
auto& reference_result = filter_result.coll_to_references[result_index - 1];
auto r = reference_filter_result_t(previous_doc_references.size(), new uint32_t[previous_doc_references.size()]);
std::copy(previous_doc_references.begin(), previous_doc_references.end(), r.docs);
reference_result[ref_collection_name] = std::move(r);
previous_doc_references.clear();
}
result_index++;
previous_doc = current_doc;
previous_doc_references.push_back(reference_doc_id);
} else {
previous_doc_references.push_back(reference_doc_id);
}
}
if (!previous_doc_references.empty()) {
auto& reference_result = filter_result.coll_to_references[filter_result.count - 1];
auto r = reference_filter_result_t(previous_doc_references.size(), new uint32_t[previous_doc_references.size()]);
std::copy(previous_doc_references.begin(), previous_doc_references.end(), r.docs);
reference_result[ref_collection_name] = std::move(r);
}
return Option(true);
}
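// Given doc ids that already matched a filter in the referenced collection, finds the docs in this index whose
// reference helper field points at those ids (via the numerical index), preserving nested references if any.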
Option<filter_result_t> Index::do_filtering_with_reference_ids(const std::string& field_name,
const std::string& ref_collection_name,
filter_result_t&& ref_filter_result) const {
filter_result_t filter_result;
auto const& count = ref_filter_result.count;
auto const& reference_docs = ref_filter_result.docs;
auto const is_nested_join = ref_filter_result.coll_to_references != nullptr;
if (count == 0) {
return Option<filter_result_t>(filter_result);
}
auto const reference_helper_field_name = field_name + fields::REFERENCE_HELPER_FIELD_SUFFIX;
if (numerical_index.count(reference_helper_field_name) == 0) {
return Option<filter_result_t>(400, "`" + reference_helper_field_name + "` is not present in index.");
}
auto num_tree = numerical_index.at(reference_helper_field_name);
if (is_nested_join) {
// In case of nested join, we need to collect all the doc ids from the reference ids along with their references.
std::vector<std::pair<uint32_t, single_filter_result_t*>> id_pairs;
std::unordered_set<uint32_t> unique_doc_ids;
for (uint32_t i = 0; i < count; i++) {
auto& reference_doc_id = reference_docs[i];
auto reference_doc_references = std::move(ref_filter_result.coll_to_references[i]);
size_t doc_ids_len = 0;
uint32_t* doc_ids = nullptr;
num_tree->search(NUM_COMPARATOR::EQUALS, reference_doc_id, &doc_ids, doc_ids_len);
for (size_t j = 0; j < doc_ids_len; j++) {
auto doc_id = doc_ids[j];
auto reference_doc_references_copy = reference_doc_references;
id_pairs.emplace_back(std::make_pair(doc_id, new single_filter_result_t(reference_doc_id,
std::move(reference_doc_references_copy),
false)));
unique_doc_ids.insert(doc_id);
}
delete[] doc_ids;
}
if (id_pairs.empty()) {
return Option(filter_result);
}
std::sort(id_pairs.begin(), id_pairs.end(), [](auto const& left, auto const& right) {
return left.first < right.first;
});
filter_result.count = unique_doc_ids.size();
filter_result.docs = new uint32_t[unique_doc_ids.size()];
filter_result.coll_to_references = new std::map<std::string, reference_filter_result_t>[unique_doc_ids.size()] {};
reference_filter_result_t previous_doc_references;
for (uint32_t i = 0, previous_doc = id_pairs[0].first + 1, result_index = 0; i < id_pairs.size(); i++) {
auto const& current_doc = id_pairs[i].first;
auto& reference_result = id_pairs[i].second;
if (current_doc != previous_doc) {
filter_result.docs[result_index] = current_doc;
if (result_index > 0) {
std::map<std::string, reference_filter_result_t> references;
references[ref_collection_name] = std::move(previous_doc_references);
filter_result.coll_to_references[result_index - 1] = std::move(references);
}
result_index++;
previous_doc = current_doc;
aggregate_nested_references(reference_result, previous_doc_references);
} else {
aggregate_nested_references(reference_result, previous_doc_references);
}
}
if (previous_doc_references.count != 0) {
std::map<std::string, reference_filter_result_t> references;
references[ref_collection_name] = std::move(previous_doc_references);
filter_result.coll_to_references[filter_result.count - 1] = std::move(references);
}
for (auto &item: id_pairs) {
delete item.second;
}
return Option<filter_result_t>(filter_result);
}
// Collect all the doc ids from the reference ids.
std::vector<std::pair<uint32_t, uint32_t>> id_pairs;
std::unordered_set<uint32_t> unique_doc_ids;
for (uint32_t i = 0; i < count; i++) {
auto& reference_doc_id = reference_docs[i];
size_t doc_ids_len = 0;
uint32_t* doc_ids = nullptr;
num_tree->search(NUM_COMPARATOR::EQUALS, reference_doc_id, &doc_ids, doc_ids_len);
for (size_t j = 0; j < doc_ids_len; j++) {
auto doc_id = doc_ids[j];
id_pairs.emplace_back(std::make_pair(doc_id, reference_doc_id));
unique_doc_ids.insert(doc_id);
}
delete[] doc_ids;
}
if (id_pairs.empty()) {
return Option(filter_result);
}
std::sort(id_pairs.begin(), id_pairs.end(), [](auto const& left, auto const& right) {
return left.first < right.first;
});
filter_result.count = unique_doc_ids.size();
filter_result.docs = new uint32_t[unique_doc_ids.size()];
filter_result.coll_to_references = new std::map<std::string, reference_filter_result_t>[unique_doc_ids.size()] {};
std::vector<uint32_t> previous_doc_references;
for (uint32_t i = 0, previous_doc = id_pairs[0].first + 1, result_index = 0; i < id_pairs.size(); i++) {
auto const& current_doc = id_pairs[i].first;
auto const& reference_doc_id = id_pairs[i].second;
if (current_doc != previous_doc) {
filter_result.docs[result_index] = current_doc;
if (result_index > 0) {
auto& reference_result = filter_result.coll_to_references[result_index - 1];
auto r = reference_filter_result_t(previous_doc_references.size(),
new uint32_t[previous_doc_references.size()],
false);
std::copy(previous_doc_references.begin(), previous_doc_references.end(), r.docs);
reference_result[ref_collection_name] = std::move(r);
previous_doc_references.clear();
}
result_index++;
previous_doc = current_doc;
previous_doc_references.push_back(reference_doc_id);
} else {
previous_doc_references.push_back(reference_doc_id);
}
}
if (!previous_doc_references.empty()) {
auto& reference_result = filter_result.coll_to_references[filter_result.count - 1];
auto r = reference_filter_result_t(previous_doc_references.size(),
new uint32_t[previous_doc_references.size()],
false);
std::copy(previous_doc_references.begin(), previous_doc_references.end(), r.docs);
reference_result[ref_collection_name] = std::move(r);
}
return Option<filter_result_t>(filter_result);
}
Option<bool> Index::run_search(search_args* search_params, const std::string& collection_name,
const std::vector<facet_index_type_t>& facet_index_types, bool enable_typos_for_numerical_tokens,
bool enable_synonyms, bool synonym_prefix, uint32_t synonym_num_typos,
bool enable_typos_for_alpha_numerical_tokens, bool rerank_hybrid_matches) {
auto res = search(search_params->field_query_tokens,
search_params->search_fields,
search_params->match_type,
search_params->filter_tree_root, search_params->facets, search_params->facet_query,
search_params->max_facet_values,
search_params->included_ids, search_params->excluded_ids,
search_params->sort_fields_std, search_params->num_typos,
search_params->topster, search_params->curated_topster,
search_params->fetch_size,
search_params->per_page, search_params->offset, search_params->token_order,
search_params->prefixes, search_params->drop_tokens_threshold,
search_params->all_result_ids_len, search_params->groups_processed,
search_params->searched_queries,
search_params->qtoken_set,
search_params->raw_result_kvs, search_params->override_result_kvs,
search_params->typo_tokens_threshold,
search_params->group_limit,
search_params->group_by_fields,
search_params->group_missing_values,
search_params->default_sorting_field,
search_params->prioritize_exact_match,
search_params->prioritize_token_position,
search_params->prioritize_num_matching_fields,
search_params->exhaustive_search,
search_params->concurrency,
search_params->search_cutoff_ms,
search_params->min_len_1typo,
search_params->min_len_2typo,
search_params->max_candidates,
search_params->infixes,
search_params->max_extra_prefix,
search_params->max_extra_suffix,
search_params->facet_query_num_typos,
search_params->filter_curated_hits,
search_params->split_join_tokens,
search_params->vector_query,
search_params->facet_sample_percent,
search_params->facet_sample_threshold,
collection_name,
search_params->drop_tokens_mode,
facet_index_types,
enable_typos_for_numerical_tokens,
enable_synonyms,
synonym_prefix,
synonym_num_typos,
search_params->enable_lazy_filter,
enable_typos_for_alpha_numerical_tokens,
search_params->max_filter_by_candidates,
rerank_hybrid_matches
);
return res;
}
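// curated (pinned) hits get synthetic scores derived from their configured outer/inner positions so that they
// rank in the intended order inside the curated topster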
void Index::collate_included_ids(const std::vector<token_t>& q_included_tokens,
const std::map<size_t, std::map<size_t, uint32_t>> & included_ids_map,
Topster*& curated_topster,
std::vector<std::vector<art_leaf*>> & searched_queries) const {
if(included_ids_map.empty()) {
return;
}
for(const auto& pos_ids: included_ids_map) {
const size_t outer_pos = pos_ids.first;
for(const auto& index_seq_id: pos_ids.second) {
uint32_t inner_pos = index_seq_id.first;
uint32_t seq_id = index_seq_id.second;
uint64_t distinct_id = outer_pos; // outer pos is the group distinct key
uint64_t match_score = (64000 - outer_pos - inner_pos); // both outer pos and inner pos inside group
// LOG(INFO) << "seq_id: " << seq_id << " - " << match_score;
int64_t scores[3];
scores[0] = match_score;
scores[1] = int64_t(1);
scores[2] = int64_t(1);
KV kv(0, seq_id, distinct_id, 0, scores);
curated_topster->add(&kv);
}
}
}
void Index::concat_topster_ids(Topster*& topster, spp::sparse_hash_map<uint64_t, std::vector<KV*>>& topster_ids) {
if(topster->distinct) {
for(auto &group_topster_entry: topster->group_kv_map) {
Topster* group_topster = group_topster_entry.second;
for(const auto& map_kv: group_topster->kv_map) {
topster_ids[map_kv.first].push_back(map_kv.second);
}
}
} else {
for(const auto& map_kv: topster->kv_map) {
//LOG(INFO) << "map_kv.second.key: " << map_kv.second->key;
//LOG(INFO) << "map_kv.first: " << map_kv.first;
topster_ids[map_kv.first].push_back(map_kv.second);
}
}
}
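// A static override applies when its tag rules match, or when the normalized query matches the rule exactly or as
// a contained phrase; its filter_by clause is then parsed and ANDed into the existing filter tree.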
bool Index::static_filter_query_eval(const override_t* override,
std::vector<std::string>& tokens,
filter_node_t*& filter_tree_root) const {
std::string query = StringUtils::join(tokens, " ");
bool tag_matched = (!override->rule.tags.empty() && override->rule.filter_by.empty() &&
override->rule.query.empty());
bool wildcard_tag_matched = (override->rule.tags.size() == 1 && *override->rule.tags.begin() == "*");
if (tag_matched || wildcard_tag_matched ||
(override->rule.match == override_t::MATCH_EXACT && override->rule.normalized_query == query) ||
(override->rule.match == override_t::MATCH_CONTAINS &&
StringUtils::contains_word(query, override->rule.normalized_query))) {
filter_node_t* new_filter_tree_root = nullptr;
Option<bool> filter_op = filter::parse_filter_query(override->filter_by, search_schema,
store, "", new_filter_tree_root);
if (filter_op.ok()) {
if (filter_tree_root == nullptr) {
filter_tree_root = new_filter_tree_root;
} else {
auto root = new filter_node_t(AND, filter_tree_root,
new_filter_tree_root);
filter_tree_root = root;
}
return true;
} else {
delete new_filter_tree_root;
}
}
return false;
}
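// Walks the override's rule tokens, expanding `{field}` placeholders against the query tokens by probing the
// field's index (check_for_overrides); the absorbed tokens later replace the placeholders in the filter_by clause.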
bool Index::resolve_override(const std::vector<std::string>& rule_tokens, const bool exact_rule_match,
const std::vector<std::string>& query_tokens,
token_ordering token_order, std::set<std::string>& absorbed_tokens,
std::string& filter_by_clause, bool enable_typos_for_numerical_tokens,
bool enable_typos_for_alpha_numerical_tokens) const {
bool resolved_override = false;
size_t i = 0, j = 0;
std::unordered_map<std::string, std::vector<std::string>> field_placeholder_tokens;
while(i < rule_tokens.size()) {
if(rule_tokens[i].front() == '{' && rule_tokens[i].back() == '}') {
// found a field placeholder
std::vector<std::string> field_names;
std::string rule_part = rule_tokens[i];
field_names.emplace_back(rule_part.erase(0, 1).erase(rule_part.size() - 1));
// skip until we find a non-placeholder token
i++;
while(i < rule_tokens.size() && (rule_tokens[i].front() == '{' && rule_tokens[i].back() == '}')) {
rule_part = rule_tokens[i];
field_names.emplace_back(rule_part.erase(0, 1).erase(rule_part.size() - 1));
i++;
}
std::vector<std::string> matched_tokens;
// `i` now points to either end of array or at a non-placeholder rule token
// end of array: add remaining query tokens as matched tokens
// non-placeholder: collect query tokens until one matches that rule token
while(j < query_tokens.size() && (i == rule_tokens.size() || rule_tokens[i] != query_tokens[j])) {
matched_tokens.emplace_back(query_tokens[j]);
j++;
}
resolved_override = true;
// we try to map `field_names` against `matched_tokens` now
for(size_t findex = 0; findex < field_names.size(); findex++) {
const auto& field_name = field_names[findex];
bool slide_window = (findex == 0); // fields following another field should match exactly
std::vector<std::string> field_absorbed_tokens;
resolved_override &= check_for_overrides(token_order, field_name, slide_window,
exact_rule_match, matched_tokens, absorbed_tokens,
field_absorbed_tokens, enable_typos_for_numerical_tokens,
enable_typos_for_alpha_numerical_tokens);
if(!resolved_override) {
goto RETURN_EARLY;
}
field_placeholder_tokens[field_name] = field_absorbed_tokens;
}
} else {
// rule token is not a placeholder, so we skip query tokens until one matches the rule token
while(j < query_tokens.size() && query_tokens[j] != rule_tokens[i]) {
if(exact_rule_match) {
// a single mismatch is enough to fail exact match
return false;
}
j++;
}
// either we have exhausted all query tokens
if(j == query_tokens.size()) {
return false;
}
// or query token matches rule token, so we can proceed
i++;
j++;
}
}
RETURN_EARLY:
if(!resolved_override || (exact_rule_match && query_tokens.size() != absorbed_tokens.size())) {
return false;
}
// replace placeholder with field_absorbed_tokens in rule_tokens
for(const auto& kv: field_placeholder_tokens) {
std::string pattern = "{" + kv.first + "}";
std::string replacement = StringUtils::join(kv.second, " ");
StringUtils::replace_all(filter_by_clause, pattern, replacement);
}
return true;
}
void Index::process_filter_overrides(const std::vector<const override_t*>& filter_overrides,
std::vector<std::string>& query_tokens,
token_ordering token_order,
filter_node_t*& filter_tree_root,
std::vector<const override_t*>& matched_dynamic_overrides,
nlohmann::json& override_metadata,
bool enable_typos_for_numerical_tokens,
bool enable_typos_for_alpha_numerical_tokens) const {
std::shared_lock lock(mutex);
for (auto& override : filter_overrides) {
if (!override->rule.dynamic_query) {
// Simple static filtering: add to filter_by and rewrite query if needed.
// Check the original query and then the synonym variants until a rule matches.
bool resolved_override = static_filter_query_eval(override, query_tokens, filter_tree_root);
if (resolved_override) {
if(override_metadata.empty()) {
override_metadata = override->metadata;
}
if (override->remove_matched_tokens) {
std::vector<std::string> rule_tokens;
Tokenizer(override->rule.query, true).tokenize(rule_tokens);
std::set<std::string> rule_token_set(rule_tokens.begin(), rule_tokens.end());
remove_matched_tokens(query_tokens, rule_token_set);
}
if (override->stop_processing) {
return;
}
}
} else {
// need to extract placeholder field names from the search query, filter on them and rewrite query
// we will cover both original query and synonyms
std::vector<std::string> rule_parts;
StringUtils::split(override->rule.normalized_query, rule_parts, " ");
bool exact_rule_match = override->rule.match == override_t::MATCH_EXACT;
std::string filter_by_clause = override->filter_by;
std::set<std::string> absorbed_tokens;
bool resolved_override = resolve_override(rule_parts, exact_rule_match, query_tokens,
token_order, absorbed_tokens, filter_by_clause,
enable_typos_for_numerical_tokens,
enable_typos_for_alpha_numerical_tokens);
if (resolved_override) {
if(override_metadata.empty()) {
override_metadata = override->metadata;
}
filter_node_t* new_filter_tree_root = nullptr;
Option<bool> filter_op = filter::parse_filter_query(filter_by_clause, search_schema,
store, "", new_filter_tree_root);
if (filter_op.ok()) {
// have to ensure that dropped hits take precedence over added hits
matched_dynamic_overrides.push_back(override);
if (override->remove_matched_tokens) {
std::vector<std::string>& tokens = query_tokens;
remove_matched_tokens(tokens, absorbed_tokens);
}
if (filter_tree_root == nullptr) {
filter_tree_root = new_filter_tree_root;
} else {
filter_node_t* root = new filter_node_t(AND, filter_tree_root,
new_filter_tree_root);
filter_tree_root = root;
}
} else {
delete new_filter_tree_root;
}
if (override->stop_processing) {
return;
}
}
}
}
}
void Index::remove_matched_tokens(std::vector<std::string>& tokens, const std::set<std::string>& rule_token_set) {
std::vector<std::string> new_tokens;
for(std::string& token: tokens) {
if(rule_token_set.count(token) == 0) {
new_tokens.push_back(token);
}
}
if(new_tokens.empty()) {
tokens = {"*"};
} else {
tokens = new_tokens;
}
}
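// Slides windows of decreasing length over the query tokens and looks for exact matches of each window within the
// given field; on a match, the window tokens are absorbed and removed from the query.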
bool Index::check_for_overrides(const token_ordering& token_order, const string& field_name, const bool slide_window,
bool exact_rule_match, std::vector<std::string>& tokens,
std::set<std::string>& absorbed_tokens,
std::vector<std::string>& field_absorbed_tokens,
bool enable_typos_for_numerical_tokens,
bool enable_typos_for_alpha_numerical_tokens) const {
for(size_t window_len = tokens.size(); window_len > 0; window_len--) {
for(size_t start_index = 0; start_index+window_len-1 < tokens.size(); start_index++) {
std::vector<token_t> window_tokens;
std::set<std::string> window_tokens_set;
for (size_t i = start_index; i < start_index + window_len; i++) {
bool is_prefix = (i == (start_index + window_len - 1));
window_tokens.emplace_back(i, tokens[i], is_prefix, tokens[i].size(), 0);
window_tokens_set.emplace(tokens[i]);
}
std::vector<facet> facets;
std::vector<std::vector<art_leaf*>> searched_queries;
Topster* topster = nullptr;
spp::sparse_hash_map<uint64_t, uint32_t> groups_processed;
uint32_t* result_ids = nullptr;
size_t result_ids_len = 0;
size_t field_num_results = 0;
std::vector<std::string> group_by_fields;
std::set<uint64> query_hashes;
size_t num_toks_dropped = 0;
auto field_it = search_schema.find(field_name);
if(field_it == search_schema.end()) {
continue;
}
std::vector<sort_by> sort_fields;
std::vector<search_field_t> fq_fields;
fq_fields.emplace_back(field_name, field_it.value().faceted_name(), 1, 0, false, enable_t::off);
uint32_t* filter_ids = nullptr;
filter_result_iterator_t filter_result_it(filter_ids, 0);
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values{};
const std::vector<size_t> geopoint_indices;
tsl::htrie_map<char, token_leaf> qtoken_set;
auto fuzzy_search_fields_op = fuzzy_search_fields(
fq_fields, window_tokens, {}, text_match_type_t::max_score, nullptr, 0,
&filter_result_it, {}, {}, sort_fields, {0}, searched_queries,
qtoken_set, topster, groups_processed, result_ids, result_ids_len,
0, group_by_fields, false, true, false, false, query_hashes, MAX_SCORE, {false}, 1,
false, 4, 3, 7, 0, nullptr, field_values, geopoint_indices, "", true);
if(!fuzzy_search_fields_op.ok()) {
continue;
}
if(result_ids_len != 0) {
// we need to narrow down to the exact matches
std::vector<void*> posting_lists;
art_tree* t = search_index.at(field_name);
for(auto& w_token: window_tokens) {
art_leaf* leaf = (art_leaf *) art_search(t, (const unsigned char*) w_token.value.c_str(),
w_token.value.length()+1);
if(leaf == nullptr) {
continue;
}
posting_lists.push_back(leaf->values);
}
uint32_t* exact_strt_ids = new uint32_t[result_ids_len];
size_t exact_strt_size = 0;
posting_t::get_exact_matches(posting_lists, field_it.value().is_array(), result_ids, result_ids_len,
exact_strt_ids, exact_strt_size);
delete [] result_ids;
delete [] exact_strt_ids;
if(exact_strt_size != 0) {
// remove window_tokens from `tokens`
std::vector<std::string> new_tokens;
for(size_t new_i = start_index; new_i < tokens.size(); new_i++) {
const auto& token = tokens[new_i];
if(window_tokens_set.count(token) == 0) {
new_tokens.emplace_back(token);
} else {
absorbed_tokens.insert(token);
field_absorbed_tokens.emplace_back(token);
}
}
tokens = new_tokens;
return true;
}
}
if(!slide_window) {
break;
}
}
}
return false;
}
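// Infix search: each set of the field's infix index is scanned on the thread pool for keys that contain the query
// within the allowed extra prefix/suffix lengths; the posting lists of the matching ART leaves are then merged.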
Option<bool> Index::search_infix(const std::string& query, const std::string& field_name, std::vector<uint32_t>& ids,
const size_t max_extra_prefix, const size_t max_extra_suffix) const {
auto infix_maps_it = infix_index.find(field_name);
if(infix_maps_it == infix_index.end()) {
return Option<bool>(400, "Could not find `" + field_name + "` in the infix index. Make sure to enable infix "
"search by specifying `infix: true` in the schema.");
}
auto infix_sets = infix_maps_it->second;
std::vector<art_leaf*> leaves;
size_t num_processed = 0;
std::mutex m_process;
std::condition_variable cv_process;
auto search_tree = search_index.at(field_name);
const auto parent_search_begin = search_begin_us;
const auto parent_search_stop_ms = search_stop_us;
auto parent_search_cutoff = search_cutoff;
for(auto infix_set: infix_sets) {
thread_pool->enqueue([infix_set, &leaves, search_tree, &query, max_extra_prefix, max_extra_suffix,
&num_processed, &m_process, &cv_process,
&parent_search_begin, &parent_search_stop_ms, &parent_search_cutoff]() {
search_begin_us = parent_search_begin;
search_cutoff = false;
auto op_search_stop_ms = parent_search_stop_ms/2;
std::vector<art_leaf*> this_leaves;
std::string key_buffer;
size_t num_iterated = 0;
for(auto it = infix_set->begin(); it != infix_set->end(); it++) {
it.key(key_buffer);
num_iterated++;
auto start_index = key_buffer.find(query);
if(start_index != std::string::npos && start_index <= max_extra_prefix &&
(key_buffer.size() - (start_index + query.size())) <= max_extra_suffix) {
art_leaf* l = (art_leaf *) art_search(search_tree,
(const unsigned char *) key_buffer.c_str(),
key_buffer.size()+1);
if(l != nullptr) {
this_leaves.push_back(l);
}
}
// check for search cutoff, but only once every 4096 iterations to reduce overhead
if(((num_iterated + 1) % (1 << 12)) == 0) {
if ((std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().
time_since_epoch()).count() - search_begin_us) > op_search_stop_ms) {
search_cutoff = true;
break;
}
}
}
std::unique_lock<std::mutex> lock(m_process);
leaves.insert(leaves.end(), this_leaves.begin(), this_leaves.end());
num_processed++;
parent_search_cutoff = parent_search_cutoff || search_cutoff;
cv_process.notify_one();
});
}
std::unique_lock<std::mutex> lock_process(m_process);
cv_process.wait(lock_process, [&](){ return num_processed == infix_sets.size(); });
search_cutoff = parent_search_cutoff;
for(auto leaf: leaves) {
posting_t::merge({leaf->values}, ids);
}
return Option<bool>(true);
}
Option<bool> Index::search(std::vector<query_tokens_t>& field_query_tokens, const std::vector<search_field_t>& the_fields,
const text_match_type_t match_type,
filter_node_t*& filter_tree_root, std::vector<facet>& facets, facet_query_t& facet_query,
const int max_facet_values,
const std::vector<std::pair<uint32_t, uint32_t>>& included_ids,
const std::vector<uint32_t>& excluded_ids, std::vector<sort_by>& sort_fields_std,
const std::vector<uint32_t>& num_typos, Topster*& topster, Topster*& curated_topster,
const size_t fetch_size,
const size_t per_page,
const size_t offset, const token_ordering token_order, const std::vector<bool>& prefixes,
const size_t drop_tokens_threshold, size_t& all_result_ids_len,
spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed,
std::vector<std::vector<art_leaf*>>& searched_queries,
tsl::htrie_map<char, token_leaf>& qtoken_set,
std::vector<std::vector<KV*>>& raw_result_kvs, std::vector<std::vector<KV*>>& override_result_kvs,
const size_t typo_tokens_threshold, const size_t group_limit,
const std::vector<std::string>& group_by_fields,
const bool group_missing_values,
const string& default_sorting_field, bool prioritize_exact_match,
const bool prioritize_token_position, const bool prioritize_num_matching_fields, bool exhaustive_search,
size_t concurrency, size_t search_cutoff_ms, size_t min_len_1typo, size_t min_len_2typo,
size_t max_candidates, const std::vector<enable_t>& infixes, const size_t max_extra_prefix,
const size_t max_extra_suffix, const size_t facet_query_num_typos,
const bool filter_curated_hits, const enable_t split_join_tokens,
const vector_query_t& vector_query,
size_t facet_sample_percent, size_t facet_sample_threshold,
const std::string& collection_name,
const drop_tokens_param_t drop_tokens_mode,
const std::vector<facet_index_type_t>& facet_index_types,
bool enable_typos_for_numerical_tokens,
bool enable_synonyms, bool synonym_prefix,
uint32_t synonym_num_typos,
bool enable_lazy_filter,
bool enable_typos_for_alpha_numerical_tokens, const size_t& max_filter_by_candidates,
bool rerank_hybrid_matches) const {
std::shared_lock lock(mutex);
if(field_query_tokens.empty()) {
// this can happen if missing query_by fields are configured to be ignored
return Option<bool>(true);
}
auto filter_result_iterator = new filter_result_iterator_t(collection_name, this, filter_tree_root,
enable_lazy_filter, max_filter_by_candidates,
search_begin_us, search_stop_us);
std::unique_ptr<filter_result_iterator_t> filter_iterator_guard(filter_result_iterator);
auto filter_init_op = filter_result_iterator->init_status();
if (!filter_init_op.ok()) {
return filter_init_op;
}
#ifdef TEST_BUILD
if (filter_result_iterator->approx_filter_ids_length > 20) {
filter_result_iterator->compute_iterators();
}
#else
if (!enable_lazy_filter || filter_result_iterator->approx_filter_ids_length < COMPUTE_FILTER_ITERATOR_THRESHOLD) {
filter_result_iterator->compute_iterators();
}
#endif
size_t topster_size = std::max<size_t>(fetch_size, DEFAULT_TOPSTER_SIZE);
if(filter_result_iterator->approx_filter_ids_length != 0 && filter_result_iterator->reference.empty()) {
topster_size = std::min<size_t>(topster_size, filter_result_iterator->approx_filter_ids_length);
} else {
topster_size = std::min<size_t>(topster_size, num_seq_ids());
}
topster_size = std::max((size_t)1, topster_size); // needs to be at least 1 since scoring is mandatory
topster = new Topster(topster_size, group_limit);
curated_topster = new Topster(topster_size, group_limit);
std::set<uint32_t> curated_ids;
std::map<size_t, std::map<size_t, uint32_t>> included_ids_map; // outer pos => inner pos => list of IDs
std::vector<uint32_t> included_ids_vec;
std::unordered_set<uint32_t> excluded_group_ids;
process_curated_ids(included_ids, excluded_ids, group_by_fields, group_limit,
group_missing_values, filter_curated_hits,
filter_result_iterator, curated_ids, included_ids_map,
included_ids_vec, excluded_group_ids);
collate_included_ids({}, included_ids_map, curated_topster, searched_queries);
filter_result_iterator->reset();
search_cutoff = search_cutoff || filter_result_iterator->validity == filter_result_iterator_t::timed_out;
std::vector<uint32_t> curated_ids_sorted(curated_ids.begin(), curated_ids.end());
std::sort(curated_ids_sorted.begin(), curated_ids_sorted.end());
bool const& filter_by_provided = filter_tree_root != nullptr;
bool const& no_filter_by_matches = filter_by_provided && filter_result_iterator->approx_filter_ids_length == 0;
// If curation is not involved and there are no filter matches, return early.
if (curated_ids_sorted.empty() && no_filter_by_matches) {
return Option(true);
}
// Order of `fields` are used to sort results
// auto begin = std::chrono::high_resolution_clock::now();
uint32_t* all_result_ids = nullptr;
const size_t num_search_fields = std::min(the_fields.size(), (size_t) FIELD_LIMIT_NUM);
// handle exclusion of tokens/phrases
uint32_t* exclude_token_ids = nullptr;
size_t exclude_token_ids_size = 0;
handle_exclusion(num_search_fields, field_query_tokens, the_fields, exclude_token_ids, exclude_token_ids_size);
int sort_order[3]; // 1 or -1 based on DESC or ASC respectively
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values;
std::vector<size_t> geopoint_indices;
auto populate_op = populate_sort_mapping(sort_order, geopoint_indices, sort_fields_std, field_values);
if (!populate_op.ok()) {
return populate_op;
}
// Prepare excluded document IDs that we can later remove from the result set
uint32_t* excluded_result_ids = nullptr;
size_t excluded_result_ids_size = ArrayUtils::or_scalar(exclude_token_ids, exclude_token_ids_size,
&curated_ids_sorted[0], curated_ids_sorted.size(),
&excluded_result_ids);
auto is_wildcard_query = !field_query_tokens.empty() && !field_query_tokens[0].q_include_tokens.empty() &&
field_query_tokens[0].q_include_tokens[0].value == "*";
// phrase queries are handled as a filtering query
bool is_wildcard_non_phrase_query = is_wildcard_query && field_query_tokens[0].q_phrases.empty();
// handle phrase searches
if (!field_query_tokens[0].q_phrases.empty()) {
auto do_phrase_search_op = do_phrase_search(num_search_fields, the_fields, field_query_tokens,
sort_fields_std, searched_queries, group_limit, group_by_fields,
group_missing_values,
topster, sort_order, field_values, geopoint_indices,
filter_result_iterator, all_result_ids, all_result_ids_len,
groups_processed,
excluded_result_ids, excluded_result_ids_size, excluded_group_ids,
is_wildcard_query, collection_name);
filter_iterator_guard.release();
filter_iterator_guard.reset(filter_result_iterator);
if (!do_phrase_search_op.ok()) {
delete [] all_result_ids;
return do_phrase_search_op;
}
if (filter_result_iterator->approx_filter_ids_length == 0) {
goto process_search_results;
}
}
// for phrase queries, the parser sets field_query_tokens to "*", so that case needs to be handled here
if (is_wildcard_non_phrase_query) {
if(!filter_by_provided && facets.empty() && group_by_fields.empty() && curated_ids.empty() &&
vector_query.field_name.empty() && sort_fields_std.size() == 1 &&
sort_fields_std[0].name == sort_field_const::seq_id && sort_fields_std[0].order == sort_field_const::desc) {
// optimize for this path specifically
std::vector<uint32_t> result_ids;
auto it = seq_ids->new_rev_iterator();
std::vector<group_by_field_it_t> group_by_field_it_vec;
if (group_limit != 0) {
group_by_field_it_vec = get_group_by_field_iterators(group_by_fields, true);
}
while (it.valid()) {
uint32_t seq_id = it.id();
uint64_t distinct_id = seq_id;
if (group_limit != 0) {
distinct_id = 1;
for(auto& kv : group_by_field_it_vec) {
get_distinct_id(kv.it, seq_id, kv.is_array, group_missing_values, distinct_id, true);
}
if(excluded_group_ids.count(distinct_id) != 0) {
continue;
}
if(groups_processed.size() == fetch_size) {
break;
}
}
int64_t scores[3] = {0};
scores[0] = seq_id;
int64_t match_score_index = -1;
result_ids.push_back(seq_id);
KV kv(searched_queries.size(), seq_id, distinct_id, match_score_index, scores);
int ret = topster->add(&kv);
if(group_limit != 0 && ret < 2) {
groups_processed[distinct_id]++;
}
if (result_ids.size() == fetch_size && group_limit == 0) {
break;
}
it.previous();
}
all_result_ids_len = seq_ids->num_ids();
goto process_search_results;
}
if (!vector_query.field_name.empty()) {
auto k = vector_query.k == 0 ? std::max<size_t>(vector_query.k, fetch_size) : vector_query.k;
VectorFilterFunctor filterFunctor(filter_result_iterator, excluded_result_ids, excluded_result_ids_size);
auto& field_vector_index = vector_index.at(vector_query.field_name);
if(vector_query.query_doc_given && filterFunctor(vector_query.seq_id)) {
// since the query doc will be omitted from the results, we request one more doc
k++;
}
filter_result_iterator->reset();
std::vector<std::pair<float, single_filter_result_t>> dist_results;
filter_result_iterator->compute_iterators();
uint32_t filter_id_count = filter_result_iterator->approx_filter_ids_length;
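// when a filter is present and matches fewer ids than the flat search cutoff, compute exact distances over the
// filtered ids instead of doing an HNSW graph search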
if (filter_by_provided && filter_id_count < vector_query.flat_search_cutoff) {
while (filter_result_iterator->validity == filter_result_iterator_t::valid) {
auto seq_id = filter_result_iterator->seq_id;
auto filter_result = single_filter_result_t(seq_id, std::move(filter_result_iterator->reference));
filter_result_iterator->next();
std::vector<float> values;
try {
values = field_vector_index->vecdex->getDataByLabel<float>(seq_id);
} catch (...) {
// likely not found
continue;
}
float dist;
if (field_vector_index->distance_type == cosine) {
std::vector<float> normalized_q(vector_query.values.size());
hnsw_index_t::normalize_vector(vector_query.values, normalized_q);
dist = field_vector_index->space->get_dist_func()(normalized_q.data(), values.data(),
&field_vector_index->num_dim);
} else {
dist = field_vector_index->space->get_dist_func()(vector_query.values.data(), values.data(),
&field_vector_index->num_dim);
}
dist_results.emplace_back(dist, filter_result);
}
}
filter_result_iterator->reset();
search_cutoff = search_cutoff || filter_result_iterator->validity == filter_result_iterator_t::timed_out;
if(!filter_by_provided ||
(filter_id_count >= vector_query.flat_search_cutoff && filter_result_iterator->validity == filter_result_iterator_t::valid)) {
dist_results.clear();
std::vector<std::pair<float, size_t>> pairs;
if(field_vector_index->distance_type == cosine) {
std::vector<float> normalized_q(vector_query.values.size());
hnsw_index_t::normalize_vector(vector_query.values, normalized_q);
pairs = field_vector_index->vecdex->searchKnnCloserFirst(normalized_q.data(), k, vector_query.ef, &filterFunctor);
} else {
pairs = field_vector_index->vecdex->searchKnnCloserFirst(vector_query.values.data(), k, vector_query.ef, &filterFunctor);
}
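// sort the kNN results by seq_id (ascending) so that the filter iterator below can validate ids in order while
// collecting their references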
std::sort(pairs.begin(), pairs.end(), [](auto& x, auto& y) {
return x.second < y.second;
});
filter_result_iterator->reset();
if (!filter_result_iterator->reference.empty()) {
// We'll have to get the references of each document.
for (auto pair: pairs) {
if (filter_result_iterator->validity == filter_result_iterator_t::timed_out) {
// Overriding timeout since we need to get the references of matched docs.
filter_result_iterator->reset(true);
search_cutoff = true;
}
auto const& seq_id = pair.second;
if (filter_result_iterator->is_valid(seq_id, search_cutoff) != 1) {
continue;
}
// The seq_id must be valid otherwise it would've been filtered out upstream.
auto filter_result = single_filter_result_t(seq_id,
std::move(filter_result_iterator->reference));
dist_results.emplace_back(pair.first, filter_result);
}
} else {
for (const auto &pair: pairs) {
auto filter_result = single_filter_result_t(pair.second, {});
dist_results.emplace_back(pair.first, filter_result);
}
}
}
std::vector<uint32_t> nearest_ids;
std::vector<uint32_t> eval_filter_indexes;
std::vector<group_by_field_it_t> group_by_field_it_vec;
if (group_limit != 0) {
group_by_field_it_vec = get_group_by_field_iterators(group_by_fields);
}
for (auto& dist_result : dist_results) {
auto& seq_id = dist_result.second.seq_id;
auto references = std::move(dist_result.second.reference_filter_results);
if(vector_query.query_doc_given && vector_query.seq_id == seq_id) {
continue;
}
uint64_t distinct_id = seq_id;
if (group_limit != 0) {
distinct_id = 1;
for(auto &kv : group_by_field_it_vec) {
get_distinct_id(kv.it, seq_id, kv.is_array, group_missing_values, distinct_id);
}
if(excluded_group_ids.count(distinct_id) != 0) {
continue;
}
}
auto vec_dist_score = (field_vector_index->distance_type == cosine) ? std::abs(dist_result.first) :
dist_result.first;
if(vec_dist_score > vector_query.distance_threshold) {
continue;
}
int64_t scores[3] = {0};
int64_t match_score_index = -1;
bool should_skip = false;
auto compute_sort_scores_op = compute_sort_scores(sort_fields_std, sort_order, field_values,
geopoint_indices, seq_id, references, eval_filter_indexes,
0, scores, match_score_index, should_skip, vec_dist_score,
collection_name);
if (!compute_sort_scores_op.ok()) {
return compute_sort_scores_op;
}
if(should_skip) {
continue;
}
KV kv(searched_queries.size(), seq_id, distinct_id, match_score_index, scores, std::move(references));
kv.vector_distance = vec_dist_score;
int ret = topster->add(&kv);
if(group_limit != 0 && ret < 2) {
groups_processed[distinct_id]++;
}
nearest_ids.push_back(seq_id);
}
if(!nearest_ids.empty()) {
std::sort(nearest_ids.begin(), nearest_ids.end()); // seq_ids should be in ascending order
all_result_ids = new uint32_t[nearest_ids.size()];
std::copy(nearest_ids.begin(), nearest_ids.end(), all_result_ids);
all_result_ids_len = nearest_ids.size();
}
} else {
// if filters were not provided, use the seq_ids index to generate the list of all document ids
if (!filter_by_provided) {
filter_result_iterator = new filter_result_iterator_t(seq_ids->uncompress(), seq_ids->num_ids(),
max_filter_by_candidates,
search_begin_us, search_stop_us);
filter_iterator_guard.reset(filter_result_iterator);
}
auto search_wildcard_op = search_wildcard(filter_tree_root, sort_fields_std, topster,
curated_topster, groups_processed, searched_queries, group_limit, group_by_fields,
group_missing_values,
excluded_result_ids, excluded_result_ids_size, excluded_group_ids,
all_result_ids, all_result_ids_len,
filter_result_iterator, concurrency,
sort_order, field_values, geopoint_indices, collection_name);
if (!search_wildcard_op.ok()) {
return search_wildcard_op;
}
}
if(excluded_result_ids_size != 0) {
uint32_t* excluded_all_result_ids = nullptr;
all_result_ids_len = ArrayUtils::exclude_scalar(all_result_ids, all_result_ids_len, excluded_result_ids,
excluded_result_ids_size, &excluded_all_result_ids);
delete[] all_result_ids;
all_result_ids = excluded_all_result_ids;
}
} else {
// Non-wildcard
// In multi-field searches, a record can be matched across different fields, so we use this for aggregation
//begin = std::chrono::high_resolution_clock::now();
// FIXME: needed?
std::set<uint64> query_hashes;
// resolve synonyms so that we can compute `syn_orig_num_tokens`
std::vector<std::vector<token_t>> all_queries = {field_query_tokens[0].q_unstemmed_tokens.empty() ?
field_query_tokens[0].q_include_tokens : field_query_tokens[0].q_unstemmed_tokens};
std::vector<std::vector<token_t>> q_pos_synonyms;
std::vector<std::string> q_include_tokens;
int syn_orig_num_tokens = -1;
if(!field_query_tokens[0].q_unstemmed_tokens.empty()) {
for(size_t j = 0; j < field_query_tokens[0].q_unstemmed_tokens.size(); j++) {
q_include_tokens.push_back(field_query_tokens[0].q_unstemmed_tokens[j].value);
}
} else {
for(size_t j = 0; j < field_query_tokens[0].q_include_tokens.size(); j++) {
q_include_tokens.push_back(field_query_tokens[0].q_include_tokens[j].value);
}
}
auto search_field_it = search_schema.find(the_fields[0].name);
const bool found_search_field = (search_field_it != search_schema.end());
if(enable_synonyms && found_search_field) {
synonym_index->synonym_reduction(q_include_tokens, search_field_it->locale,
field_query_tokens[0].q_synonyms,
synonym_prefix, synonym_num_typos);
}
if(!field_query_tokens[0].q_synonyms.empty()) {
syn_orig_num_tokens = field_query_tokens[0].q_include_tokens.size();
}
const bool do_stemming = found_search_field && search_field_it->stem;
for(const auto& q_syn_vec: field_query_tokens[0].q_synonyms) {
std::vector<token_t> q_pos_syn;
for(size_t j=0; j < q_syn_vec.size(); j++) {
bool is_prefix = (j == q_syn_vec.size()-1);
std::string token_val = q_syn_vec[j];
if (do_stemming) {
auto stemmer = search_schema.at(the_fields[0].name).get_stemmer();
token_val = stemmer->stem(q_syn_vec[j]);
}
q_pos_syn.emplace_back(j, token_val, is_prefix, token_val.size(), 0);
}
q_pos_synonyms.push_back(q_pos_syn);
all_queries.push_back(q_pos_syn);
if((int)q_syn_vec.size() > syn_orig_num_tokens) {
syn_orig_num_tokens = (int) q_syn_vec.size();
}
}
auto fuzzy_search_fields_op = fuzzy_search_fields(the_fields, field_query_tokens[0].q_include_tokens, {}, match_type,
excluded_result_ids, excluded_result_ids_size,
filter_result_iterator, curated_ids_sorted,
excluded_group_ids, sort_fields_std, num_typos,
searched_queries, qtoken_set, topster, groups_processed,
all_result_ids, all_result_ids_len,
group_limit, group_by_fields, group_missing_values, prioritize_exact_match,
prioritize_token_position, prioritize_num_matching_fields,
query_hashes, token_order, prefixes,
typo_tokens_threshold, exhaustive_search,
max_candidates, min_len_1typo, min_len_2typo,
syn_orig_num_tokens, sort_order, field_values, geopoint_indices,
collection_name, enable_typos_for_numerical_tokens,
enable_typos_for_alpha_numerical_tokens);
if (!fuzzy_search_fields_op.ok()) {
return fuzzy_search_fields_op;
}
// try splitting/joining tokens if no results are found
if(split_join_tokens == always || (all_result_ids_len == 0 && split_join_tokens == fallback)) {
std::vector<std::vector<std::string>> space_resolved_queries;
for (size_t i = 0; i < num_search_fields; i++) {
std::vector<std::string> orig_q_include_tokens;
for(auto& q_include_token: field_query_tokens[i].q_include_tokens) {
orig_q_include_tokens.push_back(q_include_token.value);
}
resolve_space_as_typos(orig_q_include_tokens, the_fields[i].name, space_resolved_queries);
if (!space_resolved_queries.empty()) {
break;
}
}
// only one query is resolved for now, so just use that
if (!space_resolved_queries.empty()) {
const auto& resolved_query = space_resolved_queries[0];
std::vector<token_t> resolved_tokens;
for(size_t j=0; j < resolved_query.size(); j++) {
bool is_prefix = (j == resolved_query.size()-1);
resolved_tokens.emplace_back(j, space_resolved_queries[0][j], is_prefix,
space_resolved_queries[0][j].size(), 0);
}
auto fuzzy_search_fields_op = fuzzy_search_fields(the_fields, resolved_tokens, {}, match_type, excluded_result_ids,
excluded_result_ids_size, filter_result_iterator, curated_ids_sorted,
excluded_group_ids,
sort_fields_std, num_typos, searched_queries,
qtoken_set, topster, groups_processed,
all_result_ids, all_result_ids_len,
group_limit, group_by_fields, group_missing_values,
prioritize_exact_match, prioritize_token_position,
prioritize_num_matching_fields,
query_hashes, token_order,
prefixes, typo_tokens_threshold, exhaustive_search,
max_candidates, min_len_1typo, min_len_2typo,
syn_orig_num_tokens, sort_order, field_values, geopoint_indices,
collection_name);
if (!fuzzy_search_fields_op.ok()) {
return fuzzy_search_fields_op;
}
}
}
// do synonym based searches
auto do_synonym_search_op = do_synonym_search(the_fields, match_type, filter_tree_root,
sort_fields_std, curated_topster, token_order, 0, group_limit,
group_by_fields, group_missing_values, prioritize_exact_match, prioritize_token_position,
prioritize_num_matching_fields, exhaustive_search, concurrency, prefixes,
min_len_1typo, min_len_2typo, max_candidates, curated_ids, curated_ids_sorted,
excluded_result_ids, excluded_result_ids_size, excluded_group_ids,
topster, q_pos_synonyms, syn_orig_num_tokens,
groups_processed, searched_queries, all_result_ids, all_result_ids_len,
filter_result_iterator, query_hashes,
sort_order, field_values, geopoint_indices,
qtoken_set, collection_name);
if (!do_synonym_search_op.ok()) {
return do_synonym_search_op;
}
filter_result_iterator->reset();
search_cutoff = search_cutoff || filter_result_iterator->validity == filter_result_iterator_t::timed_out;
// gather up both the original query and the synonym queries and perform token dropping
if (exhaustive_search || all_result_ids_len < drop_tokens_threshold) {
for (size_t qi = 0; qi < all_queries.size(); qi++) {
auto& orig_tokens = all_queries[qi];
size_t num_tokens_dropped = 0;
size_t total_dirs_done = 0;
// NOTE: when dropping both sides we will ignore exhaustive search
auto curr_direction = drop_tokens_mode.mode;
bool drop_both_sides = false;
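// in `both_sides` mode, short queries (token count <= token_limit) keep dropping tokens from both
// directions even after the result threshold is met; longer queries start right-to-left and obey the threshold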
if(drop_tokens_mode.mode == both_sides) {
if(orig_tokens.size() <= drop_tokens_mode.token_limit) {
drop_both_sides = true;
} else {
curr_direction = right_to_left;
}
}
while(exhaustive_search || all_result_ids_len < drop_tokens_threshold || drop_both_sides) {
// When at least two tokens from the query are available, we can drop one
std::vector<token_t> truncated_tokens;
std::vector<token_t> dropped_tokens;
if(num_tokens_dropped >= orig_tokens.size() - 1) {
// swap direction and reset counter
curr_direction = (curr_direction == right_to_left) ? left_to_right : right_to_left;
num_tokens_dropped = 0;
total_dirs_done++;
}
if(orig_tokens.size() > 1 && total_dirs_done < 2) {
bool prefix_search = false;
if (curr_direction == right_to_left) {
// drop from right
size_t truncated_len = orig_tokens.size() - num_tokens_dropped - 1;
for (size_t i = 0; i < orig_tokens.size(); i++) {
if(i < truncated_len) {
truncated_tokens.emplace_back(orig_tokens[i]);
} else {
dropped_tokens.emplace_back(orig_tokens[i]);
}
}
} else {
// drop from left
prefix_search = true;
size_t start_index = (num_tokens_dropped + 1);
for(size_t i = 0; i < orig_tokens.size(); i++) {
if(i >= start_index) {
truncated_tokens.emplace_back(orig_tokens[i]);
} else {
dropped_tokens.emplace_back(orig_tokens[i]);
}
}
}
num_tokens_dropped++;
std::vector<bool> drop_token_prefixes;
for (const auto p : prefixes) {
drop_token_prefixes.push_back(p && prefix_search);
}
auto fuzzy_search_fields_op = fuzzy_search_fields(the_fields, truncated_tokens, dropped_tokens, match_type,
excluded_result_ids, excluded_result_ids_size,
filter_result_iterator,
curated_ids_sorted, excluded_group_ids,
sort_fields_std, num_typos, searched_queries,
qtoken_set, topster, groups_processed,
all_result_ids, all_result_ids_len,
group_limit, group_by_fields, group_missing_values,
prioritize_exact_match, prioritize_token_position,
prioritize_num_matching_fields, query_hashes,
token_order, prefixes, typo_tokens_threshold,
exhaustive_search, max_candidates, min_len_1typo,
min_len_2typo, -1, sort_order, field_values, geopoint_indices,
collection_name);
if (!fuzzy_search_fields_op.ok()) {
return fuzzy_search_fields_op;
}
} else {
break;
}
}
}
}
auto do_infix_search_op = do_infix_search(num_search_fields, the_fields, infixes, sort_fields_std,
searched_queries,
group_limit, group_by_fields, group_missing_values,
max_extra_prefix, max_extra_suffix,
field_query_tokens[0].q_include_tokens,
topster, filter_result_iterator,
sort_order, field_values, geopoint_indices,
curated_ids_sorted, excluded_group_ids,
all_result_ids, all_result_ids_len, groups_processed,
collection_name);
if (!do_infix_search_op.ok()) {
return do_infix_search_op;
}
filter_result_iterator->reset();
search_cutoff = search_cutoff || filter_result_iterator->validity == filter_result_iterator_t::timed_out;
if(!vector_query.field_name.empty()) {
// check whether at least one of the sort fields is text match
bool has_text_match = false;
for(auto& sort_field : sort_fields_std) {
if(sort_field.name == sort_field_const::text_match) {
has_text_match = true;
break;
}
}
if(has_text_match) {
// For hybrid search, we need to give weight to text match and vector search
const float VECTOR_SEARCH_WEIGHT = vector_query.alpha;
const float TEXT_MATCH_WEIGHT = 1.0 - VECTOR_SEARCH_WEIGHT;
VectorFilterFunctor filterFunctor(filter_result_iterator, excluded_result_ids, excluded_result_ids_size);
auto& field_vector_index = vector_index.at(vector_query.field_name);
std::vector<std::pair<float, size_t>> dist_labels;
// use k as 100 by default to ensure result stability in pagination
size_t default_k = 100;
auto k = vector_query.k == 0 ? std::max<size_t>(fetch_size, default_k) : vector_query.k;
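// for cosine distance, the query vector is normalized before the kNN lookup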
if(field_vector_index->distance_type == cosine) {
std::vector<float> normalized_q(vector_query.values.size());
hnsw_index_t::normalize_vector(vector_query.values, normalized_q);
dist_labels = field_vector_index->vecdex->searchKnnCloserFirst(normalized_q.data(), k, vector_query.ef, &filterFunctor);
} else {
dist_labels = field_vector_index->vecdex->searchKnnCloserFirst(vector_query.values.data(), k, vector_query.ef, &filterFunctor);
}
filter_result_iterator->reset();
search_cutoff = search_cutoff || filter_result_iterator->validity == filter_result_iterator_t::timed_out;
std::vector<std::pair<uint32_t,float>> vec_results;
for (const auto& dist_label : dist_labels) {
uint32_t seq_id = dist_label.second;
auto vec_dist_score = (field_vector_index->distance_type == cosine) ? std::abs(dist_label.first) :
dist_label.first;
if(vec_dist_score > vector_query.distance_threshold) {
continue;
}
vec_results.emplace_back(seq_id, vec_dist_score);
}
// iteration needs to happen on sorted sequence IDs, but a score-wise sort is needed first to compute ranks for rank fusion
std::sort(vec_results.begin(), vec_results.end(), [](const auto& a, const auto& b) {
return a.second < b.second;
});
std::unordered_map<uint32_t, uint32_t> seq_id_to_rank;
for(size_t vec_index = 0; vec_index < vec_results.size(); vec_index++) {
seq_id_to_rank.emplace(vec_results[vec_index].first, vec_index);
}
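// now re-sort by seq_id so that hits can be iterated in ascending doc id order along with the filter iterator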
std::sort(vec_results.begin(), vec_results.end(), [](const auto& a, const auto& b) {
return a.first < b.first;
});
std::vector<KV*> kvs;
if(group_limit != 0) {
for(auto& kv_map : topster->group_kv_map) {
for(int i = 0; i < kv_map.second->size; i++) {
kvs.push_back(kv_map.second->getKV(i));
}
}
std::sort(kvs.begin(), kvs.end(), Topster::is_greater);
} else {
topster->sort();
}
// Reciprocal rank fusion
// Score is sum of (1 / rank_of_document) * WEIGHT from each list (text match and vector search)
auto size = (group_limit != 0) ? kvs.size() : topster->size;
for(uint32_t i = 0; i < size; i++) {
auto result = (group_limit != 0) ? kvs[i] : topster->getKV(i);
if(result->match_score_index < 0 || result->match_score_index > 2) {
continue;
}
// (1 / rank_of_document) * WEIGHT
result->text_match_score = result->scores[result->match_score_index];
result->scores[result->match_score_index] = float_to_int64_t((1.0 / (i + 1)) * TEXT_MATCH_WEIGHT);
}
std::vector<uint32_t> vec_search_ids; // list of IDs found only in vector search
std::vector<uint32_t> eval_filter_indexes;
std::vector<group_by_field_it_t> group_by_field_it_vec;
if (group_limit != 0) {
group_by_field_it_vec = get_group_by_field_iterators(group_by_fields);
}
for(size_t res_index = 0; res_index < vec_results.size() &&
filter_result_iterator->validity != filter_result_iterator_t::timed_out; res_index++) {
auto& vec_result = vec_results[res_index];
auto seq_id = vec_result.first;
if (filter_by_provided && filter_result_iterator->is_valid(seq_id) != 1) {
continue;
}
auto references = std::move(filter_result_iterator->reference);
filter_result_iterator->reset();
KV* found_kv = nullptr;
if(group_limit != 0) {
for(auto& kv : kvs) {
if(kv->key == seq_id) {
found_kv = kv;
break;
}
}
} else {
auto result_it = topster->kv_map.find(seq_id);
if(result_it != topster->kv_map.end()) {
found_kv = result_it->second;
}
}
if(found_kv) {
if(found_kv->match_score_index < 0 || found_kv->match_score_index > 2) {
continue;
}
// result overlaps with keyword search: we have to combine the scores
// old_score + (1 / rank_of_document) * WEIGHT
found_kv->vector_distance = vec_result.second;
int64_t match_score = float_to_int64_t(
(int64_t_to_float(found_kv->scores[found_kv->match_score_index])) +
((1.0 / (seq_id_to_rank[seq_id] + 1)) * VECTOR_SEARCH_WEIGHT));
int64_t match_score_index = -1;
int64_t scores[3] = {0};
bool should_skip = false;
auto compute_sort_scores_op = compute_sort_scores(sort_fields_std, sort_order, field_values,
geopoint_indices, seq_id, references, eval_filter_indexes,
match_score, scores, match_score_index, should_skip,
vec_result.second, collection_name);
if (!compute_sort_scores_op.ok()) {
return compute_sort_scores_op;
}
if(should_skip) {
continue;
}
for(int i = 0; i < 3; i++) {
found_kv->scores[i] = scores[i];
}
found_kv->match_score_index = match_score_index;
} else {
// Result has been found only in vector search: we have to add it to both KV and result_ids
// (1 / rank_of_document) * WEIGHT
int64_t scores[3] = {0};
int64_t match_score = float_to_int64_t((1.0 / (seq_id_to_rank[seq_id] + 1)) * VECTOR_SEARCH_WEIGHT);
int64_t match_score_index = -1;
bool should_skip = false;
auto compute_sort_scores_op = compute_sort_scores(sort_fields_std, sort_order, field_values,
geopoint_indices, seq_id, references, eval_filter_indexes,
match_score, scores, match_score_index, should_skip,
vec_result.second, collection_name);
if (!compute_sort_scores_op.ok()) {
return compute_sort_scores_op;
}
if(should_skip) {
continue;
}
uint64_t distinct_id = seq_id;
if (group_limit != 0) {
distinct_id = 1;
for(auto& kv : group_by_field_it_vec) {
get_distinct_id(kv.it, seq_id, kv.is_array, group_missing_values, distinct_id);
}
if(excluded_group_ids.count(distinct_id) != 0) {
continue;
}
}
KV kv(searched_queries.size(), seq_id, distinct_id, match_score_index, scores, std::move(references));
kv.text_match_score = 0;
kv.vector_distance = vec_result.second;
auto ret = topster->add(&kv);
vec_search_ids.push_back(seq_id);
if(group_limit != 0 && ret < 2) {
groups_processed[distinct_id]++;
}
}
}
search_cutoff = search_cutoff || filter_result_iterator->validity == filter_result_iterator_t::timed_out;
if(!vec_search_ids.empty()) {
uint32_t* new_all_result_ids = nullptr;
all_result_ids_len = ArrayUtils::or_scalar(all_result_ids, all_result_ids_len, &vec_search_ids[0],
vec_search_ids.size(), &new_all_result_ids);
delete[] all_result_ids;
all_result_ids = new_all_result_ids;
}
}
}
/*auto timeMillis0 = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - begin0).count();
LOG(INFO) << "Time taken for multi-field aggregation: " << timeMillis0 << "ms";*/
}
//LOG(INFO) << "topster size: " << topster->size;
process_search_results:
//for hybrid search, optionally compute aux scores
if(!vector_query.field_name.empty() && !is_wildcard_query && rerank_hybrid_matches) {
compute_aux_scores(topster, the_fields, field_query_tokens[0].q_include_tokens, searched_queries.size(),
sort_fields_std, sort_order, vector_query);
compute_aux_scores(curated_topster, the_fields, field_query_tokens[0].q_include_tokens, searched_queries.size(),
sort_fields_std, sort_order, vector_query);
}
topster->sort();
curated_topster->sort();
Collection::populate_result_kvs(topster, raw_result_kvs, groups_processed, sort_fields_std);
Collection::populate_result_kvs(curated_topster, override_result_kvs, groups_processed, sort_fields_std);
std::vector<uint32_t> top_k_result_ids, top_k_curated_result_ids;
std::vector<facet> top_k_facets;
delete [] exclude_token_ids;
delete [] excluded_result_ids;
bool estimate_facets = (facet_sample_percent > 0 && facet_sample_percent < 100 &&
all_result_ids_len > facet_sample_threshold);
bool is_wildcard_no_filter_query = is_wildcard_non_phrase_query && !filter_by_provided && vector_query.field_name.empty();
if(!facets.empty()) {
const size_t num_threads = std::min(concurrency, all_result_ids_len);
const size_t window_size = (num_threads == 0) ? 0 :
(all_result_ids_len + num_threads - 1) / num_threads; // rounds up
size_t num_processed = 0;
std::mutex m_process;
std::condition_variable cv_process;
std::vector<facet_info_t> facet_infos(facets.size());
compute_facet_infos(facets, facet_query, facet_query_num_typos, all_result_ids, all_result_ids_len,
group_by_fields, group_limit, is_wildcard_no_filter_query,
max_candidates, facet_infos, facet_index_types);
std::vector<std::vector<facet>> facet_batches(num_threads);
std::vector<std::vector<facet>> value_facets(concurrency);
size_t num_value_facets = 0;
for(size_t i = 0; i < facets.size(); i++) {
const auto& this_facet = facets[i];
//facets which have top_k set to true are processed separately
if(this_facet.is_top_k) {
top_k_facets.emplace_back(this_facet.field_name, this_facet.orig_index, this_facet.is_top_k, this_facet.facet_range_map,
this_facet.is_range_query, this_facet.is_sort_by_alpha, this_facet.sort_order, this_facet.sort_field);
continue;
}
if(facet_infos[i].use_value_index) {
// a value-indexed facet is processed whole on a single thread (not batched across result slices)
value_facets[num_value_facets % num_threads].emplace_back(this_facet.field_name, this_facet.orig_index,
this_facet.is_top_k, this_facet.facet_range_map,
this_facet.is_range_query, this_facet.is_sort_by_alpha,
this_facet.sort_order, this_facet.sort_field);
num_value_facets++;
continue;
}
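// hash-based facets are batched: every thread gets its own copy of the facet and processes a slice of
// the result ids; the per-thread copies are merged back into `facets` via aggregate_facet()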
for(size_t j = 0; j < num_threads; j++) {
facet_batches[j].emplace_back(this_facet.field_name, this_facet.orig_index, this_facet.is_top_k,
this_facet.facet_range_map, this_facet.is_range_query,
this_facet.is_sort_by_alpha, this_facet.sort_order, this_facet.sort_field);
}
}
size_t num_queued = 0;
size_t result_index = 0;
const auto parent_search_begin = search_begin_us;
const auto parent_search_stop_ms = search_stop_us;
auto parent_search_cutoff = search_cutoff;
//auto beginF = std::chrono::high_resolution_clock::now();
if(top_k_facets.size() > 0) {
get_top_k_result_ids(raw_result_kvs, top_k_result_ids);
do_facets(top_k_facets, facet_query, estimate_facets, facet_sample_percent,
facet_infos, group_limit, group_by_fields, group_missing_values, top_k_result_ids.data(),
top_k_result_ids.size(), max_facet_values, is_wildcard_no_filter_query,
facet_index_types);
}
for(size_t thread_id = 0; thread_id < num_threads && result_index < all_result_ids_len; thread_id++) {
size_t batch_res_len = window_size;
if(result_index + window_size > all_result_ids_len) {
batch_res_len = all_result_ids_len - result_index;
}
if(facet_batches[thread_id].empty()) {
continue;
}
uint32_t* batch_result_ids = all_result_ids + result_index;
num_queued++;
thread_pool->enqueue([this, thread_id, &facets, &facet_batches, &facet_query, group_limit, group_by_fields,
batch_result_ids, batch_res_len, &facet_infos, max_facet_values,
is_wildcard_no_filter_query, estimate_facets,
facet_sample_percent, group_missing_values,
&parent_search_begin, &parent_search_stop_ms, &parent_search_cutoff,
&num_processed, &m_process, &cv_process, &facet_index_types]() {
search_begin_us = parent_search_begin;
search_stop_us = parent_search_stop_ms;
search_cutoff = false;
auto fq = facet_query;
do_facets(facet_batches[thread_id], fq, estimate_facets, facet_sample_percent,
facet_infos, group_limit, group_by_fields, group_missing_values,
batch_result_ids, batch_res_len, max_facet_values,
is_wildcard_no_filter_query, facet_index_types);
std::unique_lock<std::mutex> lock(m_process);
auto& facet_batch = facet_batches[thread_id];
for(auto& this_facet : facet_batch) {
auto& acc_facet = facets[this_facet.orig_index];
aggregate_facet(group_limit, this_facet, acc_facet);
}
num_processed++;
parent_search_cutoff = parent_search_cutoff || search_cutoff;
cv_process.notify_one();
});
result_index += batch_res_len;
}
// do value-based faceting in parallel across fields, but each field is faceted over the entire result set
for(size_t thread_id = 0; thread_id < concurrency && num_value_facets > 0; thread_id++) {
if(value_facets[thread_id].empty()) {
continue;
}
num_queued++;
thread_pool->enqueue([this, thread_id, &facets, &value_facets, &facet_query, group_limit, group_by_fields,
all_result_ids, all_result_ids_len, &facet_infos, max_facet_values,
is_wildcard_no_filter_query, estimate_facets,
facet_sample_percent, group_missing_values,
&parent_search_begin, &parent_search_stop_ms, &parent_search_cutoff,
&num_processed, &m_process, &cv_process, facet_index_types]() {
search_begin_us = parent_search_begin;
search_stop_us = parent_search_stop_ms;
search_cutoff = false;
auto fq = facet_query;
do_facets({value_facets[thread_id]}, fq, estimate_facets, facet_sample_percent,
facet_infos, group_limit, group_by_fields, group_missing_values,
all_result_ids, all_result_ids_len, max_facet_values,
is_wildcard_no_filter_query, facet_index_types);
std::unique_lock<std::mutex> lock(m_process);
for(auto& this_facet : value_facets[thread_id]) {
auto& acc_facet = facets[this_facet.orig_index];
aggregate_facet(group_limit, this_facet, acc_facet);
}
num_processed++;
parent_search_cutoff = parent_search_cutoff || search_cutoff;
cv_process.notify_one();
});
}
std::unique_lock<std::mutex> lock_process(m_process);
cv_process.wait(lock_process, [&](){ return num_processed == num_queued; });
search_cutoff = parent_search_cutoff;
for(auto & acc_facet: facets) {
for(auto& facet_kv: acc_facet.result_map) {
if(group_limit) {
facet_kv.second.count = acc_facet.hash_groups[facet_kv.first].size();
}
if(estimate_facets) {
facet_kv.second.count = size_t(double(facet_kv.second.count) * (100.0f / facet_sample_percent));
}
}
// value_result_map already contains the scaled counts
if(estimate_facets) {
acc_facet.sampled = true;
}
}
/*long long int timeMillisF = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - beginF).count();
LOG(INFO) << "Time for faceting: " << timeMillisF;*/
}
std::vector<facet_info_t> facet_infos(facets.size());
compute_facet_infos(facets, facet_query, facet_query_num_typos,
&included_ids_vec[0], included_ids_vec.size(), group_by_fields,
group_limit, is_wildcard_no_filter_query,
max_candidates, facet_infos, facet_index_types);
do_facets(facets, facet_query, estimate_facets, facet_sample_percent,
facet_infos, group_limit, group_by_fields, group_missing_values, &included_ids_vec[0],
included_ids_vec.size(), max_facet_values, is_wildcard_no_filter_query,
facet_index_types);
if(top_k_facets.size() > 0) {
get_top_k_result_ids(override_result_kvs, top_k_curated_result_ids);
do_facets(top_k_facets, facet_query, estimate_facets, facet_sample_percent,
facet_infos, group_limit, group_by_fields, group_missing_values, top_k_curated_result_ids.data(),
top_k_curated_result_ids.size(), max_facet_values, is_wildcard_no_filter_query,
facet_index_types);
}
all_result_ids_len += curated_topster->size;
if(!included_ids_map.empty() && group_limit != 0) {
for (auto &acc_facet: facets) {
for (auto &facet_kv: acc_facet.result_map) {
facet_kv.second.count = acc_facet.hash_groups[facet_kv.first].size();
if (estimate_facets) {
facet_kv.second.count = size_t(double(facet_kv.second.count) * (100.0f / facet_sample_percent));
}
}
if (estimate_facets) {
acc_facet.sampled = true;
}
}
}
//copy top_k facets data
if(!top_k_facets.empty()) {
for(auto& this_facet : top_k_facets) {
auto& acc_facet = facets[this_facet.orig_index];
aggregate_facet(group_limit, this_facet, acc_facet);
}
}
delete [] all_result_ids;
//LOG(INFO) << "all_result_ids_len " << all_result_ids_len << " for index " << name;
//long long int timeMillis = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - begin).count();
//LOG(INFO) << "Time taken for result calc: " << timeMillis << "ms";
return Option(true);
}
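// merges a facet computed by a worker thread (`this_facet`) into the accumulated facet (`acc_facet`):
// counts are summed, group hash sets are unioned and numeric facet stats (count/sum/min/max) are folded in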
void Index::aggregate_facet(const size_t group_limit, facet& this_facet, facet& acc_facet) const {
acc_facet.is_intersected = this_facet.is_intersected;
acc_facet.is_sort_by_alpha = this_facet.is_sort_by_alpha;
acc_facet.sort_order = this_facet.sort_order;
acc_facet.sort_field = this_facet.sort_field;
for(auto & facet_kv: this_facet.result_map) {
uint32_t fhash = 0;
if(group_limit) {
fhash = facet_kv.first;
// we have to add all group sets
acc_facet.hash_groups[fhash].insert(
this_facet.hash_groups[fhash].begin(),
this_facet.hash_groups[fhash].end()
);
} else {
size_t count = 0;
if (acc_facet.result_map.count(facet_kv.first) == 0) {
// not found, so set it
count = facet_kv.second.count;
} else {
count = acc_facet.result_map[facet_kv.first].count + facet_kv.second.count;
}
acc_facet.result_map[facet_kv.first].count = count;
}
acc_facet.result_map[facet_kv.first].doc_id = facet_kv.second.doc_id;
acc_facet.result_map[facet_kv.first].array_pos = facet_kv.second.array_pos;
acc_facet.result_map[facet_kv.first].sort_field_val = facet_kv.second.sort_field_val;
acc_facet.hash_tokens[facet_kv.first] = this_facet.hash_tokens[facet_kv.first];
}
for(auto& facet_kv: this_facet.value_result_map) {
size_t count = 0;
if(acc_facet.value_result_map.count(facet_kv.first) == 0) {
// not found, so set it
count = facet_kv.second.count;
} else {
count = acc_facet.value_result_map[facet_kv.first].count + facet_kv.second.count;
}
acc_facet.value_result_map[facet_kv.first].count = count;
acc_facet.value_result_map[facet_kv.first].doc_id = facet_kv.second.doc_id;
acc_facet.value_result_map[facet_kv.first].array_pos = facet_kv.second.array_pos;
acc_facet.fvalue_tokens[facet_kv.first] = this_facet.fvalue_tokens[facet_kv.first];
}
if(this_facet.stats.fvcount != 0) {
acc_facet.stats.fvcount += this_facet.stats.fvcount;
acc_facet.stats.fvsum += this_facet.stats.fvsum;
acc_facet.stats.fvmax = std::max(acc_facet.stats.fvmax, this_facet.stats.fvmax);
acc_facet.stats.fvmin = std::min(acc_facet.stats.fvmin, this_facet.stats.fvmin);
}
}
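// resolves curated (pinned/hidden) ids: optionally filters pinned hits, excludes whole groups that contain a
// pinned hit when grouping, maps each position to its pinned seq ids and shifts hits up when positions fall vacant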
void Index::process_curated_ids(const std::vector<std::pair<uint32_t, uint32_t>>& included_ids,
const std::vector<uint32_t>& excluded_ids,
const std::vector<std::string>& group_by_fields, const size_t group_limit,
const bool group_missing_values,
const bool filter_curated_hits, filter_result_iterator_t* const filter_result_iterator,
std::set<uint32_t>& curated_ids,
std::map<size_t, std::map<size_t, uint32_t>>& included_ids_map,
std::vector<uint32_t>& included_ids_vec,
std::unordered_set<uint32_t>& excluded_group_ids) const {
for(const auto& seq_id_pos: included_ids) {
included_ids_vec.push_back(seq_id_pos.first);
}
//sort the included ids to keep unidirectional iterators valid
std::sort(included_ids_vec.begin(), included_ids_vec.end());
if(group_limit != 0) {
// if one `id` of a group is present in curated hits, we have to exclude that entire group from results
auto group_by_field_it_vec = get_group_by_field_iterators(group_by_fields);
for(auto seq_id: included_ids_vec) {
uint64_t distinct_id = 1;
for(auto& kv : group_by_field_it_vec) {
get_distinct_id(kv.it, seq_id, kv.is_array, group_missing_values, distinct_id);
}
excluded_group_ids.emplace(distinct_id);
}
}
// if `filter_curated_hits` is enabled, we will remove curated hits that don't match filter condition
std::set<uint32_t> included_ids_set;
if(!filter_result_iterator->is_filter_provided() || !filter_curated_hits) {
included_ids_set.insert(included_ids_vec.begin(), included_ids_vec.end());
} else if(filter_result_iterator->validity == filter_result_iterator_t::valid) {
for (const auto &included_id: included_ids_vec) {
auto result = filter_result_iterator->is_valid(included_id);
if (result == -1) {
break;
}
if (result == 1) {
included_ids_set.insert(included_id);
}
}
}
included_ids_vec.clear();
included_ids_vec.insert(included_ids_vec.begin(), included_ids_set.begin(), included_ids_set.end());
std::sort(included_ids_vec.begin(), included_ids_vec.end());
std::map<size_t, std::vector<uint32_t>> included_ids_grouped; // pos -> seq_ids
std::vector<uint32_t> all_positions;
for(const auto& seq_id_pos: included_ids) {
all_positions.push_back(seq_id_pos.second);
if(included_ids_set.count(seq_id_pos.first) == 0) {
continue;
}
included_ids_grouped[seq_id_pos.second].push_back(seq_id_pos.first);
}
for(const auto& pos_ids: included_ids_grouped) {
size_t outer_pos = pos_ids.first;
size_t ids_per_pos = std::max(size_t(1), group_limit);
auto num_inner_ids = std::min(ids_per_pos, pos_ids.second.size());
for(size_t inner_pos = 0; inner_pos < num_inner_ids; inner_pos++) {
auto seq_id = pos_ids.second[inner_pos];
included_ids_map[outer_pos][inner_pos] = seq_id;
curated_ids.insert(seq_id);
}
}
curated_ids.insert(excluded_ids.begin(), excluded_ids.end());
if(all_positions.size() > included_ids_map.size()) {
// Some curated IDs may have been removed via filtering or simply don't exist.
// We have to shift lower placed hits upwards to fill those positions.
std::sort(all_positions.begin(), all_positions.end());
all_positions.erase(unique(all_positions.begin(), all_positions.end()), all_positions.end());
std::map<size_t, std::map<size_t, uint32_t>> new_included_ids_map;
auto included_id_it = included_ids_map.begin();
auto all_pos_it = all_positions.begin();
while(included_id_it != included_ids_map.end()) {
new_included_ids_map[*all_pos_it] = included_id_it->second;
all_pos_it++;
included_id_it++;
}
included_ids_map = new_included_ids_map;
}
}
Option<bool> Index::fuzzy_search_fields(const std::vector<search_field_t>& the_fields,
const std::vector<token_t>& query_tokens,
const std::vector<token_t>& dropped_tokens,
const text_match_type_t match_type,
const uint32_t* excluded_result_ids,
size_t excluded_result_ids_size,
filter_result_iterator_t* const filter_result_iterator,
const std::vector<uint32_t>& curated_ids,
const std::unordered_set<uint32_t>& excluded_group_ids,
const std::vector<sort_by> & sort_fields,
const std::vector<uint32_t>& num_typos,
std::vector<std::vector<art_leaf*>> & searched_queries,
tsl::htrie_map<char, token_leaf>& qtoken_set,
Topster*& topster, spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed,
uint32_t*& all_result_ids, size_t & all_result_ids_len,
const size_t group_limit, const std::vector<std::string>& group_by_fields,
const bool group_missing_values,
bool prioritize_exact_match,
const bool prioritize_token_position,
const bool prioritize_num_matching_fields,
std::set<uint64>& query_hashes,
const token_ordering token_order,
const std::vector<bool>& prefixes,
const size_t typo_tokens_threshold,
const bool exhaustive_search,
const size_t max_candidates,
size_t min_len_1typo,
size_t min_len_2typo,
int syn_orig_num_tokens,
const int* sort_order,
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values,
const std::vector<size_t>& geopoint_indices,
const std::string& collection_name,
bool enable_typos_for_numerical_tokens,
bool enable_typos_for_alpha_numerical_tokens) const {
// Return early in case filter_by is provided but it matches no docs.
if (filter_result_iterator != nullptr && filter_result_iterator->is_filter_provided() &&
filter_result_iterator->approx_filter_ids_length == 0) {
return Option<bool>(true);
}
// NOTE: `query_tokens` holds the tokens being searched, while `dropped_tokens` holds any tokens dropped from the original query
// To prevent us from doing ART search repeatedly as we iterate through possible corrections
spp::sparse_hash_map<std::string, std::vector<std::string>> token_cost_cache;
std::vector<std::vector<int>> token_to_costs;
for(size_t stoken_index=0; stoken_index < query_tokens.size(); stoken_index++) {
const std::string& token = query_tokens[stoken_index].value;
std::vector<int> all_costs;
// This ensures that we don't end up doing a cost of 1 for a single char etc.
int bounded_cost = get_bounded_typo_cost(2, token , token.length(), min_len_1typo, min_len_2typo,
enable_typos_for_numerical_tokens, enable_typos_for_alpha_numerical_tokens);
for(int cost = 0; cost <= bounded_cost; cost++) {
all_costs.push_back(cost);
}
token_to_costs.push_back(all_costs);
}
// stores candidates for each token, i.e. i-th index would have all possible tokens with a cost of "c"
std::vector<tok_candidates> token_candidates_vec;
std::set<std::string> unique_tokens;
const size_t num_search_fields = std::min(the_fields.size(), (size_t) FIELD_LIMIT_NUM);
auto product = []( long long a, std::vector<int>& b ) { return a*b.size(); };
long long n = 0;
long long int N = token_to_costs.size() > 30 ? 1 :
std::accumulate(token_to_costs.begin(), token_to_costs.end(), 1LL, product);
const long long combination_limit = exhaustive_search ? Index::COMBINATION_MAX_LIMIT : Index::COMBINATION_MIN_LIMIT;
while(n < N && n < combination_limit) {
RETURN_CIRCUIT_BREAKER_OP
//LOG(INFO) << "fuzzy_search_fields, n: " << n;
// Outer loop generates combinations of costs in [0, max_cost] for each token
// E.g. for a 3-token query: [0, 0, 0], [0, 0, 1], [0, 1, 1] etc.
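// decode `n` as a mixed-radix number: the i-th digit's base is the number of costs still available for
// token i, and the digits give this iteration's typo cost for each token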
std::vector<uint32_t> costs(token_to_costs.size());
ldiv_t q { n, 0 };
for(long long i = (token_to_costs.size() - 1); 0 <= i ; --i ) {
q = ldiv(q.quot, token_to_costs[i].size());
costs[i] = token_to_costs[i][q.rem];
}
unique_tokens.clear();
token_candidates_vec.clear();
size_t token_index = 0;
size_t num_keyword_matches = 0;
while(token_index < query_tokens.size()) {
// For each token, look up the generated cost for this iteration and search using that cost
const std::string& token = query_tokens[token_index].value;
const std::string token_cost_hash = token + std::to_string(costs[token_index]);
std::vector<std::string> leaf_tokens;
if(token_cost_cache.count(token_cost_hash) != 0) {
leaf_tokens = token_cost_cache[token_cost_hash];
} else {
//auto begin = std::chrono::high_resolution_clock::now();
// Prefix query with a preceding token should be handled in such a way that we give preference to
// possible phrase continuation. Example: "steve j" for "steve jobs" name field query. To do this,
// we will first attempt to match the prefix with the most "popular" fields of the preceding token.
// Tokens matched from popular fields will also be searched across other query fields.
// Only when we find *no results* for such an expansion, we will attempt cross field matching.
bool last_token = query_tokens.size() > 1 && dropped_tokens.empty() &&
(token_index == (query_tokens.size() - 1));
std::vector<size_t> query_field_ids(num_search_fields);
for(size_t field_id = 0; field_id < num_search_fields; field_id++) {
query_field_ids[field_id] = field_id;
}
std::vector<size_t> popular_field_ids; // fields containing the token most across documents
if(last_token) {
popular_fields_of_token(search_index,
token_candidates_vec.back().candidates[0],
the_fields, num_search_fields, popular_field_ids);
if(popular_field_ids.empty()) {
break;
}
}
const std::vector<size_t>& field_ids = last_token ? popular_field_ids : query_field_ids;
for(size_t field_id: field_ids) {
// NOTE: when accessing other field ordered properties like prefixes or num_typos we have to index
// them by `the_field.orig_index` since the original fields could be reordered on their weights.
auto& the_field = the_fields[field_id];
const bool field_prefix = the_fields[field_id].prefix;
const bool prefix_search = field_prefix && query_tokens[token_index].is_prefix_searched;
const size_t token_len = prefix_search ? (int) token.length() : (int) token.length() + 1;
/*LOG(INFO) << "Searching for field: " << the_field.name << ", token:"
<< token << " - cost: " << costs[token_index] << ", prefix_search: " << prefix_search;*/
int64_t field_num_typos = the_fields[field_id].num_typos;
const auto& search_field = search_schema.at(the_field.name);
auto& locale = search_field.locale;
if(locale != "" && (locale == "zh" || locale == "ko" || locale == "ja")) {
// disable fuzzy trie traversal for CJK locales
field_num_typos = 0;
}
if(costs[token_index] > field_num_typos) {
continue;
}
const auto& prev_token = last_token ? token_candidates_vec.back().candidates[0] : "";
std::vector<art_leaf*> field_leaves;
art_fuzzy_search_i(search_index.at(search_field.faceted_name()),
(const unsigned char *) token.c_str(), token_len,
costs[token_index], costs[token_index], max_candidates, token_order, prefix_search,
last_token, prev_token, filter_result_iterator, field_leaves, unique_tokens);
filter_result_iterator->reset();
if (filter_result_iterator->validity == filter_result_iterator_t::timed_out) {
search_cutoff = true;
return Option<bool>(true);
}
/*auto timeMillis = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - begin).count();
LOG(INFO) << "Time taken for fuzzy search: " << timeMillis << "ms";*/
//LOG(INFO) << "Searching field: " << the_field.name << ", token:" << token << ", sz: " << field_leaves.size();
if(field_leaves.empty()) {
// look at the next field
continue;
}
for(size_t i = 0; i < field_leaves.size(); i++) {
auto leaf = field_leaves[i];
std::string tok(reinterpret_cast<char*>(leaf->key), leaf->key_len - 1);
leaf_tokens.push_back(tok);
}
token_cost_cache.emplace(token_cost_hash, leaf_tokens);
if(leaf_tokens.size() >= max_candidates) {
goto token_done;
}
}
if(last_token && num_search_fields > 1 && leaf_tokens.size() < max_candidates) {
// matching previous token has failed, look at all fields
for(size_t field_id: query_field_ids) {
auto& the_field = the_fields[field_id];
const bool field_prefix = the_fields[field_id].prefix;
const bool prefix_search = field_prefix && query_tokens[token_index].is_prefix_searched;
const size_t token_len = prefix_search ? (int) token.length() : (int) token.length() + 1;
int64_t field_num_typos = the_fields[field_id].num_typos;
const auto& search_field = search_schema.at(the_field.name);
auto& locale = search_field.locale;
if(locale != "" && locale != "en" && locale != "th" && !Tokenizer::is_cyrillic(locale)) {
// disable fuzzy trie traversal for non-english locales
field_num_typos = 0;
}
if(costs[token_index] > field_num_typos) {
continue;
}
std::vector<art_leaf*> field_leaves;
art_fuzzy_search_i(search_index.at(the_field.name), (const unsigned char *) token.c_str(), token_len,
costs[token_index], costs[token_index], max_candidates, token_order, prefix_search,
false, "", filter_result_iterator, field_leaves, unique_tokens);
filter_result_iterator->reset();
if (filter_result_iterator->validity == filter_result_iterator_t::timed_out) {
search_cutoff = true;
return Option<bool>(true);
}
if(field_leaves.empty()) {
// look at the next field
continue;
}
for(size_t i = 0; i < field_leaves.size(); i++) {
auto leaf = field_leaves[i];
std::string tok(reinterpret_cast<char*>(leaf->key), leaf->key_len - 1);
leaf_tokens.push_back(tok);
}
token_cost_cache.emplace(token_cost_hash, leaf_tokens);
if(leaf_tokens.size() >= max_candidates) {
goto token_done;
}
}
}
}
token_done:
if(!leaf_tokens.empty()) {
//log_leaves(costs[token_index], token, leaves);
token_candidates_vec.push_back(tok_candidates{query_tokens[token_index], costs[token_index],
query_tokens[token_index].is_prefix_searched, leaf_tokens});
} else {
// No result at `cost = costs[token_index]`. Remove `cost` for token and re-do combinations
auto it = std::find(token_to_costs[token_index].begin(), token_to_costs[token_index].end(), costs[token_index]);
if(it != token_to_costs[token_index].end()) {
token_to_costs[token_index].erase(it);
// when no more costs are left for this token
if(token_to_costs[token_index].empty()) {
// we cannot proceed further, as this token is not found within cost limits
// and dropping of tokens is done elsewhere.
return Option<bool>(true);
}
}
// Continue the outer loop with a new cost combination
n = -1;
N = std::accumulate(token_to_costs.begin(), token_to_costs.end(), 1LL, product);
goto resume_typo_loop;
}
token_index++;
}
if(token_candidates_vec.size() == query_tokens.size()) {
std::vector<uint32_t> id_buff;
auto search_all_candidates_op = search_all_candidates(num_search_fields, match_type, the_fields,
filter_result_iterator,
excluded_result_ids, excluded_result_ids_size, excluded_group_ids,
sort_fields, token_candidates_vec, searched_queries, qtoken_set,
dropped_tokens, topster,
groups_processed, num_keyword_matches, all_result_ids, all_result_ids_len,
typo_tokens_threshold, group_limit, group_by_fields,
group_missing_values, query_tokens,
num_typos, prefixes, prioritize_exact_match, prioritize_token_position,
prioritize_num_matching_fields, exhaustive_search, max_candidates,
syn_orig_num_tokens, sort_order, field_values, geopoint_indices,
query_hashes, id_buff, collection_name);
if (!search_all_candidates_op.ok()) {
return search_all_candidates_op;
}
if(id_buff.size() > 1) {
gfx::timsort(id_buff.begin(), id_buff.end());
id_buff.erase(std::unique( id_buff.begin(), id_buff.end() ), id_buff.end());
}
uint32_t* new_all_result_ids = nullptr;
all_result_ids_len = ArrayUtils::or_scalar(all_result_ids, all_result_ids_len, &id_buff[0],
id_buff.size(), &new_all_result_ids);
delete[] all_result_ids;
all_result_ids = new_all_result_ids;
}
resume_typo_loop:
auto results_count = group_limit != 0 ? groups_processed.size() : all_result_ids_len;
if(!exhaustive_search && (results_count >= typo_tokens_threshold ||
(num_keyword_matches >= typo_tokens_threshold && !curated_ids.empty()))) {
// if typo threshold is breached, we are done
// Also, if all the found hits overlap with curated hits, we will end up with result_count=0.
// We use num_keyword_matches to handle this scenario, as we should not end up
// looking for typo matches then.
return Option<bool>(true);
}
n++;
}
return Option<bool>(true);
}
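// looks up `previous_token` in every searched field and returns the field ids ordered by the number of
// documents containing that token (most popular field first)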
void Index::popular_fields_of_token(const spp::sparse_hash_map<std::string, art_tree*>& search_index,
const std::string& previous_token,
const std::vector<search_field_t>& the_fields,
const size_t num_search_fields,
std::vector<size_t>& popular_field_ids) {
const auto prev_token_c_str = (const unsigned char*) previous_token.c_str();
const int prev_token_len = (int) previous_token.size() + 1;
std::vector<std::pair<size_t, size_t>> field_id_doc_counts;
for(size_t i = 0; i < num_search_fields; i++) {
const std::string& field_name = the_fields[i].name;
auto leaf = static_cast<art_leaf*>(art_search(search_index.at(field_name), prev_token_c_str, prev_token_len));
if(!leaf) {
continue;
}
auto num_docs = posting_t::num_ids(leaf->values);
field_id_doc_counts.emplace_back(i, num_docs);
}
std::sort(field_id_doc_counts.begin(), field_id_doc_counts.end(), [](const auto& p1, const auto& p2) {
return p1.second > p2.second;
});
for(const auto& field_id_doc_count: field_id_doc_counts) {
popular_field_ids.push_back(field_id_doc_count.first);
}
}
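// collects the doc ids that contain `previous_token` in any of the searched fields (intersected with the
// filter and exclusions) and also ranks the fields by how many documents contain that token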
void Index::find_across_fields(const token_t& previous_token,
const std::string& previous_token_str,
const std::vector<search_field_t>& the_fields,
const size_t num_search_fields,
filter_result_iterator_t* const filter_result_iterator,
const uint32_t* exclude_token_ids, size_t exclude_token_ids_size,
std::vector<uint32_t>& prev_token_doc_ids,
std::vector<size_t>& top_prefix_field_ids) const {
// one iterator for each token, each underlying iterator contains results of token across multiple fields
std::vector<or_iterator_t> token_its;
// used to track plists that must be destructed once done
std::vector<posting_list_t*> expanded_plists;
result_iter_state_t istate(exclude_token_ids, exclude_token_ids_size, filter_result_iterator);
const bool prefix_search = previous_token.is_prefix_searched;
const uint32_t token_num_typos = previous_token.num_typos;
const bool token_prefix = previous_token.is_prefix_searched;
auto& token_str = previous_token_str;
auto token_c_str = (const unsigned char*) token_str.c_str();
const size_t token_len = token_str.size() + 1;
std::vector<posting_list_t::iterator_t> its;
std::vector<std::pair<size_t, size_t>> field_id_doc_counts;
for(size_t i = 0; i < num_search_fields; i++) {
const std::string& field_name = the_fields[i].name;
art_tree* tree = search_index.at(field_name);
art_leaf* leaf = static_cast<art_leaf*>(art_search(tree, token_c_str, token_len));
if(!leaf) {
continue;
}
/*LOG(INFO) << "Token: " << token_str << ", field_name: " << field_name
<< ", num_ids: " << posting_t::num_ids(leaf->values);*/
if(IS_COMPACT_POSTING(leaf->values)) {
auto compact_posting_list = COMPACT_POSTING_PTR(leaf->values);
posting_list_t* full_posting_list = compact_posting_list->to_full_posting_list();
expanded_plists.push_back(full_posting_list);
its.push_back(full_posting_list->new_iterator(nullptr, nullptr, i)); // moved, not copied
} else {
posting_list_t* full_posting_list = (posting_list_t*)(leaf->values);
its.push_back(full_posting_list->new_iterator(nullptr, nullptr, i)); // moved, not copied
}
field_id_doc_counts.emplace_back(i, posting_t::num_ids(leaf->values));
}
if(its.empty()) {
// this token does not have any match across *any* field: probably a typo
LOG(INFO) << "No matching field found for token: " << token_str;
return;
}
std::sort(field_id_doc_counts.begin(), field_id_doc_counts.end(), [](const auto& p1, const auto& p2) {
return p1.second > p2.second;
});
for(auto& field_id_doc_count: field_id_doc_counts) {
top_prefix_field_ids.push_back(field_id_doc_count.first);
}
or_iterator_t token_fields(its);
token_its.push_back(std::move(token_fields));
or_iterator_t::intersect(token_its, istate,
[&](const single_filter_result_t& filter_result, const std::vector<or_iterator_t>& its) {
auto& seq_id = filter_result.seq_id;
prev_token_doc_ids.push_back(seq_id);
});
for(posting_list_t* plist: expanded_plists) {
delete plist;
}
}
Option<bool> Index::search_across_fields(const std::vector<token_t>& query_tokens,
const std::vector<uint32_t>& num_typos,
const std::vector<bool>& prefixes,
const std::vector<search_field_t>& the_fields,
const size_t num_search_fields,
const text_match_type_t match_type,
const std::vector<sort_by>& sort_fields,
Topster*& topster,
spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed,
std::vector<std::vector<art_leaf*>>& searched_queries,
tsl::htrie_map<char, token_leaf>& qtoken_set,
const std::vector<token_t>& dropped_tokens,
const size_t group_limit,
const std::vector<std::string>& group_by_fields,
const bool group_missing_values,
const bool prioritize_exact_match,
const bool prioritize_token_position,
const bool prioritize_num_matching_fields,
filter_result_iterator_t* const filter_result_iterator,
const uint32_t total_cost, const int syn_orig_num_tokens,
const uint32_t* excluded_result_ids, size_t excluded_result_ids_size,
const std::unordered_set<uint32_t>& excluded_group_ids,
const int* sort_order,
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values,
const std::vector<size_t>& geopoint_indices,
std::vector<uint32_t>& id_buff,
size_t& num_keyword_matches,
uint32_t*& all_result_ids, size_t& all_result_ids_len,
const std::string& collection_name) const {
std::vector<art_leaf*> query_suggestion;
// one or_iterator for each token (across multiple fields)
std::vector<or_iterator_t> dropped_token_its;
// used to track plists that must be destructed once done
std::vector<posting_list_t*> expanded_dropped_plists;
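// dropped tokens do not take part in the intersection below, but when a matching doc also contains a
// dropped token, that token still contributes to the tokens-matched count and to field-level scoring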
for(auto& dropped_token: dropped_tokens) {
auto& token = dropped_token.value;
auto token_c_str = (const unsigned char*) token.c_str();
// convert token from each field into an or_iterator
std::vector<posting_list_t::iterator_t> its;
for(size_t i = 0; i < the_fields.size(); i++) {
const std::string& field_name = the_fields[i].name;
art_tree* tree = search_index.at(the_fields[i].str_name);
art_leaf* leaf = static_cast<art_leaf*>(art_search(tree, token_c_str, token.size()+1));
if(!leaf) {
continue;
}
/*LOG(INFO) << "Token: " << token << ", field_name: " << field_name
<< ", num_ids: " << posting_t::num_ids(leaf->values);*/
if(IS_COMPACT_POSTING(leaf->values)) {
auto compact_posting_list = COMPACT_POSTING_PTR(leaf->values);
posting_list_t* full_posting_list = compact_posting_list->to_full_posting_list();
expanded_dropped_plists.push_back(full_posting_list);
its.push_back(full_posting_list->new_iterator(nullptr, nullptr, i)); // moved, not copied
} else {
posting_list_t* full_posting_list = (posting_list_t*)(leaf->values);
its.push_back(full_posting_list->new_iterator(nullptr, nullptr, i)); // moved, not copied
}
}
or_iterator_t token_fields(its);
dropped_token_its.push_back(std::move(token_fields));
}
// one iterator for each token, each underlying iterator contains results of token across multiple fields
std::vector<or_iterator_t> token_its;
// used to track plists that must be destructed once done
std::vector<posting_list_t*> expanded_plists;
result_iter_state_t istate(excluded_result_ids, excluded_result_ids_size, filter_result_iterator);
// for each token, find the posting lists across all query_by fields
for(size_t ti = 0; ti < query_tokens.size(); ti++) {
const uint32_t token_num_typos = query_tokens[ti].num_typos;
const bool token_prefix = query_tokens[ti].is_prefix_searched;
auto& token_str = query_tokens[ti].value;
auto token_c_str = (const unsigned char*) token_str.c_str();
const size_t token_len = token_str.size() + 1;
std::vector<posting_list_t::iterator_t> its;
for(size_t i = 0; i < num_search_fields; i++) {
const std::string& field_name = the_fields[i].name;
const uint32_t field_num_typos = the_fields[i].num_typos;
const bool field_prefix = the_fields[i].prefix;
if(token_num_typos > field_num_typos) {
// since the token can come from any field, we still have to respect per-field num_typos
continue;
}
if(token_prefix && !field_prefix) {
// even though this token is an outcome of prefix search, we can't use it for this field, since
// this field has prefix search disabled.
continue;
}
art_tree* tree = search_index.at(the_fields[i].str_name);
art_leaf* leaf = static_cast<art_leaf*>(art_search(tree, token_c_str, token_len));
if(!leaf) {
continue;
}
query_suggestion.push_back(leaf);
/*LOG(INFO) << "Token: " << token_str << ", field_name: " << field_name
<< ", num_ids: " << posting_t::num_ids(leaf->values);*/
if(IS_COMPACT_POSTING(leaf->values)) {
auto compact_posting_list = COMPACT_POSTING_PTR(leaf->values);
posting_list_t* full_posting_list = compact_posting_list->to_full_posting_list();
expanded_plists.push_back(full_posting_list);
its.push_back(full_posting_list->new_iterator(nullptr, nullptr, i)); // moved, not copied
} else {
posting_list_t* full_posting_list = (posting_list_t*)(leaf->values);
its.push_back(full_posting_list->new_iterator(nullptr, nullptr, i)); // moved, not copied
}
}
if(its.empty()) {
// this token does not have any match across *any* field: probably a typo
LOG(INFO) << "No matching field found for token: " << token_str;
continue;
}
or_iterator_t token_fields(its);
token_its.push_back(std::move(token_fields));
}
std::vector<uint32_t> result_ids;
std::vector<uint32_t> eval_filter_indexes;
Option<bool> status(true);
auto group_by_field_it_vec = get_group_by_field_iterators(group_by_fields);
or_iterator_t::intersect(token_its, istate,
[&](single_filter_result_t& filter_result, const std::vector<or_iterator_t>& its) {
auto& seq_id = filter_result.seq_id;
if(topster == nullptr) {
result_ids.push_back(seq_id);
return ;
}
auto references = std::move(filter_result.reference_filter_results);
//LOG(INFO) << "seq_id: " << seq_id;
// Convert [token -> fields] orientation to [field -> tokens] orientation
std::vector<std::vector<posting_list_t::iterator_t>> field_to_tokens(num_search_fields);
for(size_t ti = 0; ti < its.size(); ti++) {
const or_iterator_t& token_fields_iters = its[ti];
const std::vector<posting_list_t::iterator_t>& field_iters = token_fields_iters.get_its();
for(size_t fi = 0; fi < field_iters.size(); fi++) {
const posting_list_t::iterator_t& field_iter = field_iters[fi];
if(field_iter.id() == seq_id && field_iter.get_field_id() < num_search_fields) {
// not all fields might contain a given token
field_to_tokens[field_iter.get_field_id()].push_back(field_iter.clone());
}
}
}
size_t query_len = query_tokens.size();
// check if seq_id exists in any of the dropped_token iters
for(size_t ti = 0; ti < dropped_token_its.size(); ti++) {
or_iterator_t& token_fields_iters = dropped_token_its[ti];
if(token_fields_iters.skip_to(seq_id) && token_fields_iters.id() == seq_id) {
query_len++;
const std::vector<posting_list_t::iterator_t>& field_iters = token_fields_iters.get_its();
for(size_t fi = 0; fi < field_iters.size(); fi++) {
const posting_list_t::iterator_t& field_iter = field_iters[fi];
if(field_iter.id() == seq_id && field_iter.get_field_id() < num_search_fields) {
// not all fields might contain a given token
field_to_tokens[field_iter.get_field_id()].push_back(field_iter.clone());
}
}
}
}
if(syn_orig_num_tokens != -1) {
query_len = syn_orig_num_tokens;
}
int64_t best_field_match_score = 0, best_field_weight = 0;
int64_t sum_field_weighted_score = 0;
uint32_t num_matching_fields = 0;
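// compute a text-match score for every field that contains at least one of the tokens; depending on
// match_type we keep the best score, the score of the best-weighted field, or the weighted sum of scores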
for(size_t fi = 0; fi < field_to_tokens.size(); fi++) {
const std::vector<posting_list_t::iterator_t>& token_postings = field_to_tokens[fi];
if(token_postings.empty()) {
continue;
}
const int64_t field_weight = the_fields[fi].weight;
const bool field_is_array = search_schema.at(the_fields[fi].name).is_array();
int64_t field_match_score = 0;
bool single_exact_query_token = false;
if(total_cost == 0 && query_tokens.size() == 1) {
// does this candidate suggestion token match query token exactly?
single_exact_query_token = true;
}
score_results2(sort_fields, searched_queries.size(), fi, field_is_array,
total_cost, field_match_score,
seq_id, sort_order,
prioritize_exact_match, single_exact_query_token, prioritize_token_position,
query_tokens.size(), syn_orig_num_tokens, token_postings);
if(match_type == max_score && field_match_score > best_field_match_score) {
best_field_match_score = field_match_score;
best_field_weight = field_weight;
}
if(match_type == max_weight && field_weight > best_field_weight) {
best_field_weight = field_weight;
best_field_match_score = field_match_score;
}
if(match_type == sum_score) {
sum_field_weighted_score += (field_weight * field_match_score);
}
num_matching_fields++;
}
uint64_t distinct_id = seq_id;
if(group_limit != 0) {
distinct_id = 1;
for(auto& kv : group_by_field_it_vec) {
get_distinct_id(kv.it, seq_id, kv.is_array, group_missing_values, distinct_id);
}
if(excluded_group_ids.count(distinct_id) != 0) {
return;
}
}
int64_t scores[3] = {0};
int64_t match_score_index = -1;
bool should_skip = false;
auto compute_sort_scores_op = compute_sort_scores(sort_fields, sort_order, field_values, geopoint_indices,
seq_id, references, eval_filter_indexes, best_field_match_score,
scores, match_score_index, should_skip, 0, collection_name);
if (!compute_sort_scores_op.ok()) {
status = Option<bool>(compute_sort_scores_op.code(), compute_sort_scores_op.error());
return;
}
if(should_skip) {
return;
}
query_len = std::min<size_t>(15, query_len);
// NOTE: `query_len` is total tokens matched across fields.
// Within a field, only a subset can match
// MAX_SCORE
// [ sign | tokens_matched | max_field_score | max_field_weight | num_matching_fields ]
// [ 1 | 4 | 48 | 8 | 3 ] (64 bits)
// MAX_WEIGHT
// [ sign | tokens_matched | max_field_weight | max_field_score | num_matching_fields ]
// [ 1 | 4 | 8 | 48 | 3 ] (64 bits)
// SUM_SCORE
// [ sign | tokens_matched | sum_field_score | num_matching_fields ]
// [ 1 | 4 | 56 | 3 ] (64 bits)
auto max_field_weight = std::min<size_t>(FIELD_MAX_WEIGHT, best_field_weight);
num_matching_fields = std::min<size_t>(7, num_matching_fields);
if(!prioritize_num_matching_fields) {
num_matching_fields = 0;
}
uint64_t aggregated_score = 0;
if (match_type == max_score) {
aggregated_score = ((int64_t(query_len) << 59) |
(int64_t(best_field_match_score) << 11) |
(int64_t(max_field_weight) << 3) |
(int64_t(num_matching_fields) << 0));
} else if (match_type == max_weight) {
aggregated_score = ((int64_t(query_len) << 59) |
(int64_t(max_field_weight) << 51) |
(int64_t(best_field_match_score) << 3) |
(int64_t(num_matching_fields) << 0));
} else {
// sum_score
aggregated_score = ((int64_t(query_len) << 59) |
(int64_t(sum_field_weighted_score) << 3) |
(int64_t(num_matching_fields) << 0));
}
/*LOG(INFO) << "seq_id: " << seq_id << ", query_len: " << query_len
<< ", syn_orig_num_tokens: " << syn_orig_num_tokens
<< ", best_field_match_score: " << best_field_match_score
<< ", max_field_weight: " << max_field_weight
<< ", num_matching_fields: " << num_matching_fields
<< ", aggregated_score: " << aggregated_score;*/
KV kv(searched_queries.size(), seq_id, distinct_id, match_score_index, scores, std::move(references));
if(match_score_index != -1) {
kv.scores[match_score_index] = aggregated_score;
kv.text_match_score = aggregated_score;
}
int ret = topster->add(&kv);
if(group_limit != 0 && ret < 2) {
groups_processed[distinct_id]++;
}
result_ids.push_back(seq_id);
});
num_keyword_matches = istate.num_keyword_matches;
if (!status.ok()) {
for(posting_list_t* plist: expanded_plists) {
delete plist;
}
for(posting_list_t* plist: expanded_dropped_plists) {
delete plist;
}
return status;
}
id_buff.insert(id_buff.end(), result_ids.begin(), result_ids.end());
if(id_buff.size() > 100000) {
// prevents too many ORs during exhaustive searching
gfx::timsort(id_buff.begin(), id_buff.end());
id_buff.erase(std::unique( id_buff.begin(), id_buff.end() ), id_buff.end());
uint32_t* new_all_result_ids = nullptr;
all_result_ids_len = ArrayUtils::or_scalar(all_result_ids, all_result_ids_len, &id_buff[0],
id_buff.size(), &new_all_result_ids);
delete[] all_result_ids;
all_result_ids = new_all_result_ids;
id_buff.clear();
}
if(!result_ids.empty()) {
searched_queries.push_back(query_suggestion);
for(const auto& qtoken: query_tokens) {
qtoken_set.insert(qtoken.value, token_leaf(nullptr, qtoken.root_len, qtoken.num_typos, qtoken.is_prefix_searched));
}
}
for(posting_list_t* plist: expanded_plists) {
delete plist;
}
for(posting_list_t* plist: expanded_dropped_plists) {
delete plist;
}
return Option<bool>(true);
}
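// Computes the sort score of a single document (`seq_id`) for every sort_by field.
// Each entry of `field_values` is either a sentinel pointer (text match, seq_id, geo distance,
// string sort, eval expression, vector distance / vector query) or the field's numeric sort index.
// Reference sort_by fields are resolved to the referenced document's id first. `match_score_index`
// records which slot holds the text match score so that callers can later overwrite it with an
// aggregated score. A score is negated when sort_order[i] == -1 (ascending sort).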
Option<bool> Index::compute_sort_scores(const std::vector<sort_by>& sort_fields, const int* sort_order,
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values,
const std::vector<size_t>& geopoint_indices,
uint32_t seq_id, const std::map<basic_string<char>, reference_filter_result_t>& references,
std::vector<uint32_t>& filter_indexes, int64_t max_field_match_score, int64_t* scores,
int64_t& match_score_index, bool& should_skip, float vector_distance,
const std::string& collection_name) const {
int64_t geopoint_distances[3];
for(auto& i: geopoint_indices) {
auto const& sort_field = sort_fields[i];
S2LatLng reference_lat_lng;
GeoPoint::unpack_lat_lng(sort_field.geopoint, reference_lat_lng);
auto get_geo_distance_op = !sort_field.reference_collection_name.empty() ?
get_referenced_geo_distance(sort_field, seq_id, references, reference_lat_lng) :
get_geo_distance(sort_field.name, seq_id, reference_lat_lng);
if (!get_geo_distance_op.ok()) {
return Option<bool>(get_geo_distance_op.code(), get_geo_distance_op.error());
}
int64_t dist = get_geo_distance_op.get();
if(dist < sort_fields[i].exclude_radius) {
dist = 0;
}
if(sort_fields[i].geo_precision > 0) {
dist = dist + sort_fields[i].geo_precision - 1 -
(dist + sort_fields[i].geo_precision - 1) % sort_fields[i].geo_precision;
}
geopoint_distances[i] = dist;
// Swap (id -> latlong) index to (id -> distance) index
field_values[i] = &geo_sentinel_value;
}
const int64_t default_score = INT64_MIN; // to handle fields that don't exist in the document (e.g. optional fields)
uint32_t ref_seq_id;
for(int i = 0; i < sort_fields.size(); ++i) {
auto reference_found = true;
auto const& is_reference_sort = !sort_fields[i].reference_collection_name.empty();
auto is_random_sort = sort_fields[i].random_sort.is_enabled;
auto is_decay_function_sort = (sort_fields[i].sort_by_param == sort_by::linear ||
sort_fields[i].sort_by_param == sort_by::exp ||
sort_fields[i].sort_by_param == sort_by::gauss ||
sort_fields[i].sort_by_param == sort_by::diff);
// In case of reference sort_by, we need to get the sort score of the reference doc id.
if (is_reference_sort) {
std::string ref_collection_name;
auto get_ref_seq_id_op = get_ref_seq_id(sort_fields[i], seq_id, references, ref_collection_name);
if (!get_ref_seq_id_op.ok()) {
return Option<bool>(get_ref_seq_id_op.code(), "Error while sorting on `" + sort_fields[i].reference_collection_name
+ "." + sort_fields[i].name + ": " + get_ref_seq_id_op.error());
}
if (get_ref_seq_id_op.get() == reference_helper_sentinel_value) { // No references found.
reference_found = false;
} else {
ref_seq_id = get_ref_seq_id_op.get();
}
}
if (field_values[i] == &text_match_sentinel_value) {
scores[i] = int64_t(max_field_match_score);
match_score_index = i;
} else if (field_values[i] == &seq_id_sentinel_value) {
scores[i] = seq_id;
} else if(field_values[i] == &geo_sentinel_value) {
scores[i] = geopoint_distances[i];
} else if(field_values[i] == &str_sentinel_value) {
if (!is_reference_sort) {
scores[i] = str_sort_index.at(sort_fields[i].name)->rank(seq_id);
} else if (!reference_found) {
scores[i] = adi_tree_t::NOT_FOUND;
} else {
auto& cm = CollectionManager::get_instance();
auto ref_collection = cm.get_collection(sort_fields[i].reference_collection_name);
if (ref_collection == nullptr) {
return Option<bool>(400, "Referenced collection `" + sort_fields[i].reference_collection_name +
"` not found.");
}
scores[i] = ref_collection->reference_string_sort_score(sort_fields[i].name, ref_seq_id);
}
if(scores[i] == adi_tree_t::NOT_FOUND) {
if(sort_fields[i].order == sort_field_const::asc &&
sort_fields[i].missing_values == sort_by::missing_values_t::first) {
scores[i] = -scores[i];
}
else if(sort_fields[i].order == sort_field_const::desc &&
sort_fields[i].missing_values == sort_by::missing_values_t::last) {
scores[i] = -scores[i];
}
}
} else if(field_values[i] == &eval_sentinel_value) {
auto const& count = sort_fields[i].eval_expressions.size();
if (filter_indexes.empty()) {
filter_indexes = std::vector<uint32_t>(count, 0);
}
bool found = false;
uint32_t index = 0;
auto const& eval = sort_fields[i].eval;
if (eval.eval_ids_vec.size() != count || eval.eval_ids_count_vec.size() != count) {
return Option<bool>(400, "Eval expressions count does not match the ids count.");
}
for (; index < count; index++) {
// ref_seq_id(s) can be unordered.
uint32_t ref_filter_index = 0;
auto& filter_index = is_reference_sort ? ref_filter_index : filter_indexes[index];
auto const& eval_ids = eval.eval_ids_vec[index];
auto const& eval_ids_count = eval.eval_ids_count_vec[index];
if (filter_index == 0 || filter_index < eval_ids_count) {
// std::lower_bound returns an iterator to the first element >= the value, or `last` if no such element exists.
auto const& id = is_reference_sort ? ref_seq_id : seq_id;
filter_index = std::lower_bound(eval_ids + filter_index, eval_ids + eval_ids_count, id) -
eval_ids;
if (filter_index < eval_ids_count && eval_ids[filter_index] == id) {
filter_index++;
found = true;
break;
}
}
}
scores[i] = found ? eval.scores[index] : 0;
} else if(field_values[i] == &vector_distance_sentinel_value) {
scores[i] = float_to_int64_t(vector_distance);
} else if(field_values[i] == &vector_query_sentinel_value) {
scores[i] = float_to_int64_t(2.0f);
try {
const auto& values = sort_fields[i].vector_query.vector_index->vecdex->getDataByLabel<float>(seq_id);
const auto& dist_func = sort_fields[i].vector_query.vector_index->space->get_dist_func();
float dist = dist_func(sort_fields[i].vector_query.query.values.data(), values.data(), &sort_fields[i].vector_query.vector_index->num_dim);
if(dist > sort_fields[i].vector_query.query.distance_threshold) {
// if the computed distance exceeds distance_threshold, set it to max float
// so that the other sort conditions can take effect
dist = std::numeric_limits<float>::max();
}
scores[i] = float_to_int64_t(dist);
} catch(...) {
// probably not found
// do nothing
}
} else {
if(is_random_sort) {
scores[i] = sort_fields[i].random_sort.generate_random();
} else if(is_decay_function_sort) {
auto score = compute_decay_function_score(sort_fields[i], seq_id);
if(score == INT64_MAX) {
return Option<bool>(400, "Error computing decay function score.");
}
scores[i] = float_to_int64_t(score);
} else if (!is_reference_sort || reference_found) {
auto it = field_values[i]->find(is_reference_sort ? ref_seq_id : seq_id);
scores[i] = (it == field_values[i]->end()) ? default_score : it->second;
} else {
scores[i] = default_score;
}
if(scores[i] == INT64_MIN && sort_fields[i].missing_values == sort_by::missing_values_t::first) {
// By default, missing numerical values are always sorted to the end because -INT64_MIN == INT64_MIN.
// To honor the missing_values config, the default for a missing value has to be adjusted based on
// whether the sort is asc or desc.
bool is_asc = (sort_order[i] == -1);
scores[i] = is_asc ? (INT64_MIN + 1) : INT64_MAX;
}
}
if (sort_order[i] == -1) {
scores[i] = -scores[i];
}
}
return Option<bool>(true);
}
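// Phrase search: for every searchable field, each phrase token is looked up in the field's ART
// index and the posting lists are intersected; only documents whose tokens occur in phrase order
// are kept. Multiple phrases within a field are ANDed while results across fields are ORed, with
// a weight-based score assigned per field. Excluded ids are removed and the phrase ids are then
// ANDed into the filter iterator. When the rest of the query is a wildcard, the phrase matches
// are scored and added to the topster right here.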
Option<bool> Index::do_phrase_search(const size_t num_search_fields, const std::vector<search_field_t>& search_fields,
std::vector<query_tokens_t>& field_query_tokens,
const std::vector<sort_by>& sort_fields,
std::vector<std::vector<art_leaf*>>& searched_queries, const size_t group_limit,
const std::vector<std::string>& group_by_fields,
const bool group_missing_values,
Topster* actual_topster,
const int sort_order[3],
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values,
const std::vector<size_t>& geopoint_indices,
filter_result_iterator_t*& filter_result_iterator,
uint32_t*& all_result_ids, size_t& all_result_ids_len,
spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed,
const uint32_t* excluded_result_ids, size_t excluded_result_ids_size,
const std::unordered_set<uint32_t>& excluded_group_ids,
bool is_wildcard_query, const std::string& collection_name) const {
uint32_t* phrase_result_ids = nullptr;
uint32_t phrase_result_count = 0;
std::map<uint32_t, size_t> phrase_match_id_scores;
for(size_t i = 0; i < num_search_fields; i++) {
const std::string& field_name = search_fields[i].name;
const size_t field_weight = search_fields[i].weight;
bool is_array = search_schema.at(field_name).is_array();
uint32_t* field_phrase_match_ids = nullptr;
size_t field_phrase_match_ids_size = 0;
for(const auto& phrase: field_query_tokens[i].q_phrases) {
std::vector<void*> posting_lists;
for(const std::string& token: phrase) {
art_leaf* leaf = (art_leaf *) art_search(search_index.at(field_name),
(const unsigned char *) token.c_str(),
token.size() + 1);
if(leaf) {
posting_lists.push_back(leaf->values);
}
}
if(posting_lists.size() != phrase.size()) {
// unmatched length means no matches will be found for this phrase, so skip to next phrase
continue;
}
std::vector<uint32_t> contains_ids;
posting_t::intersect(posting_lists, contains_ids);
uint32_t* this_phrase_ids = new uint32_t[contains_ids.size()];
size_t this_phrase_ids_size = 0;
posting_t::get_phrase_matches(posting_lists, is_array, &contains_ids[0], contains_ids.size(),
this_phrase_ids, this_phrase_ids_size);
if(this_phrase_ids_size == 0) {
// no results found for this phrase, but other phrases can find results
delete [] this_phrase_ids;
continue;
}
// results of multiple phrases must be ANDed
if(field_phrase_match_ids_size == 0) {
field_phrase_match_ids_size = this_phrase_ids_size;
field_phrase_match_ids = this_phrase_ids;
} else {
uint32_t* phrase_ids_merged = nullptr;
field_phrase_match_ids_size = ArrayUtils::and_scalar(this_phrase_ids, this_phrase_ids_size, field_phrase_match_ids,
field_phrase_match_ids_size, &phrase_ids_merged);
delete [] field_phrase_match_ids;
delete [] this_phrase_ids;
field_phrase_match_ids = phrase_ids_merged;
}
}
if(field_phrase_match_ids_size == 0) {
continue;
}
// up to 10K phrase match IDs per field will be weighted so that a phrase match against a higher weighted field
// is returned earlier in the results
const size_t weight_score_base = 100000; // just to make the score a large number
for(size_t pi = 0; pi < std::min<size_t>(10000, field_phrase_match_ids_size); pi++) {
auto this_field_score = (weight_score_base + field_weight);
auto existing_score = phrase_match_id_scores[field_phrase_match_ids[pi]];
phrase_match_id_scores[field_phrase_match_ids[pi]] = std::max(this_field_score, existing_score);
}
// across fields, we have to OR phrase match ids
if(phrase_result_count == 0) {
phrase_result_ids = field_phrase_match_ids;
phrase_result_count = field_phrase_match_ids_size;
} else {
uint32_t* phrase_ids_merged = nullptr;
phrase_result_count = ArrayUtils::or_scalar(phrase_result_ids, phrase_result_count, field_phrase_match_ids,
field_phrase_match_ids_size, &phrase_ids_merged);
delete [] phrase_result_ids;
delete [] field_phrase_match_ids;
phrase_result_ids = phrase_ids_merged;
}
}
if(excluded_result_ids_size != 0) {
uint32_t* excluded_phrase_result_ids = nullptr;
phrase_result_count = ArrayUtils::exclude_scalar(phrase_result_ids, phrase_result_count, excluded_result_ids,
excluded_result_ids_size, &excluded_phrase_result_ids);
delete[] phrase_result_ids;
phrase_result_ids = excluded_phrase_result_ids;
}
// AND phrase id matches with filter ids
if(filter_result_iterator->validity) {
filter_result_iterator_t::add_phrase_ids(filter_result_iterator, phrase_result_ids, phrase_result_count);
} else {
delete filter_result_iterator;
filter_result_iterator = new filter_result_iterator_t(phrase_result_ids, phrase_result_count);
}
if (!is_wildcard_query) {
// this means that there are non-phrase tokens in the query,
// so we cannot directly copy to the all_result_ids array
return Option<bool>(true);
}
filter_result_iterator->compute_iterators();
all_result_ids_len = filter_result_iterator->to_filter_id_array(all_result_ids);
filter_result_iterator->reset();
std::vector<uint32_t> eval_filter_indexes;
std::vector<group_by_field_it_t> group_by_field_it_vec;
if (group_limit != 0) {
group_by_field_it_vec = get_group_by_field_iterators(group_by_fields);
}
// populate topster
for(size_t i = 0; i < all_result_ids_len && filter_result_iterator->validity == filter_result_iterator_t::valid; i++) {
auto seq_id = filter_result_iterator->seq_id;
auto references = std::move(filter_result_iterator->reference);
filter_result_iterator->next();
int64_t match_score = phrase_match_id_scores[seq_id];
int64_t scores[3] = {0};
int64_t match_score_index = -1;
bool should_skip = false;
auto compute_sort_scores_op = compute_sort_scores(sort_fields, sort_order, field_values, geopoint_indices,
seq_id, references, eval_filter_indexes, match_score, scores,
match_score_index, should_skip, 0, collection_name);
if (!compute_sort_scores_op.ok()) {
return compute_sort_scores_op;
}
if(should_skip) {
continue;
}
uint64_t distinct_id = seq_id;
if(group_limit != 0) {
distinct_id = 1;
for(auto& kv : group_by_field_it_vec) {
get_distinct_id(kv.it, seq_id, kv.is_array, group_missing_values, distinct_id);
}
if(excluded_group_ids.count(distinct_id) != 0) {
continue;
}
}
KV kv(searched_queries.size(), seq_id, distinct_id, match_score_index, scores, std::move(references));
int ret = actual_topster->add(&kv);
if(group_limit != 0 && ret < 2) {
groups_processed[distinct_id]++;
}
if(((i + 1) % (1 << 12)) == 0) {
BREAK_CIRCUIT_BREAKER
}
}
filter_result_iterator->reset();
search_cutoff = search_cutoff || filter_result_iterator->validity == filter_result_iterator_t::timed_out;
searched_queries.push_back({});
return Option<bool>(true);
}
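// Runs a fuzzy multi-field search for every synonym expansion of the query, so that documents
// matching a synonym are scored and collected into the same topster and result id list as the
// original query tokens.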
Option<bool> Index::do_synonym_search(const std::vector<search_field_t>& the_fields,
const text_match_type_t match_type,
filter_node_t const* const& filter_tree_root,
const std::vector<sort_by>& sort_fields_std, Topster*& curated_topster,
const token_ordering& token_order,
const size_t typo_tokens_threshold, const size_t group_limit,
const std::vector<std::string>& group_by_fields,
const bool group_missing_values,
bool prioritize_exact_match,
const bool prioritize_token_position,
const bool prioritize_num_matching_fields,
const bool exhaustive_search, const size_t concurrency,
const std::vector<bool>& prefixes,
size_t min_len_1typo,
size_t min_len_2typo, const size_t max_candidates, const std::set<uint32_t>& curated_ids,
const std::vector<uint32_t>& curated_ids_sorted, const uint32_t* exclude_token_ids,
size_t exclude_token_ids_size,
const std::unordered_set<uint32_t>& excluded_group_ids,
Topster* actual_topster,
std::vector<std::vector<token_t>>& q_pos_synonyms,
int syn_orig_num_tokens,
spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed,
std::vector<std::vector<art_leaf*>>& searched_queries,
uint32_t*& all_result_ids, size_t& all_result_ids_len,
filter_result_iterator_t* const filter_result_iterator,
std::set<uint64>& query_hashes,
const int* sort_order,
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values,
const std::vector<size_t>& geopoint_indices,
tsl::htrie_map<char, token_leaf>& qtoken_set,
const std::string& collection_name) const {
for (const auto& syn_tokens : q_pos_synonyms) {
query_hashes.clear();
auto fuzzy_search_fields_op = fuzzy_search_fields(the_fields, syn_tokens, {}, match_type, exclude_token_ids,
exclude_token_ids_size, filter_result_iterator,
curated_ids_sorted, excluded_group_ids, sort_fields_std, {0},
searched_queries, qtoken_set, actual_topster, groups_processed,
all_result_ids, all_result_ids_len, group_limit, group_by_fields,
group_missing_values,
prioritize_exact_match, prioritize_token_position,
prioritize_num_matching_fields,
query_hashes,
token_order, prefixes, typo_tokens_threshold, exhaustive_search,
max_candidates, min_len_1typo, min_len_2typo,
syn_orig_num_tokens, sort_order, field_values, geopoint_indices,
collection_name);
if (!fuzzy_search_fields_op.ok()) {
return fuzzy_search_fields_op;
}
}
return Option<bool>(true);
}
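// Infix search: for every field whose infix mode is `always` (or `fallback` when there are no
// results yet), collects ids of documents that contain the first query token as an infix, removes
// curated ids, intersects with the filter results, scores and adds the matches to the topster and
// finally ORs the ids into `all_result_ids`.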
Option<bool> Index::do_infix_search(const size_t num_search_fields, const std::vector<search_field_t>& the_fields,
const std::vector<enable_t>& infixes,
const std::vector<sort_by>& sort_fields,
std::vector<std::vector<art_leaf*>>& searched_queries, const size_t group_limit,
const std::vector<std::string>& group_by_fields,
const bool group_missing_values,
const size_t max_extra_prefix,
const size_t max_extra_suffix,
const std::vector<token_t>& query_tokens, Topster* actual_topster,
filter_result_iterator_t* const filter_result_iterator,
const int sort_order[3],
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values,
const std::vector<size_t>& geopoint_indices,
const std::vector<uint32_t>& curated_ids_sorted,
const std::unordered_set<uint32_t>& excluded_group_ids,
uint32_t*& all_result_ids, size_t& all_result_ids_len,
spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed,
const std::string& collection_name) const {
std::vector<group_by_field_it_t> group_by_field_it_vec;
if (group_limit != 0) {
group_by_field_it_vec = get_group_by_field_iterators(group_by_fields);
}
for(size_t field_id = 0; field_id < num_search_fields; field_id++) {
auto& field_name = the_fields[field_id].name;
enable_t field_infix = the_fields[field_id].infix;
if(field_infix == always || (field_infix == fallback && all_result_ids_len == 0)) {
std::vector<uint32_t> infix_ids;
filter_result_t filtered_infix_ids;
auto search_infix_op = search_infix(query_tokens[0].value, field_name, infix_ids,
max_extra_prefix, max_extra_suffix);
if (!search_infix_op.ok()) {
return search_infix_op;
}
if(!infix_ids.empty()) {
gfx::timsort(infix_ids.begin(), infix_ids.end());
infix_ids.erase(std::unique( infix_ids.begin(), infix_ids.end() ), infix_ids.end());
auto& raw_infix_ids = filtered_infix_ids.docs;
auto& raw_infix_ids_length = filtered_infix_ids.count;
if(!curated_ids_sorted.empty()) {
raw_infix_ids_length = ArrayUtils::exclude_scalar(&infix_ids[0], infix_ids.size(), &curated_ids_sorted[0],
curated_ids_sorted.size(), &raw_infix_ids);
infix_ids.clear();
} else {
raw_infix_ids = &infix_ids[0];
raw_infix_ids_length = infix_ids.size();
}
if(filter_result_iterator->validity == filter_result_iterator_t::valid) {
filter_result_t result;
filter_result_iterator->and_scalar(raw_infix_ids, raw_infix_ids_length, result);
if(raw_infix_ids != &infix_ids[0]) {
delete [] raw_infix_ids;
}
filtered_infix_ids = std::move(result);
filter_result_iterator->reset();
}
bool field_is_array = search_schema.at(the_fields[field_id].name).is_array();
std::vector<uint32_t> eval_filter_indexes;
for(size_t i = 0; i < raw_infix_ids_length; i++) {
auto seq_id = raw_infix_ids[i];
std::map<std::string, reference_filter_result_t> references;
if (filtered_infix_ids.coll_to_references != nullptr) {
references = std::move(filtered_infix_ids.coll_to_references[i]);
}
int64_t match_score = 0;
score_results2(sort_fields, searched_queries.size(), field_id, field_is_array,
0, match_score, seq_id, sort_order, false, false, false, 1, -1, {});
int64_t scores[3] = {0};
int64_t match_score_index = -1;
bool should_skip = false;
auto compute_sort_scores_op = compute_sort_scores(sort_fields, sort_order, field_values,
geopoint_indices, seq_id, references,
eval_filter_indexes, 100, scores, match_score_index,
should_skip, 0, collection_name);
if (!compute_sort_scores_op.ok()) {
return compute_sort_scores_op;
}
if(should_skip) {
continue;
}
uint64_t distinct_id = seq_id;
if(group_limit != 0) {
distinct_id = 1;
for(auto& kv : group_by_field_it_vec) {
get_distinct_id(kv.it, seq_id, kv.is_array, group_missing_values, distinct_id);
}
if(excluded_group_ids.count(distinct_id) != 0) {
continue;
}
}
KV kv(searched_queries.size(), seq_id, distinct_id, match_score_index, scores, std::move(references));
int ret = actual_topster->add(&kv);
if(group_limit != 0 && ret < 2) {
groups_processed[distinct_id]++;
}
if(((i + 1) % (1 << 12)) == 0) {
BREAK_CIRCUIT_BREAKER
}
}
uint32_t* new_all_result_ids = nullptr;
all_result_ids_len = ArrayUtils::or_scalar(all_result_ids, all_result_ids_len, raw_infix_ids,
raw_infix_ids_length, &new_all_result_ids);
delete[] all_result_ids;
all_result_ids = new_all_result_ids;
if (raw_infix_ids == &infix_ids[0]) {
raw_infix_ids = nullptr;
}
searched_queries.push_back({});
}
}
}
return Option<bool>(true);
}
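// Builds the set of document ids that must be excluded because of exclusion tokens / phrases in
// the query. A single excluded token contributes its entire posting list, while a multi-token
// phrase contributes only the ids that contain the tokens in phrase order; everything is ORed
// into `exclude_token_ids`.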
void Index::handle_exclusion(const size_t num_search_fields, std::vector<query_tokens_t>& field_query_tokens,
const std::vector<search_field_t>& search_fields, uint32_t*& exclude_token_ids,
size_t& exclude_token_ids_size) const {
for(size_t i = 0; i < num_search_fields; i++) {
const std::string & field_name = search_fields[i].name;
bool is_array = search_schema.at(field_name).is_array();
for(const auto& q_exclude_phrase: field_query_tokens[i].q_exclude_tokens) {
// if phrase has multiple words, then we have to do exclusion of phrase match results
std::vector<void*> posting_lists;
for(const std::string& exclude_token: q_exclude_phrase) {
art_leaf* leaf = (art_leaf *) art_search(search_index.at(field_name),
(const unsigned char *) exclude_token.c_str(),
exclude_token.size() + 1);
if(leaf) {
posting_lists.push_back(leaf->values);
}
}
if(posting_lists.size() != q_exclude_phrase.size()) {
continue;
}
std::vector<uint32_t> contains_ids;
posting_t::intersect(posting_lists, contains_ids);
if(posting_lists.size() == 1) {
uint32_t *exclude_token_ids_merged = nullptr;
exclude_token_ids_size = ArrayUtils::or_scalar(exclude_token_ids, exclude_token_ids_size,
&contains_ids[0], contains_ids.size(),
&exclude_token_ids_merged);
delete [] exclude_token_ids;
exclude_token_ids = exclude_token_ids_merged;
} else {
uint32_t* phrase_ids = new uint32_t[contains_ids.size()];
size_t phrase_ids_size = 0;
posting_t::get_phrase_matches(posting_lists, is_array, &contains_ids[0], contains_ids.size(),
phrase_ids, phrase_ids_size);
uint32_t *exclude_token_ids_merged = nullptr;
exclude_token_ids_size = ArrayUtils::or_scalar(exclude_token_ids, exclude_token_ids_size,
phrase_ids, phrase_ids_size,
&exclude_token_ids_merged);
delete [] phrase_ids;
delete [] exclude_token_ids;
exclude_token_ids = exclude_token_ids_merged;
}
}
}
}
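// Precomputes per-facet metadata used during faceting: whether stats must be computed, whether
// the value index or the hash index should be used (based on the configured facet index type,
// result set size and facet cardinality) and, when a facet query is present, which searched
// tokens / facet hashes match it (found via a fuzzy search restricted to the current result ids).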
void Index::compute_facet_infos(const std::vector<facet>& facets, facet_query_t& facet_query,
const uint32_t facet_query_num_typos,
uint32_t* all_result_ids, const size_t& all_result_ids_len,
const std::vector<std::string>& group_by_fields,
const size_t group_limit, const bool is_wildcard_no_filter_query,
const size_t max_candidates,
std::vector<facet_info_t>& facet_infos,
const std::vector<facet_index_type_t>& facet_index_types) const {
if(all_result_ids_len == 0) {
return;
}
size_t total_docs = seq_ids->num_ids();
for(size_t findex=0; findex < facets.size(); findex++) {
const auto& a_facet = facets[findex];
const field &facet_field = search_schema.at(a_facet.field_name);
const auto facet_index_type = facet_index_types[a_facet.orig_index];
facet_infos[findex].facet_field = facet_field;
facet_infos[findex].use_facet_query = false;
facet_infos[findex].should_compute_stats = (facet_field.type != field_types::STRING &&
facet_field.type != field_types::BOOL &&
facet_field.type != field_types::STRING_ARRAY &&
facet_field.type != field_types::BOOL_ARRAY);
bool facet_value_index_exists = facet_index_v4->has_value_index(facet_field.name);
// since the sort index is used for range facets with a hash-based index, the sort index must be present
if(facet_index_type == exhaustive) {
facet_infos[findex].use_value_index = false;
}
else if(facet_value_index_exists) {
if(facet_index_type == top_values) {
facet_infos[findex].use_value_index = true;
} else {
// facet_index_type = detect
size_t num_facet_values = facet_index_v4->get_facet_count(facet_field.name);
facet_infos[findex].use_value_index = (group_limit == 0) && (a_facet.sort_field.empty()) &&
( is_wildcard_no_filter_query ||
(all_result_ids_len > 1000 && num_facet_values < 250) ||
(all_result_ids_len > 1000 && all_result_ids_len * 2 > total_docs) ||
(a_facet.is_sort_by_alpha));
}
} else {
facet_infos[findex].use_value_index = false;
}
if(a_facet.field_name == facet_query.field_name && !facet_query.query.empty()) {
facet_infos[findex].use_facet_query = true;
if (facet_field.is_bool()) {
if (facet_query.query == "true") {
facet_query.query = "1";
} else if (facet_query.query == "false") {
facet_query.query = "0";
}
}
//LOG(INFO) << "facet_query.query: " << facet_query.query;
std::vector<std::string> query_tokens;
Tokenizer(facet_query.query, true, !facet_field.is_string(),
facet_field.locale, symbols_to_index, token_separators).tokenize(query_tokens);
std::vector<token_t> qtokens;
for (size_t qtoken_index = 0; qtoken_index < query_tokens.size(); qtoken_index++) {
bool is_prefix = (qtoken_index == query_tokens.size()-1);
qtokens.emplace_back(qtoken_index, query_tokens[qtoken_index], is_prefix,
query_tokens[qtoken_index].size(), 0);
}
std::vector<std::vector<art_leaf*>> searched_queries;
Topster* topster = nullptr;
spp::sparse_hash_map<uint64_t, uint32_t> groups_processed;
uint32_t* field_result_ids = nullptr;
size_t field_result_ids_len = 0;
size_t field_num_results = 0;
std::set<uint64> query_hashes;
size_t num_toks_dropped = 0;
std::vector<sort_by> sort_fields;
std::vector<search_field_t> fq_fields;
fq_fields.emplace_back(facet_field.name, facet_field.faceted_name(), 1, facet_query_num_typos,
true, enable_t::off);
uint32_t* filter_ids = new uint32_t[all_result_ids_len];
std::copy(all_result_ids, all_result_ids + all_result_ids_len, filter_ids);
filter_result_iterator_t filter_result_it(filter_ids, all_result_ids_len);
tsl::htrie_map<char, token_leaf> qtoken_set;
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values{};
const std::vector<size_t> geopoint_indices;
auto fuzzy_search_fields_op = fuzzy_search_fields(fq_fields, qtokens, {}, text_match_type_t::max_score, nullptr, 0,
&filter_result_it, {}, {}, sort_fields, {facet_query_num_typos}, searched_queries,
qtoken_set, topster, groups_processed, field_result_ids, field_result_ids_len,
group_limit, group_by_fields, false, true, false, false, query_hashes, MAX_SCORE, {true}, 1,
false, max_candidates, 3, 7, 0, nullptr, field_values, geopoint_indices, "", true);
if(!fuzzy_search_fields_op.ok()) {
continue;
}
//LOG(INFO) << "searched_queries.size: " << searched_queries.size();
// NOTE: `field_result_ids` will consist of IDs across ALL queries in searched_queries
for(size_t si = 0; si < searched_queries.size(); si++) {
const auto& searched_query = searched_queries[si];
std::vector<std::string> searched_tokens;
std::vector<void*> posting_lists;
for(auto leaf: searched_query) {
posting_lists.push_back(leaf->values);
std::string tok(reinterpret_cast<char*>(leaf->key), leaf->key_len - 1);
searched_tokens.push_back(tok);
//LOG(INFO) << "tok: " << tok;
}
//LOG(INFO) << "si: " << si << ", field_result_ids_len: " << field_result_ids_len;
if(facet_infos[findex].use_value_index) {
size_t num_tokens_found = 0;
for(auto pl: posting_lists) {
if(posting_t::contains_atleast_one(pl, field_result_ids, field_result_ids_len)) {
num_tokens_found++;
} else {
break;
}
}
if(num_tokens_found == posting_lists.size()) {
// need to ensure that document ID actually contains searched_query tokens
// since `field_result_ids` contains documents matched across all queries
// value based index
facet_infos[findex].fvalue_searched_tokens.emplace_back(searched_tokens);
}
}
else {
for(size_t i = 0; i < field_result_ids_len; i++) {
uint32_t seq_id = field_result_ids[i];
bool id_matched = true;
for(auto pl: posting_lists) {
if(!posting_t::contains(pl, seq_id)) {
// need to ensure that document ID actually contains searched_query tokens
// since `field_result_ids` contains documents matched across all queries
id_matched = false;
break;
}
}
if(!id_matched) {
continue;
}
std::vector<uint32_t> facet_hashes;
auto facet_index = facet_index_v4->get_facet_hash_index(a_facet.field_name);
posting_list_t::iterator_t facet_index_it = facet_index->new_iterator();
facet_index_it.skip_to(seq_id);
if(facet_index_it.valid()) {
posting_list_t::get_offsets(facet_index_it, facet_hashes);
if(facet_field.is_array()) {
std::vector<size_t> array_indices;
posting_t::get_matching_array_indices(posting_lists, seq_id, array_indices);
for(size_t array_index: array_indices) {
if(array_index < facet_hashes.size()) {
uint32_t hash = facet_hashes[array_index];
/*LOG(INFO) << "seq_id: " << seq_id << ", hash: " << hash << ", array index: "
<< array_index;*/
if(facet_infos[findex].hashes.count(hash) == 0) {
//LOG(INFO) << "adding searched_tokens for hash " << hash;
facet_infos[findex].hashes.emplace(hash, searched_tokens);
}
}
}
} else {
uint32_t hash = facet_hashes[0];
if(facet_infos[findex].hashes.count(hash) == 0) {
//LOG(INFO) << "adding searched_tokens for hash " << hash;
facet_infos[findex].hashes.emplace(hash, searched_tokens);
}
}
}
}
}
}
delete [] field_result_ids;
}
}
}
void Index::curate_filtered_ids(const uint32_t* exclude_token_ids, size_t exclude_token_ids_size,
uint32_t*& filter_ids, uint32_t& filter_ids_length,
const std::vector<uint32_t>& curated_ids_sorted) const {
if(!curated_ids_sorted.empty()) {
uint32_t *excluded_result_ids = nullptr;
filter_ids_length = ArrayUtils::exclude_scalar(filter_ids, filter_ids_length, &curated_ids_sorted[0],
curated_ids_sorted.size(), &excluded_result_ids);
delete [] filter_ids;
filter_ids = excluded_result_ids;
}
// Exclude document IDs associated with excluded tokens from the result set
if(exclude_token_ids_size != 0) {
uint32_t *excluded_result_ids = nullptr;
filter_ids_length = ArrayUtils::exclude_scalar(filter_ids, filter_ids_length, exclude_token_ids,
exclude_token_ids_size, &excluded_result_ids);
delete[] filter_ids;
filter_ids = excluded_result_ids;
}
}
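// Wildcard search: the filtered ids are split into windows that are scored in parallel by up to
// `concurrency` worker threads. Each thread computes sort scores and group ids for its batch and
// adds the documents to a private topster; once every batch is done, the per-thread topsters and
// group counts are merged into the caller's topster. Partial results are still returned when the
// filter iterator times out.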
Option<bool> Index::search_wildcard(filter_node_t const* const& filter_tree_root,
const std::vector<sort_by>& sort_fields, Topster*& topster, Topster*& curated_topster,
spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed,
std::vector<std::vector<art_leaf*>>& searched_queries, const size_t group_limit,
const std::vector<std::string>& group_by_fields,
const bool group_missing_values,
const uint32_t* exclude_token_ids,
size_t exclude_token_ids_size, const std::unordered_set<uint32_t>& excluded_group_ids,
uint32_t*& all_result_ids, size_t& all_result_ids_len,
filter_result_iterator_t* const filter_result_iterator,
const size_t concurrency,
const int* sort_order,
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values,
const std::vector<size_t>& geopoint_indices,
const std::string& collection_name) const {
filter_result_iterator->compute_iterators();
auto const& approx_filter_ids_length = filter_result_iterator->approx_filter_ids_length;
// Timed out during computation of filter_result_iterator. We should still process the partial ids.
auto timed_out_before_processing = filter_result_iterator->validity == filter_result_iterator_t::timed_out;
uint32_t token_bits = 0;
const bool check_for_circuit_break = (approx_filter_ids_length > 1000000);
//auto beginF = std::chrono::high_resolution_clock::now();
const size_t num_threads = std::min<size_t>(concurrency, approx_filter_ids_length);
const size_t window_size = (num_threads == 0) ? 0 :
(approx_filter_ids_length + num_threads - 1) / num_threads; // rounds up
spp::sparse_hash_map<uint64_t, uint64_t> tgroups_processed[num_threads];
Topster* topsters[num_threads];
std::vector<posting_list_t::iterator_t> plists;
size_t num_processed = 0;
std::mutex m_process;
std::condition_variable cv_process;
size_t num_queued = 0;
const auto parent_search_begin = search_begin_us;
const auto parent_search_stop_ms = search_stop_us;
auto parent_search_cutoff = search_cutoff;
uint32_t excluded_result_index = 0;
Option<bool>* compute_sort_score_statuses[num_threads];
for(size_t thread_id = 0; thread_id < num_threads &&
filter_result_iterator->validity != filter_result_iterator_t::invalid; thread_id++) {
auto batch_result = new filter_result_t();
filter_result_iterator->get_n_ids(window_size, excluded_result_index, exclude_token_ids,
exclude_token_ids_size, batch_result, timed_out_before_processing);
if (batch_result->count == 0) {
delete batch_result;
break;
}
num_queued++;
searched_queries.push_back({});
topsters[thread_id] = new Topster(topster->MAX_SIZE, topster->distinct);
auto& compute_sort_score_status = compute_sort_score_statuses[thread_id] = nullptr;
thread_pool->enqueue([this, &parent_search_begin, &parent_search_stop_ms, &parent_search_cutoff,
thread_id, &sort_fields, &searched_queries,
&group_limit, &group_by_fields, group_missing_values,
&topsters, &tgroups_processed, &excluded_group_ids,
&sort_order, field_values, &geopoint_indices, &plists,
check_for_circuit_break,
batch_result,
&num_processed, &m_process, &cv_process, &compute_sort_score_status, collection_name]() {
std::unique_ptr<filter_result_t> batch_result_guard(batch_result);
search_begin_us = parent_search_begin;
search_stop_us = parent_search_stop_ms;
search_cutoff = false;
std::vector<uint32_t> filter_indexes;
std::vector<group_by_field_it_t> group_by_field_it_vec;
if (group_limit != 0) {
group_by_field_it_vec = get_group_by_field_iterators(group_by_fields);
}
for(size_t i = 0; i < batch_result->count; i++) {
const uint32_t seq_id = batch_result->docs[i];
std::map<basic_string<char>, reference_filter_result_t> references;
if (batch_result->coll_to_references != nullptr) {
references = std::move(batch_result->coll_to_references[i]);
}
int64_t match_score = 0;
score_results2(sort_fields, (uint16_t) searched_queries.size(), 0, false, 0,
match_score, seq_id, sort_order, false, false, false, 1, -1, plists);
int64_t scores[3] = {0};
int64_t match_score_index = -1;
bool should_skip = false;
auto compute_sort_scores_op = compute_sort_scores(sort_fields, sort_order, field_values, geopoint_indices,
seq_id, references, filter_indexes, 100, scores,
match_score_index, should_skip, 0, collection_name);
if (!compute_sort_scores_op.ok()) {
compute_sort_score_status = new Option<bool>(compute_sort_scores_op.code(), compute_sort_scores_op.error());
break;
}
if(should_skip) {
continue;
}
uint64_t distinct_id = seq_id;
if(group_limit != 0) {
distinct_id = 1;
for(auto& kv : group_by_field_it_vec) {
get_distinct_id(kv.it, seq_id, kv.is_array, group_missing_values, distinct_id);
}
if(excluded_group_ids.count(distinct_id) != 0) {
continue;
}
}
KV kv(searched_queries.size(), seq_id, distinct_id, match_score_index, scores, std::move(references));
int ret = topsters[thread_id]->add(&kv);
if(group_limit != 0 && ret < 2) {
tgroups_processed[thread_id][distinct_id]++;
}
if(check_for_circuit_break && ((i + 1) % (1 << 15)) == 0) {
// check only once every 2^15 docs to reduce overhead
BREAK_CIRCUIT_BREAKER
}
}
std::unique_lock<std::mutex> lock(m_process);
num_processed++;
parent_search_cutoff = parent_search_cutoff || search_cutoff;
cv_process.notify_one();
});
}
std::unique_lock<std::mutex> lock_process(m_process);
cv_process.wait(lock_process, [&](){ return num_processed == num_queued; });
search_cutoff = parent_search_cutoff || timed_out_before_processing ||
filter_result_iterator->validity == filter_result_iterator_t::timed_out;
for(size_t thread_id = 0; thread_id < num_processed; thread_id++) {
if (compute_sort_score_statuses[thread_id] != nullptr) {
auto& status = compute_sort_score_statuses[thread_id];
auto return_value = Option<bool>(status->code(), status->error());
// Clean up the results of the remaining threads.
for (size_t i = thread_id; i < num_processed; i++) {
delete compute_sort_score_statuses[i];
delete topsters[i];
}
return return_value;
}
//groups_processed.insert(tgroups_processed[thread_id].begin(), tgroups_processed[thread_id].end());
for(const auto& it : tgroups_processed[thread_id]) {
groups_processed[it.first]+= it.second;
}
aggregate_topster(topster, topsters[thread_id]);
delete topsters[thread_id];
}
/*long long int timeMillisF = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - beginF).count();
LOG(INFO) << "Time for raw scoring: " << timeMillisF;*/
filter_result_iterator->reset(true);
if (timed_out_before_processing || filter_result_iterator->validity == filter_result_iterator_t::valid) {
all_result_ids_len = filter_result_iterator->to_filter_id_array(all_result_ids);
search_cutoff = search_cutoff || filter_result_iterator->validity == filter_result_iterator_t::timed_out;
} else if (filter_result_iterator->validity == filter_result_iterator_t::timed_out) {
auto partial_result = new filter_result_t();
std::unique_ptr<filter_result_t> partial_result_guard(partial_result);
filter_result_iterator->get_n_ids(window_size * num_processed,
excluded_result_index, nullptr, 0, partial_result, true);
all_result_ids_len = partial_result->count;
all_result_ids = partial_result->docs;
partial_result->docs = nullptr;
}
return Option<bool>(true);
}
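// Resolves every sort_by field into a sort direction (`sort_order[i]` is -1 for ascending) and a
// `field_values` entry: a sentinel pointer for special sorts (text match, seq_id / group_found,
// eval, vector distance, vector query, string sort) or the field's numeric sort index, with
// geopoint fields also recorded in `geopoint_indices`. Eval expressions are evaluated here into
// id lists that compute_sort_scores consults later, and reference sort fields are delegated to
// the referenced collection.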
Option<bool> Index::populate_sort_mapping(int* sort_order, std::vector<size_t>& geopoint_indices,
std::vector<sort_by>& sort_fields_std,
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values) const {
for (size_t i = 0; i < sort_fields_std.size(); i++) {
if (!sort_fields_std[i].reference_collection_name.empty()) {
auto& cm = CollectionManager::get_instance();
auto ref_collection = cm.get_collection(sort_fields_std[i].reference_collection_name);
int ref_sort_order[1];
std::vector<size_t> ref_geopoint_indices;
std::vector<sort_by> ref_sort_fields_std;
ref_sort_fields_std.emplace_back(sort_fields_std[i]);
ref_sort_fields_std.front().reference_collection_name.clear();
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> ref_field_values;
auto populate_op = ref_collection->reference_populate_sort_mapping(ref_sort_order, ref_geopoint_indices,
ref_sort_fields_std, ref_field_values);
if (!populate_op.ok()) {
return populate_op;
}
sort_order[i] = ref_sort_order[0];
if (!ref_geopoint_indices.empty()) {
geopoint_indices.push_back(i);
}
sort_fields_std[i] = ref_sort_fields_std[0];
sort_fields_std[i].reference_collection_name = ref_collection->get_name();
field_values[i] = ref_field_values[0];
continue;
}
sort_order[i] = 1;
if (sort_fields_std[i].order == sort_field_const::asc) {
sort_order[i] = -1;
}
if (sort_fields_std[i].name == sort_field_const::text_match) {
field_values[i] = &text_match_sentinel_value;
} else if (sort_fields_std[i].name == sort_field_const::seq_id ||
sort_fields_std[i].name == sort_field_const::group_found) {
field_values[i] = &seq_id_sentinel_value;
} else if (sort_fields_std[i].name == sort_field_const::eval) {
field_values[i] = &eval_sentinel_value;
auto& eval_exp = sort_fields_std[i].eval;
auto count = sort_fields_std[i].eval_expressions.size();
for (uint32_t j = 0; j < count; j++) {
auto filter_result_iterator = filter_result_iterator_t("", this, eval_exp.filter_trees[j], false,
DEFAULT_FILTER_BY_CANDIDATES,
search_begin_us, search_stop_us);
auto filter_init_op = filter_result_iterator.init_status();
if (!filter_init_op.ok()) {
return filter_init_op;
}
filter_result_iterator.compute_iterators();
uint32_t* eval_ids = nullptr;
auto eval_ids_count = filter_result_iterator.to_filter_id_array(eval_ids);
eval_exp.eval_ids_vec.push_back(eval_ids);
eval_exp.eval_ids_count_vec.push_back(eval_ids_count);
}
} else if(sort_fields_std[i].name == sort_field_const::vector_distance) {
field_values[i] = &vector_distance_sentinel_value;
} else if(sort_fields_std[i].name == sort_field_const::vector_query) {
field_values[i] = &vector_query_sentinel_value;
} else if (search_schema.count(sort_fields_std[i].name) != 0 && search_schema.at(sort_fields_std[i].name).sort) {
if (search_schema.at(sort_fields_std[i].name).type == field_types::GEOPOINT_ARRAY) {
geopoint_indices.push_back(i);
field_values[i] = nullptr; // GEOPOINT_ARRAY uses a multi-valued index
} else if(search_schema.at(sort_fields_std[i].name).type == field_types::STRING) {
field_values[i] = &str_sentinel_value;
} else {
field_values[i] = sort_index.at(sort_fields_std[i].name);
if (search_schema.at(sort_fields_std[i].name).is_geopoint()) {
geopoint_indices.push_back(i);
}
}
}
}
return Option<bool>(true);
}
Option<bool> Index::populate_sort_mapping_with_lock(int* sort_order, std::vector<size_t>& geopoint_indices,
std::vector<sort_by>& sort_fields_std,
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3>& field_values) const {
std::shared_lock lock(mutex);
return populate_sort_mapping(sort_order, geopoint_indices, sort_fields_std, field_values);
}
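// Returns the number of typos (0, 1 or 2, capped by `max_cost`) allowed for a token, based on its
// length and on whether typos are enabled for numerical / non-alphanumeric tokens.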
int Index::get_bounded_typo_cost(const size_t max_cost, const std::string& token, const size_t token_len,
const size_t min_len_1typo, const size_t min_len_2typo,
bool enable_typos_for_numerical_tokens,
bool enable_typos_for_alpha_numerical_tokens) {
if(!enable_typos_for_alpha_numerical_tokens) {
for(auto c : token) {
if(!isalnum(c)) { //some special char which is indexed
return 0;
}
}
}
if(!enable_typos_for_numerical_tokens && std::all_of(token.begin(), token.end(), ::isdigit)) {
return 0;
}
if (token_len < min_len_1typo) {
// typo correction is disabled for small tokens
return 0;
}
if (token_len < min_len_2typo) {
// 2 typos are allowed only when the token length reaches min_len_2typo
return std::min<int>(max_cost, 1);
}
return std::min<int>(max_cost, 2);
}
void Index::log_leaves(const int cost, const std::string &token, const std::vector<art_leaf *> &leaves) const {
LOG(INFO) << "Index: " << name << ", token: " << token << ", cost: " << cost;
for(size_t i=0; i < leaves.size(); i++) {
std::string key((char*)leaves[i]->key, leaves[i]->key_len);
LOG(INFO) << key << " - " << posting_t::num_ids(leaves[i]->values);
LOG(INFO) << "frequency: " << posting_t::num_ids(leaves[i]->values) << ", max_score: " << leaves[i]->max_score;
/*for(auto j=0; j<leaves[i]->values->ids.getLength(); j++) {
LOG(INFO) << "id: " << leaves[i]->values->ids.at(j);
}*/
}
}
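// Computes the text match score of a single document for one field and one query combination.
// For multi-token matches, offsets from the posting lists are converted into token positions and
// the best window is scored; the resulting 64-bit score packs words present, unique words, typo
// score, proximity, verbatim match and offset components. The best score across array entries is
// written to `match_score`.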
int64_t Index::score_results2(const std::vector<sort_by> & sort_fields, const uint16_t & query_index,
const size_t field_id,
const bool field_is_array,
const uint32_t total_cost,
int64_t& match_score,
const uint32_t seq_id, const int sort_order[3],
const bool prioritize_exact_match,
const bool single_exact_query_token,
const bool prioritize_token_position,
size_t num_query_tokens,
int syn_orig_num_tokens,
const std::vector<posting_list_t::iterator_t>& posting_lists) const {
//auto begin = std::chrono::high_resolution_clock::now();
//const std::string first_token((const char*)query_suggestion[0]->key, query_suggestion[0]->key_len-1);
if (posting_lists.size() <= 1) {
const uint8_t is_verbatim_match = uint8_t(
prioritize_exact_match && single_exact_query_token &&
posting_list_t::is_single_token_verbatim_match(posting_lists[0], field_is_array)
);
size_t words_present = (num_query_tokens == 1 && syn_orig_num_tokens != -1) ? syn_orig_num_tokens : 1;
size_t distance = (num_query_tokens == 1 && syn_orig_num_tokens != -1) ? syn_orig_num_tokens-1 : 0;
size_t max_offset = prioritize_token_position ? posting_list_t::get_last_offset(posting_lists[0],
field_is_array) : 255;
Match single_token_match = Match(words_present, distance, max_offset, is_verbatim_match);
match_score = single_token_match.get_match_score(total_cost, words_present);
/*auto this_words_present = ((match_score >> 32) & 0xFF);
auto unique_words = ((match_score >> 40) & 0xFF);
auto typo_score = ((match_score >> 24) & 0xFF);
auto proximity = ((match_score >> 16) & 0xFF);
auto verbatim = ((match_score >> 8) & 0xFF);
auto offset_score = ((match_score >> 0) & 0xFF);
LOG(INFO) << "seq_id: " << seq_id
<< ", words_present: " << this_words_present
<< ", unique_words: " << unique_words
<< ", typo_score: " << typo_score
<< ", proximity: " << proximity
<< ", verbatim: " << verbatim
<< ", offset_score: " << offset_score
<< ", match_score: " << match_score;*/
} else {
std::map<size_t, std::vector<token_positions_t>> array_token_positions;
posting_list_t::get_offsets(posting_lists, array_token_positions);
for (const auto& kv: array_token_positions) {
const std::vector<token_positions_t>& token_positions = kv.second;
if (token_positions.empty()) {
continue;
}
const Match &match = Match(seq_id, token_positions, false, prioritize_exact_match);
uint64_t this_match_score = match.get_match_score(total_cost, posting_lists.size());
// Within a field, only a subset of query tokens can match (unique_words), but even a smaller set
// might be available within the window used for proximity calculation (this_words_present)
auto this_words_present = ((this_match_score >> 32) & 0xFF);
auto unique_words = field_is_array ? this_words_present : ((this_match_score >> 40) & 0xFF);
auto typo_score = ((this_match_score >> 24) & 0xFF);
auto proximity = ((this_match_score >> 16) & 0xFF);
auto verbatim = ((this_match_score >> 8) & 0xFF);
auto offset_score = prioritize_token_position ? ((this_match_score >> 0) & 0xFF) : 0;
if(syn_orig_num_tokens != -1 && num_query_tokens == posting_lists.size()) {
unique_words = syn_orig_num_tokens;
this_words_present = syn_orig_num_tokens;
proximity = 100 - (syn_orig_num_tokens - 1);
}
uint64_t mod_match_score = (
(int64_t(this_words_present) << 40) |
(int64_t(unique_words) << 32) |
(int64_t(typo_score) << 24) |
(int64_t(proximity) << 16) |
(int64_t(verbatim) << 8) |
(int64_t(offset_score) << 0)
);
if(mod_match_score > match_score) {
match_score = mod_match_score;
}
/*std::ostringstream os;
os << "seq_id: " << seq_id << ", field_id: " << field_id
<< ", this_words_present: " << this_words_present
<< ", unique_words: " << unique_words
<< ", typo_score: " << typo_score
<< ", proximity: " << proximity
<< ", verbatim: " << verbatim
<< ", offset_score: " << offset_score
<< ", mod_match_score: " << mod_match_score
<< ", token_positions: " << token_positions.size()
<< ", num_query_tokens: " << num_query_tokens
<< ", posting_lists.size: " << posting_lists.size()
<< ", array_index: " << kv.first
<< std::endl;
LOG(INFO) << os.str();*/
}
}
//long long int timeNanos = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - begin).count();
//LOG(INFO) << "Time taken for results iteration: " << timeNanos << "ms";
return 0;
}
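// Scores a single document for one query combination and adds it straight to the topster:
// geo distances are computed for geopoint sort fields, the text match score is derived from the
// posting list offsets (or from a single-token verbatim match) and the sort scores are filled in
// for up to three sort fields before the KV is added.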
void Index::score_results(const std::vector<sort_by> & sort_fields, const uint16_t & query_index,
const uint8_t & field_id, const bool field_is_array, const uint32_t total_cost,
Topster*& topster,
const std::vector<art_leaf *> &query_suggestion,
spp::sparse_hash_map<uint64_t, uint32_t>& groups_processed,
const uint32_t seq_id, const int sort_order[3],
std::array<spp::sparse_hash_map<uint32_t, int64_t, Hasher32>*, 3> field_values,
const std::vector<size_t>& geopoint_indices,
const size_t group_limit, const std::vector<std::string>& group_by_fields,
const bool group_missing_values,
const uint32_t token_bits,
const bool prioritize_exact_match,
const bool single_exact_query_token,
int syn_orig_num_tokens,
const std::vector<posting_list_t::iterator_t>& posting_lists) const {
int64_t geopoint_distances[3];
for(auto& i: geopoint_indices) {
auto geopoints = field_values[i];
int64_t dist = INT32_MAX;
S2LatLng reference_lat_lng;
GeoPoint::unpack_lat_lng(sort_fields[i].geopoint, reference_lat_lng);
if(geopoints != nullptr) {
auto it = geopoints->find(seq_id);
if(it != geopoints->end()) {
int64_t packed_latlng = it->second;
S2LatLng s2_lat_lng;
GeoPoint::unpack_lat_lng(packed_latlng, s2_lat_lng);
dist = GeoPoint::distance(s2_lat_lng, reference_lat_lng);
}
} else {
// indicates geo point array
auto field_it = geo_array_index.at(sort_fields[i].name);
auto it = field_it->find(seq_id);
if(it != field_it->end()) {
int64_t* latlngs = it->second;
for(size_t li = 0; li < latlngs[0]; li++) {
S2LatLng s2_lat_lng;
int64_t packed_latlng = latlngs[li + 1];
GeoPoint::unpack_lat_lng(packed_latlng, s2_lat_lng);
int64_t this_dist = GeoPoint::distance(s2_lat_lng, reference_lat_lng);
if(this_dist < dist) {
dist = this_dist;
}
}
}
}
if(dist < sort_fields[i].exclude_radius) {
dist = 0;
}
if(sort_fields[i].geo_precision > 0) {
dist = dist + sort_fields[i].geo_precision - 1 -
(dist + sort_fields[i].geo_precision - 1) % sort_fields[i].geo_precision;
}
geopoint_distances[i] = dist;
// Swap (id -> latlong) index to (id -> distance) index
field_values[i] = &geo_sentinel_value;
}
//auto begin = std::chrono::high_resolution_clock::now();
//const std::string first_token((const char*)query_suggestion[0]->key, query_suggestion[0]->key_len-1);
uint64_t match_score = 0;
if (posting_lists.size() <= 1) {
const uint8_t is_verbatim_match = uint8_t(
prioritize_exact_match && single_exact_query_token &&
posting_list_t::is_single_token_verbatim_match(posting_lists[0], field_is_array)
);
size_t words_present = (syn_orig_num_tokens == -1) ? 1 : syn_orig_num_tokens;
size_t distance = (syn_orig_num_tokens == -1) ? 0 : syn_orig_num_tokens-1;
Match single_token_match = Match(words_present, distance, is_verbatim_match);
match_score = single_token_match.get_match_score(total_cost, words_present);
} else {
std::map<size_t, std::vector<token_positions_t>> array_token_positions;
posting_list_t::get_offsets(posting_lists, array_token_positions);
// NOTE: the tokens-found count returned by the matcher covers only the best matched window, so we still
// have to consider unique tokens that are spread across the text.
uint32_t unique_tokens_found = __builtin_popcount(token_bits);
if(syn_orig_num_tokens != -1) {
unique_tokens_found = syn_orig_num_tokens;
}
for (const auto& kv: array_token_positions) {
const std::vector<token_positions_t>& token_positions = kv.second;
if (token_positions.empty()) {
continue;
}
const Match &match = Match(seq_id, token_positions, false, prioritize_exact_match);
uint64_t this_match_score = match.get_match_score(total_cost, unique_tokens_found);
auto this_words_present = ((this_match_score >> 24) & 0xFF);
auto typo_score = ((this_match_score >> 16) & 0xFF);
auto proximity = ((this_match_score >> 8) & 0xFF);
auto verbatim = (this_match_score & 0xFF);
if(syn_orig_num_tokens != -1) {
this_words_present = syn_orig_num_tokens;
proximity = 100 - (syn_orig_num_tokens - 1);
}
uint64_t mod_match_score = (
(int64_t(unique_tokens_found) << 32) |
(int64_t(this_words_present) << 24) |
(int64_t(typo_score) << 16) |
(int64_t(proximity) << 8) |
(int64_t(verbatim) << 0)
);
if(mod_match_score > match_score) {
match_score = mod_match_score;
}
/*std::ostringstream os;
os << name << ", total_cost: " << (255 - total_cost)
<< ", words_present: " << match.words_present
<< ", match_score: " << match_score
<< ", match.distance: " << match.distance
<< ", seq_id: " << seq_id << std::endl;
LOG(INFO) << os.str();*/
}
}
const int64_t default_score = INT64_MIN; // to handle fields that don't exist in the document (e.g. optional fields)
int64_t scores[3] = {0};
size_t match_score_index = 0;
// unrolled instead of a loop, since only the first three sort fields are considered
if (sort_fields.size() > 0) {
if (field_values[0] == &text_match_sentinel_value) {
scores[0] = int64_t(match_score);
match_score_index = 0;
} else if (field_values[0] == &seq_id_sentinel_value) {
scores[0] = seq_id;
} else if(field_values[0] == &geo_sentinel_value) {
scores[0] = geopoint_distances[0];
} else if(field_values[0] == &str_sentinel_value) {
scores[0] = str_sort_index.at(sort_fields[0].name)->rank(seq_id);
} else {
auto it = field_values[0]->find(seq_id);
scores[0] = (it == field_values[0]->end()) ? default_score : it->second;
}
if (sort_order[0] == -1) {
scores[0] = -scores[0];
}
}
if(sort_fields.size() > 1) {
if (field_values[1] == &text_match_sentinel_value) {
scores[1] = int64_t(match_score);
match_score_index = 1;
} else if (field_values[1] == &seq_id_sentinel_value) {
scores[1] = seq_id;
} else if(field_values[1] == &geo_sentinel_value) {
scores[1] = geopoint_distances[1];
} else if(field_values[1] == &str_sentinel_value) {
scores[1] = str_sort_index.at(sort_fields[1].name)->rank(seq_id);
} else {
auto it = field_values[1]->find(seq_id);
scores[1] = (it == field_values[1]->end()) ? default_score : it->second;
}
if (sort_order[1] == -1) {
scores[1] = -scores[1];
}
}
if(sort_fields.size() > 2) {
if (field_values[2] == &text_match_sentinel_value) {
scores[2] = int64_t(match_score);
match_score_index = 2;
} else if (field_values[2] == &seq_id_sentinel_value) {
scores[2] = seq_id;
} else if(field_values[2] == &geo_sentinel_value) {
scores[2] = geopoint_distances[2];
} else if(field_values[2] == &str_sentinel_value) {
scores[2] = str_sort_index.at(sort_fields[2].name)->rank(seq_id);
} else {
auto it = field_values[2]->find(seq_id);
scores[2] = (it == field_values[2]->end()) ? default_score : it->second;
}
if (sort_order[2] == -1) {
scores[2] = -scores[2];
}
}
uint64_t distinct_id = seq_id;
if(group_limit != 0) {
distinct_id = 1;
auto group_by_field_it_vec = get_group_by_field_iterators(group_by_fields);
for(auto& kv : group_by_field_it_vec) {
get_distinct_id(kv.it, seq_id, kv.is_array, group_missing_values, distinct_id);
}
}
//LOG(INFO) << "Seq id: " << seq_id << ", match_score: " << match_score;
KV kv(query_index, seq_id, distinct_id, match_score_index, scores);
int ret = topster->add(&kv);
if(group_limit != 0 && ret < 2) {
groups_processed[distinct_id]++;
}
//long long int timeNanos = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - begin).count();
//LOG(INFO) << "Time taken for results iteration: " << timeNanos << "ms";
}
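// Computes the group (distinct) id of a document by hash-combining the facet hashes of the
// group_by field pointed to by `facet_index_it`. When the document has no value for the field and
// `group_missing_values` is false, the document's own seq_id is used so that it forms a group of
// its own.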
void Index::get_distinct_id(posting_list_t::iterator_t& facet_index_it, const uint32_t seq_id, const bool is_array,
const bool group_missing_values, uint64_t& distinct_id, bool is_reverse) const {
if (!facet_index_it.valid()) {
if (!group_missing_values) {
distinct_id = seq_id;
}
return;
}
// calculate hash from group_by_fields
if(!is_reverse) {
facet_index_it.skip_to(seq_id);
} else {
facet_index_it.skip_to_rev(seq_id);
}
if (facet_index_it.valid() && facet_index_it.id() == seq_id) {
if (is_array) {
//LOG(INFO) << "combining hashes for facet array ";
std::vector<uint32_t> facet_hashes;
posting_list_t::get_offsets(facet_index_it, facet_hashes);
for (size_t i = 0; i < facet_hashes.size(); i++) {
distinct_id = StringUtils::hash_combine(distinct_id, facet_hashes[i]);
}
} else {
//LOG(INFO) << "combining hashes for facet ";
distinct_id = StringUtils::hash_combine(distinct_id, facet_index_it.offset());
}
}
//LOG(INFO) << "seq_id: " << seq_id << ", distinct_id: " << distinct_id;
if (distinct_id == 1 && !group_missing_values) {
distinct_id = seq_id;
}
return;
}
inline uint32_t Index::next_suggestion2(const std::vector<tok_candidates>& token_candidates_vec,
long long int n,
std::vector<token_t>& query_suggestion,
uint64& qhash) {
uint32_t total_cost = 0;
qhash = 1;
// generate the next combination from `token_leaves` and store it in `query_suggestion`
ldiv_t q { n, 0 };
for(size_t i = 0 ; i < token_candidates_vec.size(); i++) {
size_t token_size = token_candidates_vec[i].token.value.size();
q = ldiv(q.quot, token_candidates_vec[i].candidates.size());
const auto& candidate = token_candidates_vec[i].candidates[q.rem];
size_t typo_cost = token_candidates_vec[i].cost;
if (candidate.size() > 1 && !Tokenizer::is_ascii_char(candidate[0])) {
icu::UnicodeString ustr = icu::UnicodeString::fromUTF8(candidate);
auto code_point = ustr.char32At(0);
if(code_point >= 0x600 && code_point <= 0x6ff) {
// adjust typo cost for Arabic strings, since a 1-byte difference is not meaningful for multi-byte characters
if(typo_cost == 1) {
typo_cost = 2;
}
}
}
// we assume the token was found via prefix search if the candidate is longer than the token size plus typo tolerance
bool is_prefix_searched = token_candidates_vec[i].prefix_search &&
(candidate.size() > (token_size + typo_cost));
size_t actual_cost = (2 * typo_cost) + uint32_t(is_prefix_searched);
total_cost += actual_cost;
query_suggestion[i] = token_t(i, candidate, is_prefix_searched, token_size, typo_cost);
uint64_t this_hash = StringUtils::hash_wy(query_suggestion[i].value.c_str(), query_suggestion[i].value.size());
qhash = StringUtils::hash_combine(qhash, this_hash);
/*LOG(INFO) << "suggestion key: " << actual_query_suggestion[i]->key << ", token: "
<< token_candidates_vec[i].token.value << ", actual_cost: " << actual_cost;
LOG(INFO) << ".";*/
}
return total_cost;
}
inline uint32_t Index::next_suggestion(const std::vector<token_candidates> &token_candidates_vec,
long long int n,
std::vector<art_leaf *>& actual_query_suggestion,
std::vector<art_leaf *>& query_suggestion,
const int syn_orig_num_tokens,
uint32_t& token_bits,
uint64& qhash) {
uint32_t total_cost = 0;
qhash = 1;
// generate the next combination from `token_leaves` and store it in `query_suggestion`
ldiv_t q { n, 0 };
for(long long i = 0 ; i < (long long) token_candidates_vec.size(); i++) {
size_t token_size = token_candidates_vec[i].token.value.size();
q = ldiv(q.quot, token_candidates_vec[i].candidates.size());
actual_query_suggestion[i] = token_candidates_vec[i].candidates[q.rem];
query_suggestion[i] = token_candidates_vec[i].candidates[q.rem];
bool exact_match = token_candidates_vec[i].cost == 0 && token_size == actual_query_suggestion[i]->key_len-1;
bool incr_for_prefix_search = token_candidates_vec[i].prefix_search && !exact_match;
size_t actual_cost = (2 * token_candidates_vec[i].cost) + uint32_t(incr_for_prefix_search);
total_cost += actual_cost;
token_bits |= 1UL << token_candidates_vec[i].token.position; // sets n-th bit
uintptr_t addr_val = (uintptr_t) query_suggestion[i];
qhash = StringUtils::hash_combine(qhash, addr_val);
/*LOG(INFO) << "suggestion key: " << actual_query_suggestion[i]->key << ", token: "
<< token_candidates_vec[i].token.value << ", actual_cost: " << actual_cost;
LOG(INFO) << ".";*/
}
if(syn_orig_num_tokens != -1) {
token_bits = 0;
for(size_t i = 0; i < size_t(syn_orig_num_tokens); i++) {
token_bits |= 1UL << i;
}
}
return total_cost;
}
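// Erases `seq_id` from the posting list of `token` in the faceted ART index of the given field,
// deleting the leaf altogether once its posting list becomes empty.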
void Index::remove_facet_token(const field& search_field, spp::sparse_hash_map<std::string, art_tree*>& search_index,
const std::string& token, uint32_t seq_id) {
const unsigned char *key = (const unsigned char *) token.c_str();
int key_len = (int) (token.length() + 1);
const std::string& field_name = search_field.faceted_name();
art_leaf* leaf = (art_leaf *) art_search(search_index.at(field_name), key, key_len);
if(leaf != nullptr) {
posting_t::erase(leaf->values, seq_id);
if (posting_t::num_ids(leaf->values) == 0) {
void* values = art_delete(search_index.at(field_name), key, key_len);
posting_t::destroy_list(values);
}
}
}
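// Removes all in-memory index entries of a single field for the given document: string/infix,
// numerical or range, facet, geo, vector and sort index entries, depending on the field type.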
void Index::remove_field(uint32_t seq_id, nlohmann::json& document, const std::string& field_name,
const bool is_update) {
const auto& search_field_it = search_schema.find(field_name);
if(search_field_it == search_schema.end()) {
return;
}
const auto& search_field = search_field_it.value();
if(!search_field.index) {
return;
}
if(search_field.optional && document[field_name].is_null()) {
return ;
}
auto coerce_op = validator_t::coerce_element(search_field, document, document[field_name],
"", DIRTY_VALUES::COERCE_OR_REJECT);
if(!coerce_op.ok()) {
LOG(ERROR) << "Bad type for field " << field_name;
return ;
}
// Go through all the field names and find the keys+values so that they can be removed from in-memory index
if(search_field.type == field_types::STRING_ARRAY || search_field.type == field_types::STRING) {
std::vector<std::string> tokens;
tokenize_string_field(document, search_field, tokens, search_field.locale, symbols_to_index, token_separators);
for(size_t i = 0; i < tokens.size(); i++) {
const auto& token = tokens[i];
const unsigned char *key = (const unsigned char *) token.c_str();
int key_len = (int) (token.length() + 1);
art_leaf* leaf = (art_leaf *) art_search(search_index.at(field_name), key, key_len);
if(leaf != nullptr) {
posting_t::erase(leaf->values, seq_id);
if (posting_t::num_ids(leaf->values) == 0) {
void* values = art_delete(search_index.at(field_name), key, key_len);
posting_t::destroy_list(values);
if(search_field.infix) {
auto strhash = StringUtils::hash_wy(key, token.size());
const auto& infix_sets = infix_index.at(search_field.name);
infix_sets[strhash % 4]->erase(token);
}
}
}
}
} else if(search_field.is_int32()) {
const std::vector<int32_t>& values = search_field.is_single_integer() ?
std::vector<int32_t>{document[field_name].get<int32_t>()} :
document[field_name].get<std::vector<int32_t>>();
for(int32_t value: values) {
if (search_field.range_index) {
auto trie = range_index.at(field_name);
trie->remove(value, seq_id);
} else {
num_tree_t* num_tree = numerical_index.at(field_name);
num_tree->remove(value, seq_id);
}
if(search_field.facet) {
remove_facet_token(search_field, search_index, std::to_string(value), seq_id);
}
}
} else if(search_field.is_int64()) {
std::vector<int64_t> values;
std::vector<std::pair<uint32_t, uint32_t>> object_array_reference_values;
if (search_field.is_array() && search_field.nested && search_field.is_reference_helper) {
for (const auto &pair: document[field_name]) {
if (!pair.is_array() || pair.size() != 2 || !pair[0].is_number_unsigned() ||
!pair[1].is_number_unsigned()) {
LOG(ERROR) << "`" + field_name + "` object array reference helper field has wrong value `"
+ pair.dump() + "`.";
continue;
}
object_array_reference_values.emplace_back(seq_id, pair[0]);
values.emplace_back(pair[1]);
}
} else {
values = search_field.is_single_integer() ?
std::vector<int64_t>{document[field_name].get<int64_t>()} :
document[field_name].get<std::vector<int64_t>>();
}
for(int64_t value: values) {
if (search_field.range_index) {
auto trie = range_index.at(field_name);
trie->remove(value, seq_id);
} else {
num_tree_t* num_tree = numerical_index.at(field_name);
num_tree->remove(value, seq_id);
}
if(search_field.facet) {
remove_facet_token(search_field, search_index, std::to_string(value), seq_id);
}
if (reference_index.count(field_name) != 0) {
reference_index[field_name]->remove(seq_id, value);
}
}
for (auto const& pair: object_array_reference_values) {
object_array_reference_index[field_name]->erase(pair);
}
} else if(search_field.num_dim) {
if(!is_update) {
// since vector index supports upsert natively, we should not attempt to delete for update
vector_index[search_field.name]->vecdex->markDelete(seq_id);
}
} else if(search_field.is_float()) {
const std::vector<float>& values = search_field.is_single_float() ?
std::vector<float>{document[field_name].get<float>()} :
document[field_name].get<std::vector<float>>();
for(float value: values) {
int64_t fintval = float_to_int64_t(value);
if (search_field.range_index) {
auto trie = range_index.at(field_name);
trie->remove(fintval, seq_id);
} else {
num_tree_t* num_tree = numerical_index.at(field_name);
num_tree->remove(fintval, seq_id);
}
if(search_field.facet) {
remove_facet_token(search_field, search_index, StringUtils::float_to_str(value), seq_id);
}
}
} else if(search_field.is_bool()) {
const std::vector<bool>& values = search_field.is_single_bool() ?
std::vector<bool>{document[field_name].get<bool>()} :
document[field_name].get<std::vector<bool>>();
for(bool value: values) {
int64_t bool_int64 = value ? 1 : 0;
if (search_field.range_index) {
auto trie = range_index.at(field_name);
trie->remove(bool_int64, seq_id);
} else {
num_tree_t* num_tree = numerical_index.at(field_name);
num_tree->remove(bool_int64, seq_id);
}
if(search_field.facet) {
remove_facet_token(search_field, search_index, std::to_string(value), seq_id);
}
}
} else if(search_field.is_geopoint()) {
auto geopoint_range_index = geo_range_index[field_name];
S2RegionTermIndexer::Options options;
options.set_index_contains_points_only(true);
S2RegionTermIndexer indexer(options);
const std::vector<std::vector<double>>& latlongs = search_field.is_single_geopoint() ?
std::vector<std::vector<double>>{document[field_name].get<std::vector<double>>()} :
document[field_name].get<std::vector<std::vector<double>>>();
for(const std::vector<double>& latlong: latlongs) {
S2Point point = S2LatLng::FromDegrees(latlong[0], latlong[1]).ToPoint();
auto cell = S2CellId(point);
geopoint_range_index->delete_geopoint(cell.id(), seq_id);
}
if(!search_field.is_single_geopoint()) {
spp::sparse_hash_map<uint32_t, int64_t*>*& field_geo_array_map = geo_array_index.at(field_name);
auto geo_array_it = field_geo_array_map->find(seq_id);
if(geo_array_it != field_geo_array_map->end()) {
delete [] geo_array_it->second;
field_geo_array_map->erase(seq_id);
}
}
}
// remove facets
facet_index_v4->remove(document, search_field, seq_id);
// remove sort field
if(sort_index.count(field_name) != 0) {
sort_index[field_name]->erase(seq_id);
}
if(str_sort_index.count(field_name) != 0) {
str_sort_index[field_name]->remove(seq_id);
}
}
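// Removes a document from the index: either only the explicitly passed `del_fields` or every
// field present in the document. Per-field failures are logged and skipped; the seq_id itself
// is released only when this is not part of an update.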
Option<uint32_t> Index::remove(const uint32_t seq_id, nlohmann::json & document,
const std::vector<field>& del_fields, const bool is_update) {
std::unique_lock lock(mutex);
// Exceptions during removal are mostly caused by an edge case with auto schema detection:
// a value indexed as type T whose field is later dropped and re-indexed as another type X
// makes the on-disk data differ from the newly detected schema type. We log the error,
// but ignore the field and proceed, since no leak is caused here.
if(!del_fields.empty()) {
for(auto& the_field: del_fields) {
if(!document.contains(the_field.name)) {
// could be an optional field
continue;
}
try {
remove_field(seq_id, document, the_field.name, is_update);
} catch(const std::exception& e) {
LOG(WARNING) << "Error while removing field `" << the_field.name << "` from document, message: "
<< e.what();
}
}
} else {
for(auto it = document.begin(); it != document.end(); ++it) {
const std::string& field_name = it.key();
try {
remove_field(seq_id, document, field_name, is_update);
} catch(const std::exception& e) {
LOG(WARNING) << "Error while removing field `" << field_name << "` from document, message: "
<< e.what();
}
}
}
if(!is_update) {
seq_ids->erase(seq_id);
}
return Option<uint32_t>(seq_id);
}
void Index::tokenize_string_field(const nlohmann::json& document, const field& search_field,
std::vector<std::string>& tokens, const std::string& locale,
const std::vector<char>& symbols_to_index,
const std::vector<char>& token_separators) {
const std::string& field_name = search_field.name;
if(search_field.type == field_types::STRING) {
Tokenizer(document[field_name], true, false, locale, symbols_to_index, token_separators).tokenize(tokens);
} else if(search_field.type == field_types::STRING_ARRAY) {
const std::vector<std::string>& values = document[field_name].get<std::vector<std::string>>();
for(const std::string & value: values) {
Tokenizer(value, true, false, locale, symbols_to_index, token_separators).tokenize(tokens);
}
}
}
art_leaf* Index::get_token_leaf(const std::string & field_name, const unsigned char* token, uint32_t token_len) {
std::shared_lock lock(mutex);
const art_tree *t = search_index.at(field_name);
return (art_leaf*) art_search(t, token, (int) token_len);
}
const spp::sparse_hash_map<std::string, art_tree *> &Index::_get_search_index() const {
return search_index;
}
const spp::sparse_hash_map<std::string, num_tree_t*>& Index::_get_numerical_index() const {
return numerical_index;
}
const spp::sparse_hash_map<std::string, NumericTrie*>& Index::_get_range_index() const {
return range_index;
}
const spp::sparse_hash_map<std::string, array_mapped_infix_t>& Index::_get_infix_index() const {
return infix_index;
};
const spp::sparse_hash_map<std::string, hnsw_index_t*>& Index::_get_vector_index() const {
return vector_index;
}
facet_index_t* Index::_get_facet_index() const {
return facet_index_v4;
}
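// Creates index structures for newly added fields (ART trees, num/range trees, geo, vector,
// sort, infix and facet indexes) and tears down the corresponding structures for dropped fields.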
void Index::refresh_schemas(const std::vector<field>& new_fields, const std::vector<field>& del_fields) {
std::unique_lock lock(mutex);
for(const auto & new_field: new_fields) {
if(!new_field.index || new_field.is_dynamic()) {
continue;
}
search_schema.emplace(new_field.name, new_field);
if(new_field.type == field_types::FLOAT_ARRAY && new_field.num_dim > 0) {
auto hnsw_index = new hnsw_index_t(new_field.num_dim, 16, new_field.vec_dist, new_field.hnsw_params["M"].get<uint32_t>(), new_field.hnsw_params["ef_construction"].get<uint32_t>());
vector_index.emplace(new_field.name, hnsw_index);
continue;
}
if(new_field.is_sortable()) {
if(new_field.is_num_sortable()) {
auto doc_to_score = new spp::sparse_hash_map<uint32_t, int64_t, Hasher32>();
sort_index.emplace(new_field.name, doc_to_score);
} else if(new_field.is_str_sortable()) {
str_sort_index.emplace(new_field.name, new adi_tree_t);
}
}
if(search_index.count(new_field.name) == 0) {
if(new_field.is_string() || field_types::is_string_or_array(new_field.type)) {
art_tree *t = new art_tree;
art_tree_init(t);
search_index.emplace(new_field.name, t);
} else if(new_field.is_geopoint()) {
geo_range_index.emplace(new_field.name, new NumericTrie(32));
if(!new_field.is_single_geopoint()) {
auto geo_array_map = new spp::sparse_hash_map<uint32_t, int64_t*>();
geo_array_index.emplace(new_field.name, geo_array_map);
}
} else {
if (new_field.range_index) {
auto trie = new_field.is_bool() ? new NumericTrie(8) :
new_field.is_int32() ? new NumericTrie(32) : new NumericTrie(64);
range_index.emplace(new_field.name, trie);
} else {
num_tree_t* num_tree = new num_tree_t;
numerical_index.emplace(new_field.name, num_tree);
}
}
}
if(new_field.is_facet()) {
initialize_facet_indexes(new_field);
// initialize for non-string facet fields
if(!new_field.is_string()) {
art_tree *ft = new art_tree;
art_tree_init(ft);
search_index.emplace(new_field.faceted_name(), ft);
}
}
if(new_field.infix) {
array_mapped_infix_t infix_sets(ARRAY_INFIX_DIM);
for(auto& infix_set: infix_sets) {
infix_set = new tsl::htrie_set<char>();
}
infix_index.emplace(new_field.name, infix_sets);
}
}
for(const auto & del_field: del_fields) {
if(search_schema.count(del_field.name) == 0) {
// could be a dynamic field
continue;
}
search_schema.erase(del_field.name);
if(!del_field.index) {
continue;
}
if(del_field.is_string() || field_types::is_string_or_array(del_field.type)) {
art_tree_destroy(search_index[del_field.name]);
delete search_index[del_field.name];
search_index.erase(del_field.name);
} else if(del_field.is_geopoint()) {
delete geo_range_index[del_field.name];
geo_range_index.erase(del_field.name);
if(!del_field.is_single_geopoint()) {
spp::sparse_hash_map<uint32_t, int64_t*>* geo_array_map = geo_array_index[del_field.name];
for(auto& kv: *geo_array_map) {
delete [] kv.second;
}
delete geo_array_map;
geo_array_index.erase(del_field.name);
}
} else {
if (del_field.range_index) {
delete range_index[del_field.name];
range_index.erase(del_field.name);
} else {
delete numerical_index[del_field.name];
numerical_index.erase(del_field.name);
}
}
if(del_field.is_sortable()) {
if(del_field.is_num_sortable()) {
delete sort_index[del_field.name];
sort_index.erase(del_field.name);
} else if(del_field.is_str_sortable()) {
delete str_sort_index[del_field.name];
str_sort_index.erase(del_field.name);
}
}
if(del_field.is_facet()) {
facet_index_v4->erase(del_field.name);
if(!del_field.is_string()) {
art_tree_destroy(search_index[del_field.faceted_name()]);
delete search_index[del_field.faceted_name()];
search_index.erase(del_field.faceted_name());
}
}
if(del_field.infix) {
auto& infix_set = infix_index[del_field.name];
for(size_t i = 0; i < infix_set.size(); i++) {
delete infix_set[i];
}
infix_index.erase(del_field.name);
}
if(del_field.num_dim) {
auto hnsw_index = vector_index[del_field.name];
delete hnsw_index;
vector_index.erase(del_field.name);
}
}
}
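// Applies `$operations` directives from an update document. Currently only `increment` on
// int32 fields is supported; the computed value replaces the operation in `update_doc`.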
void Index::handle_doc_ops(const tsl::htrie_map<char, field>& search_schema,
nlohmann::json& update_doc, const nlohmann::json& old_doc) {
/*
{
"$operations": {
"increment": {"likes": 1, "views": 20}
}
}
*/
auto ops_it = update_doc.find("$operations");
if(ops_it != update_doc.end()) {
const auto& operations = ops_it.value();
if(operations.contains("increment") && operations["increment"].is_object()) {
for(const auto& item: operations["increment"].items()) {
auto field_it = search_schema.find(item.key());
if(field_it != search_schema.end()) {
if(field_it->type == field_types::INT32 && item.value().is_number_integer()) {
int32_t existing_value = 0;
if(old_doc.contains(item.key())) {
existing_value = old_doc[item.key()].get<int32_t>();
}
auto updated_value = existing_value + item.value().get<int32_t>();
update_doc[item.key()] = updated_value;
}
}
}
}
update_doc.erase("$operations");
}
}
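// Computes the effective new document and the entries that must be removed from the index
// (`del_doc`) for an update/upsert, pruning null and unchanged values from `update_doc`.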
void Index::get_doc_changes(const index_operation_t op, const tsl::htrie_map<char, field>& embedding_fields,
nlohmann::json& update_doc, const nlohmann::json& old_doc, nlohmann::json& new_doc,
nlohmann::json& del_doc) {
if(op == UPSERT) {
new_doc = update_doc;
new_doc.merge_patch(update_doc); // ensures that null valued keys are deleted
// since UPSERT could replace a doc with lesser fields, we have to add those missing fields to del_doc
for(auto it = old_doc.begin(); it != old_doc.end(); ++it) {
if(it.value().is_object() || (it.value().is_array() && (it.value().empty() || it.value()[0].is_object()))) {
continue;
}
if(!update_doc.contains(it.key())) {
// embedding field won't be part of upsert doc so populate new doc with the value from old doc
if(embedding_fields.count(it.key()) != 0) {
new_doc[it.key()] = it.value();
} else {
del_doc[it.key()] = it.value();
}
}
}
} else {
new_doc = old_doc;
new_doc.merge_patch(update_doc);
if(old_doc.contains(".flat")) {
new_doc[".flat"] = old_doc[".flat"];
for(auto& fl: update_doc[".flat"]) {
new_doc[".flat"].push_back(fl);
}
}
}
auto it = update_doc.begin();
while(it != update_doc.end()) {
if(it.value().is_object() || (it.value().is_array() && !it.value().empty() && it.value()[0].is_object())) {
++it;
continue;
}
if(it.value().is_null()) {
// null values should not be indexed
new_doc.erase(it.key());
if(old_doc.contains(it.key()) && !old_doc[it.key()].is_null()) {
del_doc[it.key()] = old_doc[it.key()];
}
it = update_doc.erase(it);
continue;
}
if(old_doc.contains(it.key())) {
if(old_doc[it.key()] == it.value()) {
// unchanged so should not be part of update doc
it = update_doc.erase(it);
continue;
} else {
// delete this old value from index
del_doc[it.key()] = old_doc[it.key()];
}
}
it++;
}
}
size_t Index::num_seq_ids() const {
std::shared_lock lock(mutex);
return seq_ids->num_ids();
}
Option<bool> Index::seq_ids_outside_top_k(const std::string& field_name, size_t k,
std::vector<uint32_t>& outside_seq_ids) {
std::shared_lock lock(mutex);
auto field_it = numerical_index.find(field_name);
if(field_it != numerical_index.end()) {
field_it->second->seq_ids_outside_top_k(k, outside_seq_ids);
return Option<bool>(true);
}
auto range_trie_it = range_index.find(field_name);
if (range_trie_it != range_index.end()) {
range_trie_it->second->seq_ids_outside_top_k(k, outside_seq_ids);
return Option<bool>(true);
}
return Option<bool>(400, "Field `" + field_name + "` not found in numerical index.");
}
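// Attempts to resolve queries where spaces were typed incorrectly: first checks for verbatim
// matches, then tries joining all tokens, joining adjacent token pairs, and finally splitting
// individual tokens into two valid tokens that co-occur in some document.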
void Index::resolve_space_as_typos(std::vector<std::string>& qtokens, const string& field_name,
std::vector<std::vector<std::string>>& resolved_queries) const {
auto tree_it = search_index.find(field_name);
if(tree_it == search_index.end()) {
return ;
}
// we will try to find a verbatim match first
art_tree* t = tree_it->second;
std::vector<art_leaf*> leaves;
for(const std::string& token: qtokens) {
art_leaf* leaf = (art_leaf *) art_search(t, (const unsigned char*) token.c_str(),
token.length()+1);
if(leaf == nullptr) {
break;
}
leaves.push_back(leaf);
}
// When we cannot find a verbatim match, we try concatenating and splitting query tokens to generate alternatives.
// Concatenation:
size_t qtokens_size = std::min<size_t>(5, qtokens.size()); // only first 5 tokens will be considered
if(qtokens.size() > 1) {
// a) join all tokens to form a single string
const string& all_tokens_query = StringUtils::join(qtokens, "");
if(art_search(t, (const unsigned char*) all_tokens_query.c_str(), all_tokens_query.length()+1) != nullptr) {
resolved_queries.push_back({all_tokens_query});
return;
}
// b) join 2 adjacent tokens in a sliding window (only applies when there are more than 2 tokens)
for(size_t i = 0; i < qtokens_size-1 && qtokens_size > 2; i++) {
std::vector<std::string> candidate_tokens;
for(size_t j = 0; j < i; j++) {
candidate_tokens.push_back(qtokens[j]);
}
std::string joined_tokens = qtokens[i] + qtokens[i+1];
candidate_tokens.push_back(joined_tokens);
for(size_t j = i+2; j < qtokens.size(); j++) {
candidate_tokens.push_back(qtokens[j]);
}
leaves.clear();
for(auto& token: candidate_tokens) {
art_leaf* leaf = static_cast<art_leaf*>(art_search(t, (const unsigned char*) token.c_str(),
token.length() + 1));
if(leaf == nullptr) {
break;
}
leaves.push_back(leaf);
}
if(candidate_tokens.size() == leaves.size() && common_results_exist(leaves, false)) {
resolved_queries.push_back(candidate_tokens);
return;
}
}
}
// concats did not work, we will try splitting individual tokens
for(size_t i = 0; i < qtokens_size; i++) {
std::vector<std::string> candidate_tokens;
for(size_t j = 0; j < i; j++) {
candidate_tokens.push_back(qtokens[j]);
}
const std::string& token = qtokens[i];
bool found_split = false;
for(size_t ci = 1; ci < token.size(); ci++) {
std::string first_part = token.substr(0, token.size()-ci);
art_leaf* first_leaf = static_cast<art_leaf*>(art_search(t, (const unsigned char*) first_part.c_str(),
first_part.length() + 1));
if(first_leaf != nullptr) {
// check if rest of the string is also a valid token
std::string second_part = token.substr(token.size()-ci, ci);
art_leaf* second_leaf = static_cast<art_leaf*>(art_search(t, (const unsigned char*) second_part.c_str(),
second_part.length() + 1));
std::vector<art_leaf*> part_leaves = {first_leaf, second_leaf};
if(second_leaf != nullptr && common_results_exist(part_leaves, true)) {
candidate_tokens.push_back(first_part);
candidate_tokens.push_back(second_part);
found_split = true;
break;
}
}
}
if(!found_split) {
continue;
}
for(size_t j = i+1; j < qtokens.size(); j++) {
candidate_tokens.push_back(qtokens[j]);
}
leaves.clear();
for(auto& candidate_token: candidate_tokens) {
art_leaf* leaf = static_cast<art_leaf*>(art_search(t, (const unsigned char*) candidate_token.c_str(),
candidate_token.length() + 1));
if(leaf == nullptr) {
break;
}
leaves.push_back(leaf);
}
if(common_results_exist(leaves, false)) {
resolved_queries.push_back(candidate_tokens);
return;
}
}
}
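// Returns true if all given leaves share at least one document id; when `must_match_phrase`
// is set, the shared ids must additionally contain the tokens as a phrase.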
bool Index::common_results_exist(std::vector<art_leaf*>& leaves, bool must_match_phrase) const {
std::vector<uint32_t> result_ids;
std::vector<void*> leaf_vals;
for(auto leaf: leaves) {
leaf_vals.push_back(leaf->values);
}
posting_t::intersect(leaf_vals, result_ids);
if(result_ids.empty()) {
return false;
}
if(!must_match_phrase) {
return !result_ids.empty();
}
uint32_t* phrase_ids = new uint32_t[result_ids.size()];
size_t num_phrase_ids = 0;
posting_t::get_phrase_matches(leaf_vals, false, &result_ids[0], result_ids.size(),
phrase_ids, num_phrase_ids);
bool phrase_exists = (num_phrase_ids != 0);
delete [] phrase_ids;
return phrase_exists;
}
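// Collects the text / image values to be embedded for every auto-embedding field, runs them
// through the configured embedders in batches, and writes the resulting vectors (averaged when
// a record contributes multiple values) back into the documents.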
void Index::batch_embed_fields(std::vector<index_record*>& records,
const tsl::htrie_map<char, field>& embedding_fields,
const tsl::htrie_map<char, field> & search_schema, const size_t remote_embedding_batch_size,
const size_t remote_embedding_timeout_ms, const size_t remote_embedding_num_tries) {
for(const auto& field : embedding_fields) {
std::vector<std::pair<index_record*, std::string>> values_to_embed_text, values_to_embed_image;
auto indexing_prefix = EmbedderManager::get_instance().get_indexing_prefix(field.embed[fields::model_config]);
for(auto& record : records) {
if(!record->indexed.ok()) {
continue;
}
nlohmann::json* document;
if(record->is_update) {
document = &record->new_doc;
} else {
document = &record->doc;
}
if(document == nullptr) {
continue;
}
if(document->contains(field.name) && !record->is_update) {
// embedding already exists (could be a restore from export)
continue;
}
std::string value = indexing_prefix;
const auto& embed_from = field.embed[fields::from].get<std::vector<std::string>>();
for(const auto& field_name : embed_from) {
auto field_it = search_schema.find(field_name);
auto doc_field_it = document->find(field_name);
if(doc_field_it == document->end()) {
continue;
}
if(field_it.value().type == field_types::IMAGE) {
values_to_embed_image.push_back(std::make_pair(record, doc_field_it->get<std::string>()));
continue;
}
if(field_it.value().type == field_types::STRING) {
value += doc_field_it->get<std::string>() + " ";
} else if(field_it.value().type == field_types::STRING_ARRAY) {
for(const auto& val : *(doc_field_it)) {
value += val.get<std::string>() + " ";
}
}
}
if(value != indexing_prefix) {
values_to_embed_text.push_back(std::make_pair(record, value));
}
}
if(values_to_embed_text.empty() && values_to_embed_image.empty()) {
continue;
}
std::vector<embedding_res_t> embeddings_text, embeddings_image;
// sort texts by length
if(!values_to_embed_text.empty()) {
std::sort(values_to_embed_text.begin(), values_to_embed_text.end(),
[](const std::pair<index_record*, std::string>& a,
const std::pair<index_record*, std::string>& b) {
return a.second.size() < b.second.size();
});
}
// get vector of values
std::vector<std::string> values_text, values_image;
std::unordered_set<index_record*> records_to_index;
for(const auto& value_to_embed : values_to_embed_text) {
values_text.push_back(value_to_embed.second);
records_to_index.insert(value_to_embed.first);
}
for(const auto& value_to_embed : values_to_embed_image) {
values_image.push_back(value_to_embed.second);
records_to_index.insert(value_to_embed.first);
}
EmbedderManager& embedder_manager = EmbedderManager::get_instance();
if(!values_image.empty()) {
auto embedder_op = embedder_manager.get_image_embedder(field.embed[fields::model_config]);
if(!embedder_op.ok()) {
const std::string& error_msg = "Could not find image embedder for model: " + field.embed[fields::model_config][fields::model_name].get<std::string>();
for(auto& record : records) {
record->index_failure(400, error_msg);
}
LOG(ERROR) << "Error: " << error_msg;
return;
}
embeddings_image = embedder_op.get()->batch_embed(values_image);
}
if(!values_text.empty()) {
auto embedder_op = embedder_manager.get_text_embedder(field.embed[fields::model_config]);
if(!embedder_op.ok()) {
LOG(ERROR) << "Error while getting embedder for model: " << field.embed[fields::model_config];
LOG(ERROR) << "Error: " << embedder_op.error();
return;
}
embeddings_text = embedder_op.get()->batch_embed(values_text, remote_embedding_batch_size, remote_embedding_timeout_ms,
remote_embedding_num_tries);
}
for(auto& record: records_to_index) {
size_t count = 0;
if(!values_to_embed_text.empty()) {
process_embed_results(values_to_embed_text, record, embeddings_text, count, field);
}
if(!values_to_embed_image.empty()) {
process_embed_results(values_to_embed_image, record, embeddings_image, count, field);
}
if(count > 1) {
auto& doc = record->is_update ? record->new_doc : record->doc;
std::vector<float> existing_embedding = doc[field.name].get<std::vector<float>>();
// average embeddings
for(size_t i = 0; i < existing_embedding.size(); i++) {
existing_embedding[i] /= count;
}
doc[field.name] = existing_embedding;
}
}
}
}
void Index::process_embed_results(std::vector<std::pair<index_record*, std::string>>& values_to_embed,
const index_record* record,
const std::vector<embedding_res_t>& embedding_results,
size_t& count, const field& the_field) {
for(size_t i = 0; i < values_to_embed.size(); i++) {
auto& value_to_embed = values_to_embed[i];
if(record == value_to_embed.first) {
if(!value_to_embed.first->embedding_res.empty()) {
continue;
}
if(!embedding_results[i].success) {
value_to_embed.first->embedding_res = embedding_results[i].error;
value_to_embed.first->index_failure(embedding_results[i].status_code, "");
continue;
}
std::vector<float> embedding_vals;
auto& doc = value_to_embed.first->is_update ? value_to_embed.first->new_doc : value_to_embed.first->doc;
if(doc.count(the_field.name) == 0) {
embedding_vals = embedding_results[i].embedding;
} else {
std::vector<float> existing_embedding = doc[the_field.name].get<std::vector<float>>();
// accumulate embeddings
for(size_t j = 0; j < existing_embedding.size(); j++) {
existing_embedding[j] += embedding_results[i].embedding[j];
}
embedding_vals = existing_embedding;
}
doc[the_field.name] = embedding_vals;
count++;
}
}
}
void Index::repair_hnsw_index() {
std::vector<std::string> vector_fields;
// this lock ensures that the `vector_index` map is not mutated during read
std::shared_lock read_lock(mutex);
for(auto& vec_kv: vector_index) {
vector_fields.push_back(vec_kv.first);
}
read_lock.unlock();
for(const auto& vector_field: vector_fields) {
read_lock.lock();
if(vector_index.count(vector_field) != 0) {
// this lock ensures that the vector index is not dropped during repair
std::unique_lock lock(vector_index[vector_field]->repair_m);
read_lock.unlock(); // release this lock since repair is a long running operation
vector_index[vector_field]->vecdex->repair_zero_indegree();
} else {
read_lock.unlock();
}
}
}
int64_t Index::reference_string_sort_score(const string &field_name, const uint32_t &seq_id) const {
std::shared_lock lock(mutex);
return str_sort_index.at(field_name)->rank(seq_id);
}
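// Collects the document ids referenced by `seq_id` via the field's reference helper field,
// using the sort index for single references and the reference index for arrays.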
Option<bool> Index::get_related_ids(const string& field_name, const uint32_t& seq_id,
std::vector<uint32_t>& result) const {
std::shared_lock lock(mutex);
auto const reference_helper_field_name = field_name + fields::REFERENCE_HELPER_FIELD_SUFFIX;
if (search_schema.count(reference_helper_field_name) == 0) {
return Option<bool>(400, "Could not find `" + reference_helper_field_name + "` in the collection `" +
get_collection_name() + "`.");
}
auto const field_not_found_op = Option<bool>(400, "Could not find `" + reference_helper_field_name +
"` in the collection `" + get_collection_name() + "`.");
auto const no_match_op = Option<bool>(404, "Could not find `" + reference_helper_field_name + "` value for doc `" +
std::to_string(seq_id) + "`.");
if (search_schema.at(reference_helper_field_name).is_singular()) {
if (sort_index.count(reference_helper_field_name) == 0) {
return field_not_found_op;
}
auto const& ref_index = sort_index.at(reference_helper_field_name);
auto const it = ref_index->find(seq_id);
if (it == ref_index->end()) {
return no_match_op;
}
const uint32_t id = it->second;
if (id != Index::reference_helper_sentinel_value) {
result.emplace_back(id);
}
return Option<bool>(true);
}
if (reference_index.count(reference_helper_field_name) == 0) {
return field_not_found_op;
}
size_t ids_len = 0;
uint32_t* ids = nullptr;
reference_index.at(reference_helper_field_name)->search(EQUALS, seq_id, &ids, ids_len);
if (ids_len == 0) {
return no_match_op;
}
for (uint32_t i = 0; i < ids_len; i++) {
result.emplace_back(ids[i]);
}
delete [] ids;
return Option<bool>(true);
}
Option<bool> Index::get_object_array_related_id(const std::string& collection_name,
const std::string& field_name,
const uint32_t& seq_id, const uint32_t& object_index,
uint32_t& result) const {
std::shared_lock lock(mutex);
if (object_array_reference_index.count(field_name) == 0 || object_array_reference_index.at(field_name) == nullptr) {
return Option<bool>(404, "`" + field_name + "` not found in `" + collection_name +
".object_array_reference_index`");
} else if (object_array_reference_index.at(field_name)->count({seq_id, object_index}) == 0) {
return Option<bool>(400, "Key `{" + std::to_string(seq_id) + ", " + std::to_string(object_index) + "}`"
" not found in `" + collection_name + ".object_array_reference_index`");
}
result = object_array_reference_index.at(field_name)->at({seq_id, object_index});
return Option<bool>(true);
}
Option<uint32_t> Index::get_sort_index_value_with_lock(const std::string& field_name,
const uint32_t& seq_id) const {
std::shared_lock lock(mutex);
return get_sort_index_value(field_name, seq_id);
}
Option<uint32_t> Index::get_sort_index_value(const std::string& field_name,
const uint32_t& seq_id) const {
auto const reference_helper_field_name = field_name + fields::REFERENCE_HELPER_FIELD_SUFFIX;
if (search_schema.count(reference_helper_field_name) == 0) {
return Option<uint32_t>(400, "Could not find `" + reference_helper_field_name + "` in the collection `" +
get_collection_name() + "`.");
} else if (search_schema.at(reference_helper_field_name).is_array()) {
return Option<uint32_t>(400, "Cannot sort on `" + reference_helper_field_name + "` in the collection, `" +
get_collection_name() + "` is `" + search_schema.at(reference_helper_field_name).type + "`.");
} else if (sort_index.count(reference_helper_field_name) == 0 ||
sort_index.at(reference_helper_field_name)->count(seq_id) == 0) {
return Option<uint32_t>(404, "Could not find `" + reference_helper_field_name + "` value for doc `" +
std::to_string(seq_id) + "`.");
}
return Option<uint32_t>(sort_index.at(reference_helper_field_name)->at(seq_id));
}
Option<int64_t> Index::get_geo_distance_with_lock(const std::string& geo_field_name, const uint32_t& seq_id,
const S2LatLng& reference_lat_lng, const bool& round_distance) const {
std::unique_lock lock(mutex);
return get_geo_distance(geo_field_name, seq_id, reference_lat_lng, round_distance);
}
Option<int64_t> Index::get_geo_distance(const std::string& geo_field_name, const uint32_t& seq_id,
const S2LatLng& reference_lat_lng, const bool& round_distance) const {
int64_t distance = INT32_MAX;
if (sort_index.count(geo_field_name) != 0) {
auto& geo_index = sort_index.at(geo_field_name);
auto it = geo_index->find(seq_id);
if (it != geo_index->end()) {
int64_t packed_latlng = it->second;
S2LatLng s2_lat_lng;
GeoPoint::unpack_lat_lng(packed_latlng, s2_lat_lng);
distance = GeoPoint::distance(s2_lat_lng, reference_lat_lng);
}
} else if (geo_array_index.count(geo_field_name) != 0) {
// indicates geo point array
auto field_it = geo_array_index.at(geo_field_name);
auto it = field_it->find(seq_id);
if (it != field_it->end()) {
int64_t* latlngs = it->second;
for (size_t li = 0; li < latlngs[0]; li++) {
S2LatLng s2_lat_lng;
int64_t packed_latlng = latlngs[li + 1];
GeoPoint::unpack_lat_lng(packed_latlng, s2_lat_lng);
int64_t this_dist = GeoPoint::distance(s2_lat_lng, reference_lat_lng);
if (this_dist < distance) {
distance = this_dist;
}
}
}
} else {
return Option<int64_t>(400, "Could not find `" + geo_field_name + "` field in the index of `" + get_collection_name() +
"` collection.");
}
if (round_distance) {
distance = std::round((double)distance * 1000.0) / 1000.0;
}
return Option<int64_t>(distance);
}
std::string multiple_references_message(const std::string& coll_name, const uint32_t& seq_id,
const std::string& ref_coll_name) {
auto const& multiple_references_error_message = "`" + coll_name + "` collection's `Sequence ID: " +
std::to_string(seq_id) + "` document references multiple documents of `" +
ref_coll_name + "` collection.";
auto& cm = CollectionManager::get_instance();
auto coll = cm.get_collection(coll_name);
if (coll == nullptr) {
return multiple_references_error_message;
}
nlohmann::json doc;
auto get_op = coll->get_document_from_store(seq_id, doc);
if (!get_op.ok() || !doc.contains("id") || !doc.at("id").is_string()) {
return multiple_references_error_message;
}
return "`" + coll_name + "` collection's `id: " + doc.at("id").get<std::string>() +
"` document references multiple documents of `" + ref_coll_name + "` collection.";
}
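// Resolves the referenced document id that a reference `sort_by` should use for `seq_id`,
// following nested join collections one hop at a time.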
Option<uint32_t> Index::get_ref_seq_id(const sort_by& sort_field, const uint32_t& seq_id,
const std::map<std::string, reference_filter_result_t>& references,
std::string& ref_collection_name) const {
auto collection_name = get_collection_name_with_lock();
ref_collection_name = sort_field.reference_collection_name;
auto const* references_ptr = &(references);
auto ref_seq_id = seq_id;
if (sort_field.is_nested_join_sort_by()) {
// Get the reference doc_id by following through all the nested join collections.
for (size_t i = 0; i < sort_field.nested_join_collection_names.size() - 1; i++) {
ref_collection_name = sort_field.nested_join_collection_names[i];
auto get_ref_seq_id_op = get_ref_seq_id_helper(sort_field, ref_seq_id, collection_name, references_ptr,
ref_collection_name);
if (!get_ref_seq_id_op.ok() || get_ref_seq_id_op.get() == reference_helper_sentinel_value) { // No references found.
return get_ref_seq_id_op;
} else {
ref_seq_id = get_ref_seq_id_op.get();
}
}
ref_collection_name = sort_field.nested_join_collection_names.back();
}
return get_ref_seq_id_helper(sort_field, ref_seq_id, collection_name, references_ptr, ref_collection_name);
}
Option<uint32_t> Index::get_ref_seq_id_helper(const sort_by& sort_field, const uint32_t& seq_id, std::string& coll_name,
std::map<std::string, reference_filter_result_t> const*& references,
std::string& ref_coll_name) const {
uint32_t ref_seq_id = reference_helper_sentinel_value;
if (references != nullptr && references->count(ref_coll_name) > 0) { // Joined on ref collection
auto& ref_result = references->at(ref_coll_name);
auto const& count = ref_result.count;
if (count == 1) {
ref_seq_id = ref_result.docs[0];
references = ref_result.coll_to_references;
} else if (count > 1) {
return Option<uint32_t>(400, multiple_references_message(coll_name, seq_id, ref_coll_name));
}
} else {
auto& cm = CollectionManager::get_instance();
auto ref_collection = cm.get_collection(ref_coll_name);
if (ref_collection == nullptr) {
return Option<uint32_t>(400, "Referenced collection `" + ref_coll_name +
"` in `sort_by` not found.");
}
// Current collection has a reference.
if (ref_collection->is_referenced_in(coll_name)) {
auto get_reference_field_op = ref_collection->get_referenced_in_field_with_lock(coll_name);
if (!get_reference_field_op.ok()) {
return Option<uint32_t>(get_reference_field_op.code(), get_reference_field_op.error());
}
auto const& field_name = get_reference_field_op.get();
std::vector<uint32_t> ref_ids;
auto related_ids_op = Option<bool>(true);
if (coll_name == get_collection_name_with_lock()) {
related_ids_op = get_related_ids(field_name, seq_id, ref_ids);
} else {
auto prev_coll = cm.get_collection(coll_name);
if (prev_coll == nullptr) {
return Option<uint32_t>(400, "Referenced collection `" + coll_name +
"` in `sort_by` not found.");
}
related_ids_op = prev_coll->get_related_ids(field_name, seq_id, ref_ids);
}
if (!related_ids_op.ok()) {
if (related_ids_op.code() == 400) {
return Option<uint32_t>(400, related_ids_op.error());
}
} else if (ref_ids.size() > 1) {
return Option<uint32_t>(400, multiple_references_message(coll_name, seq_id, ref_coll_name));
} else if (ref_ids.size() == 1) {
ref_seq_id = ref_ids.front();
}
}
// Joined collection has a reference
else if (references != nullptr) {
std::string joined_coll_having_reference;
for (const auto &reference: *references) {
if (ref_collection->is_referenced_in(reference.first)) {
joined_coll_having_reference = reference.first;
break;
}
}
if (!joined_coll_having_reference.empty()) {
auto joined_collection = cm.get_collection(joined_coll_having_reference);
if (joined_collection == nullptr) {
return Option<uint32_t>(400, "Referenced collection `" + joined_coll_having_reference +
"` in `sort_by` not found.");
}
auto reference_field_name_op = ref_collection->get_referenced_in_field_with_lock(joined_coll_having_reference);
if (!reference_field_name_op.ok()) {
return Option<uint32_t>(reference_field_name_op.code(), reference_field_name_op.error());
}
auto const& reference_field_name = reference_field_name_op.get();
auto& ref_result = references->at(joined_coll_having_reference);
auto const& count = ref_result.count;
if (count == 1) {
std::vector<uint32_t> ref_ids;
auto related_ids_op = joined_collection->get_related_ids(reference_field_name, ref_result.docs[0],
ref_ids);
if (!related_ids_op.ok()) {
if (related_ids_op.code() == 400) {
return Option<uint32_t>(400, related_ids_op.error());
}
} else if (ref_ids.size() > 1) {
return Option<uint32_t>(400, multiple_references_message(joined_coll_having_reference,
ref_result.docs[0], ref_coll_name));
} else if (ref_ids.size() == 1) {
ref_seq_id = ref_ids.front();
references = ref_result.coll_to_references;
}
} else if (count > 1) {
return Option<uint32_t>(400, multiple_references_message(coll_name, seq_id,
joined_coll_having_reference));
}
}
}
}
coll_name = ref_coll_name;
return Option<uint32_t>(ref_seq_id);
}
Option<int64_t> Index::get_referenced_geo_distance(const sort_by& sort_field, uint32_t seq_id,
const std::map<basic_string<char>, reference_filter_result_t>& references,
const S2LatLng& reference_lat_lng, const bool& round_distance) const {
std::string ref_collection_name;
auto get_ref_seq_id_op = get_ref_seq_id(sort_field, seq_id, references, ref_collection_name);
if (!get_ref_seq_id_op.ok()) {
return Option<int64_t>(400, get_ref_seq_id_op.error());
} else if (get_ref_seq_id_op.get() == reference_helper_sentinel_value) { // No references found.
return Option<int64_t>(0);
} else {
seq_id = get_ref_seq_id_op.get();
}
auto& cm = CollectionManager::get_instance();
auto ref_collection = cm.get_collection(ref_collection_name);
if (ref_collection == nullptr) {
return Option<int64_t>(400, "Referenced collection `" + ref_collection_name + "` in `sort_by` not found.");
}
return ref_collection->get_geo_distance_with_lock(sort_field.name, seq_id, reference_lat_lng, round_distance);
}
void Index::get_top_k_result_ids(const std::vector<std::vector<KV*>>& raw_result_kvs,
std::vector<uint32_t>& result_ids) const{
for(const auto& group_kv : raw_result_kvs) {
for(const auto& kv : group_kv) {
result_ids.push_back(kv->key);
}
}
std::sort(result_ids.begin(), result_ids.end());
}
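// For hybrid search: fills in the missing score for each hit (text match score for hits found
// only via vector distance, vector distance for hits found only via keywords) and then blends
// keyword and semantic ranks into a fused score weighted by `vector_query.alpha`.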
void Index::compute_aux_scores(Topster *topster, const std::vector<search_field_t>& the_fields,
const std::vector<token_t>& query_tokens, uint16_t search_query_size,
const std::vector<sort_by>& sort_fields_std, const int* sort_order,
const vector_query_t& vector_query) const {
auto compute_text_match_aux_score = [&] (std::vector<KV*>& result_ids, size_t found_ids_offset) {
std::vector<posting_list_t::iterator_t> its;
std::vector<posting_list_t*> expanded_plists;
for (auto j = 0; j < query_tokens.size(); ++j) {
auto token_str = (const unsigned char *) query_tokens[j].value.c_str();
auto token_len = query_tokens[j].value.size();
for (auto i = 0; i < the_fields.size(); ++i) {
art_tree *tree = search_index.at(the_fields[i].str_name);
art_leaf *leaf = static_cast<art_leaf *>(art_search(tree, token_str,
token_len + 1));
if (!leaf) {
continue;
}
if (IS_COMPACT_POSTING(leaf->values)) {
auto compact_posting_list = COMPACT_POSTING_PTR(leaf->values);
posting_list_t *full_posting_list = compact_posting_list->to_full_posting_list();
expanded_plists.push_back(full_posting_list);
its.push_back(full_posting_list->new_iterator(nullptr, nullptr,
i)); // moved, not copied
} else {
posting_list_t *full_posting_list = (posting_list_t *) (leaf->values);
its.push_back(full_posting_list->new_iterator(nullptr, nullptr,
i)); // moved, not copied
}
}
}
if (!its.empty()) {
std::vector<posting_list_t::iterator_t> matching_its;
//sort the result ids
std::sort(result_ids.begin(), result_ids.end(), [&](const auto& kv1, const auto& kv2) {
return kv1->key < kv2->key;
});
for(auto& kv : result_ids) {
auto seq_id = kv->key;
matching_its.clear();
for(auto& it : its) {
if (it.valid()) {
it.skip_to(seq_id);
if(it.valid() && it.id() == seq_id) {
matching_its.push_back((it.clone()));
}
}
}
if(!matching_its.empty()) {
int64_t match_score = 0;
score_results2(sort_fields_std, search_query_size, 0, false, 0,
match_score, kv->key, sort_order, false, false, false, 1,
-1, matching_its);
kv->text_match_score = match_score;
}
}
}
for(posting_list_t* plist: expanded_plists) {
delete plist;
}
};
std::vector<KV *> text_match_ids;
for (auto &kv: topster->kv_map) {
if (kv.second->text_match_score == 0) {
//only found via vector distance, should compute text_match_score later
text_match_ids.push_back(kv.second);
} else if (kv.second->vector_distance == -1.0f) {
//only found via text_match, should compute vector distance
std::vector<float> values;
auto &field_vector_index = vector_index.at(vector_query.field_name);
try {
values = field_vector_index->vecdex->getDataByLabel<float>(kv.second->key);
} catch (...) {
// likely not found
continue;
}
float dist;
if (field_vector_index->distance_type == cosine) {
std::vector<float> normalized_q(vector_query.values.size());
hnsw_index_t::normalize_vector(vector_query.values, normalized_q);
dist = field_vector_index->space->get_dist_func()(normalized_q.data(),
values.data(),
&field_vector_index->num_dim);
} else {
dist = field_vector_index->space->get_dist_func()(vector_query.values.data(),
values.data(),
&field_vector_index->num_dim);
}
kv.second->vector_distance = dist;
}
}
if (!text_match_ids.empty()) {
compute_text_match_aux_score(text_match_ids, topster->kv_map.size() - text_match_ids.size() + 1);
}
//rerank results
std::unordered_map<int32_t, int32_t> semantic_seq_id_ranks;
std::unordered_map<int32_t, int32_t> keyword_seq_id_ranks;
std::vector<KV*> kvs;
for(const auto& kv : topster->kv_map) {
kvs.push_back(kv.second);
}
// compute ranks as per keyword search first
std::stable_sort(kvs.begin(), kvs.end(), [&](const auto& kv1, const auto& kv2) {
return std::tie(kv1->text_match_score, kv1->key) > std::tie(kv2->text_match_score, kv2->key);
});
for(auto i = 0; i < kvs.size(); ++i) {
keyword_seq_id_ranks.emplace(kvs[i]->key, i+1);
}
// compute ranks as per semantic search
std::stable_sort(kvs.begin(), kvs.end(), [&](const auto& kv1, const auto& kv2) {
return kv1->vector_distance < kv2->vector_distance;
});
for(auto i = 0; i < kvs.size(); ++i) {
semantic_seq_id_ranks.emplace(kvs[i]->key, i+1);
}
//compute fusion_score
for(auto& kv : topster->kv_map) {
auto seq_id = kv.second->key;
kv.second->scores[kv.second->match_score_index] = float_to_int64_t((1.0/keyword_seq_id_ranks[seq_id]) * (1.0 - vector_query.alpha) +
(1.0/semantic_seq_id_ranks[seq_id]) * vector_query.alpha);
}
}
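// Evaluates the gauss / exp / linear / diff decay functions for a `sort_by` clause, based on
// the document value's distance from `origin_val` after applying the configured offset.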
float Index::compute_decay_function_score(const sort_by& sort_field, uint32_t seq_id) const {
float res = 0.0f; // initialized to guard against the default (no-op) switch case
int64_t origin_distance_with_offset;
double variance;
auto sort_index_it = sort_index.find(sort_field.name);
auto val = get_doc_val_from_sort_index(sort_index_it, seq_id);
if(val == INT64_MAX) {
return INT64_MAX;
}
origin_distance_with_offset = std::abs(sort_field.origin_val - val) - sort_field.offset;
switch(sort_field.sort_by_param) {
case sort_by::gauss:
variance = std::pow(sort_field.scale,2)/(2 * std::log(sort_field.decay_val));
res = std::exp(std::pow(std::max((int64_t)0, origin_distance_with_offset), 2)/(2 * variance));
break;
case sort_by::exp:
variance = std::log(sort_field.decay_val)/sort_field.scale;
res = std::exp(variance * std::max((int64_t)0, origin_distance_with_offset));
break;
case sort_by::linear:
variance = sort_field.scale/(1.0 - sort_field.decay_val);
res = std::max((double)0.f, (variance - std::max((int64_t)0, origin_distance_with_offset))/variance);
break;
case sort_by::diff:
res = origin_distance_with_offset;
break;
default:
break;
}
return res;
}
/*
// https://stackoverflow.com/questions/924171/geo-fencing-point-inside-outside-polygon
// NOTE: polygon and point should have been transformed with `transform_for_180th_meridian`
bool Index::is_point_in_polygon(const Geofence& poly, const GeoCoord &point) {
int i, j;
bool c = false;
for (i = 0, j = poly.numVerts - 1; i < poly.numVerts; j = i++) {
if ((((poly.verts[i].lat <= point.lat) && (point.lat < poly.verts[j].lat))
|| ((poly.verts[j].lat <= point.lat) && (point.lat < poly.verts[i].lat)))
&& (point.lon < (poly.verts[j].lon - poly.verts[i].lon) * (point.lat - poly.verts[i].lat)
/ (poly.verts[j].lat - poly.verts[i].lat) + poly.verts[i].lon)) {
c = !c;
}
}
return c;
}
double Index::transform_for_180th_meridian(Geofence &poly) {
double offset = 0.0;
double maxLon = -1000, minLon = 1000;
for(int v=0; v < poly.numVerts; v++) {
if(poly.verts[v].lon < minLon) {
minLon = poly.verts[v].lon;
}
if(poly.verts[v].lon > maxLon) {
maxLon = poly.verts[v].lon;
}
if(std::abs(minLon - maxLon) > 180) {
offset = 360.0;
}
}
int i, j;
for (i = 0, j = poly.numVerts - 1; i < poly.numVerts; j = i++) {
if (poly.verts[i].lon < 0.0) {
poly.verts[i].lon += offset;
}
if (poly.verts[j].lon < 0.0) {
poly.verts[j].lon += offset;
}
}
return offset;
}
void Index::transform_for_180th_meridian(GeoCoord &point, double offset) {
point.lon = point.lon < 0.0 ? point.lon + offset : point.lon;
}
*/
| 375,475 | C++ | .cpp | 6,725 | 38.917621 | 233 | 0.520383 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,765 | array_utils.cpp | typesense_typesense/src/array_utils.cpp |
#include "array_utils.h"
#include <memory.h>
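// Intersects two sorted id arrays into a newly allocated `*results` buffer (owned by the caller)
// and returns the number of common elements.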
size_t ArrayUtils::and_scalar(const uint32_t *A, const size_t lenA,
const uint32_t *B, const size_t lenB, uint32_t **results) {
if (lenA == 0 || lenB == 0) {
return 0;
}
*results = new uint32_t[std::min(lenA, lenB)];
uint32_t *out = *results;
const uint32_t *const initout(out);
const uint32_t *endA = A + lenA;
const uint32_t *endB = B + lenB;
while (1) {
while (*A < *B) {
SKIP_FIRST_COMPARE:
if (++A == endA)
return (out - initout);
}
while (*A > *B) {
if (++B == endB)
return (out - initout);
}
if (*A == *B) {
*out++ = *A;
if (++A == endA || ++B == endB)
return (out - initout);
} else {
goto SKIP_FIRST_COMPARE;
}
}
return (out - initout); // NOTREACHED
}
// merges two sorted arrays and also removes duplicates
size_t ArrayUtils::or_scalar(const uint32_t *A, const size_t lenA,
const uint32_t *B, const size_t lenB, uint32_t **out) {
size_t indexA = 0, indexB = 0, res_index = 0;
if(A == nullptr && B == nullptr) {
return 0;
}
if(A == nullptr) {
*out = new uint32_t[lenB];
memcpy(*out, B, lenB * sizeof(uint32_t));
return lenB;
}
if(B == nullptr) {
*out = new uint32_t[lenA];
memcpy(*out, A, lenA * sizeof(uint32_t));
return lenA;
}
uint32_t* results = new uint32_t[lenA+lenB];
while (indexA < lenA && indexB < lenB) {
if (A[indexA] < B[indexB]) {
// check for duplicate
if(res_index == 0 || results[res_index-1] != A[indexA]) {
results[res_index] = A[indexA];
res_index++;
}
indexA++;
} else {
if(res_index == 0 || results[res_index-1] != B[indexB]) {
results[res_index] = B[indexB];
res_index++;
}
indexB++;
}
}
while (indexA < lenA) {
if(res_index == 0 || results[res_index-1] != A[indexA]) {
results[res_index] = A[indexA];
res_index++;
}
indexA++;
}
while (indexB < lenB) {
if(res_index == 0 || results[res_index-1] != B[indexB]) {
results[res_index] = B[indexB];
res_index++;
}
indexB++;
}
// shrink fit
*out = new uint32_t[res_index];
memcpy(*out, results, res_index * sizeof(uint32_t));
delete[] results;
return res_index;
}
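// Writes the elements of sorted array A that are not present in sorted array B into a newly
// allocated `*out` buffer and returns its length.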
size_t ArrayUtils::exclude_scalar(const uint32_t *A, const size_t lenA,
const uint32_t *B, const size_t lenB, uint32_t **out) {
size_t indexA = 0, indexB = 0, res_index = 0;
if(A == nullptr && B == nullptr) {
*out = nullptr;
return 0;
}
if(A == nullptr) {
*out = nullptr;
return 0;
}
if(lenB == 0 || B == nullptr) {
*out = new uint32_t[lenA];
memcpy(*out, A, lenA * sizeof(uint32_t));
return lenA;
}
uint32_t* results = new uint32_t[lenA];
while (indexA < lenA && indexB < lenB) {
if (A[indexA] < B[indexB]) {
results[res_index] = A[indexA];
res_index++;
indexA++;
} else if (A[indexA] == B[indexB]) {
indexA++;
indexB++;
} else {
indexB++;
}
}
while (indexA < lenA) {
results[res_index] = A[indexA];
res_index++;
indexA++;
}
// shrink fit
*out = new uint32_t[res_index];
memcpy(*out, results, res_index * sizeof(uint32_t));
delete[] results;
return res_index;
}
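// Advances `curr_index` within a sorted array: to the position of `id` when present (returns
// true), otherwise to the first element greater than `id`, possibly past the end (returns false).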
bool ArrayUtils::skip_index_to_id(uint32_t& curr_index, uint32_t const* const array, const uint32_t& array_len,
const uint32_t& id) {
if (curr_index >= array_len) {
return false;
}
if (id <= array[curr_index]) {
return id == array[curr_index];
}
long start = curr_index, mid, end = array_len - 1;
while (start <= end) {
mid = start + (end - start) / 2;
if (array[mid] == id) {
curr_index = mid;
return true;
} else if (array[mid] < id) {
start = mid + 1;
} else {
end = mid - 1;
}
}
curr_index = start;
return false;
}
| 4,179 | C++ | .cpp | 149 | 21.604027 | 111 | 0.527382 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,766 | http_data.cpp | typesense_typesense/src/http_data.cpp |
#include "http_data.h"
std::string route_path::_get_action() {
// `resource:operation` forms an action
// operations: create, get, list, delete, search, import, export
std::string resource_path;
std::string operation;
size_t identifier_index = 0;
for(size_t i = 0; i < path_parts.size(); i++) {
if(i == 0 && path_parts.size() > 2 && path_parts[i] == "collections") {
// sub-resource of a collection, e.g. /collections/:name/overrides should be treated as
// top-level resource to maintain backward compatibility
continue;
}
if(path_parts[i][0] == ':') {
identifier_index = i;
} else if(resource_path.empty()){
resource_path = path_parts[i];
} else {
resource_path = resource_path + "/" + path_parts[i];
}
}
// special cases to maintain semantics and backward compatibility
if(resource_path == "multi_search" || resource_path == "documents/search") {
return "documents:search";
}
if(resource_path == "documents/import" || resource_path == "documents/export") {
StringUtils::replace_all(resource_path, "documents/", "");
return "documents:" + resource_path;
}
// e.g /collections or /collections/:collection/foo or /collections/:collection
if(http_method == "GET") {
// GET can be a `get` or `list`
operation = (identifier_index != 0) ? "get" : "list";
} else if(http_method == "POST") {
operation = "create";
} else if(http_method == "PUT") {
operation = "upsert";
} else if(http_method == "DELETE") {
operation = "delete";
} else if(http_method == "PATCH") {
operation = "update";
} else {
operation = "unknown";
}
return resource_path + ":" + operation;
}
bool http_req::do_resource_check() {
return http_method != "DELETE" && path_without_query != "/health" && path_without_query != "/config";
}
| 1,996 | C++ | .cpp | 49 | 33.653061 | 105 | 0.588023 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,767 | cvt.cpp | typesense_typesense/src/cvt.cpp |
#include <cvt.h>
#include <cstring>
#include "logger.h"
bool CVTrie::add(const char *key, const uint8_t length, void *value) {
// If the key exists, augment the node, otherwise insert a new node
if(root == nullptr) {
// Trie is empty, so add a single leaf node:
// [0|PTRLEAF][3][f][o][o]
uint8_t* block = new uint8_t[8 + 1 + length];
void* node = tag_ptr(value, 0, LEAF);
std::memcpy(block, &node, sizeof(void*));
std::memcpy(block+sizeof(void*), &length, 1);
std::memcpy(block+sizeof(void*)+1, key, length);
root = block;
return true;
}
uint8_t node_type = get_node_type(root);
if(node_type == LEAF) {
// Compare new key with child key to identify common prefix
// e.g. welcome vs welding (or) we vs welcome (or) welcome vs foobar
}
/*size_t num_siblings = 1;
size_t key_index = 0;
while(true) {
// for each sibling
for(auto sindex = 0; sindex < num_siblings; sindex++) {
unsigned char c = key[key_index];
}
}*/
return true;
}
void *CVTrie::find(const char *key, const uint8_t length) {
size_t key_index = 0;
void* curr = nullptr;
std::memcpy(&curr, root, sizeof curr);
while(true) {
uint8_t node_type = get_node_type(curr);
if(node_type == LEAF) {
size_t key_len = *(uint8_t*)(root+sizeof(void*));
if(key_index+key_len != length) {
return nullptr;
}
for(size_t i=0; i<key_len; i++) {
uint8_t this_char = *(uint8_t *) (root + sizeof(void *) + 1 + i);
if(key[key_index++] != this_char) {
return nullptr;
}
}
void* leaf = get_ptr(curr);
return leaf;
}
break;
}
return nullptr;
}
| 1,890 | C++ | .cpp | 55 | 25.890909 | 81 | 0.536894 | typesense/typesense | 20,571 | 633 | 548 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 3,768 | synonym_index.cpp | typesense_typesense/src/synonym_index.cpp |
#include "synonym_index.h"
#include "posting.h"
void SynonymIndex::synonym_reduction_internal(const std::vector<std::string>& tokens,
const std::string& locale,
size_t start_window_size, size_t start_index_pos,
std::set<std::string>& processed_tokens,
std::vector<std::vector<std::string>>& results,
const std::vector<std::string>& orig_tokens,
bool synonym_prefix, uint32_t synonym_num_typos) const {
bool recursed = false;
for(size_t window_len = start_window_size; window_len > 0; window_len--) {
for(size_t start_index = start_index_pos; start_index+window_len-1 < tokens.size(); start_index++) {
std::string merged_tokens_str="";
for(size_t i = start_index; i < start_index+window_len; i++) {
merged_tokens_str += tokens[i];
merged_tokens_str += " ";
}
StringUtils::trim(merged_tokens_str);
std::vector<art_leaf*> leaves;
std::set<std::string> exclude_leaves;
auto merged_tokens_len = strlen(merged_tokens_str.c_str());
merged_tokens_len = synonym_prefix ? merged_tokens_len : merged_tokens_len + 1;
art_fuzzy_search(synonym_index_tree, (unsigned char*)merged_tokens_str.c_str(), merged_tokens_len, 0, synonym_num_typos,
10, FREQUENCY, synonym_prefix, false, "", nullptr, 0, leaves, exclude_leaves);
if(processed_tokens.count(merged_tokens_str) == 0) {
// tokens in this window match a synonym: reconstruct tokens and rerun synonym mapping against matches
for (const auto &leaf: leaves) {
std::vector<posting_list_t*> expanded_plists;
posting_list_t::iterator_t it(nullptr, nullptr, nullptr);
if(IS_COMPACT_POSTING(leaf->values)) {
auto compact_posting_list = COMPACT_POSTING_PTR(leaf->values);
posting_list_t* full_posting_list = compact_posting_list->to_full_posting_list();
expanded_plists.push_back(full_posting_list);
it = full_posting_list->new_iterator(nullptr, nullptr, 0);
} else {
posting_list_t* full_posting_list = (posting_list_t*)(leaf->values);
it = full_posting_list->new_iterator(nullptr, nullptr, 0);
}
while(it.valid()) {
auto syn_index = it.id();
const auto &syn_def = synonym_definitions.at(syn_index);
if(syn_def.locale != locale) {
break;
}
for (const auto &syn_def_tokens: syn_def.synonyms) {
std::vector<std::string> new_tokens;
for (size_t i = 0; i < start_index; i++) {
new_tokens.push_back(tokens[i]);
}
for (size_t i = 0; i < syn_def_tokens.size(); i++) {
const auto &syn_def_token = syn_def_tokens[i];
new_tokens.push_back(syn_def_token);
processed_tokens.emplace(syn_def_token);
}
for (size_t i = start_index + window_len; i < tokens.size(); i++) {
new_tokens.push_back(tokens[i]);
}
processed_tokens.emplace(merged_tokens_str);
auto syn_def_tokens_str = StringUtils::join(syn_def_tokens, " ");
processed_tokens.emplace(syn_def_tokens_str);
recursed = true;
synonym_reduction_internal(new_tokens, locale, window_len,
start_index, processed_tokens, results, orig_tokens,
synonym_prefix, synonym_num_typos);
}
it.next();
}
for(posting_list_t* plist: expanded_plists) {
delete plist;
}
}
}
}
// reset it because for the next window we have to start from scratch
start_index_pos = 0;
}
if(!recursed && !processed_tokens.empty() && tokens != orig_tokens) {
results.emplace_back(tokens);
}
}
void SynonymIndex::synonym_reduction(const std::vector<std::string>& tokens,
const std::string& locale,
std::vector<std::vector<std::string>>& results,
bool synonym_prefix, uint32_t synonym_num_typos) const {
std::shared_lock lock(mutex);
if(synonym_definitions.empty()) {
return;
}
std::set<std::string> processed_tokens;
synonym_reduction_internal(tokens, locale, tokens.size(), 0, processed_tokens, results, tokens,
synonym_prefix, synonym_num_typos);
}
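// --- Illustrative sketch (editor's addition, not part of the original source) ---
// synonym_reduction() expands a tokenized query into alternative token sequences using the
// registered synonym definitions. The call below is hypothetical: the exact output depends on
// the synonym_t entries that were added to this index.
#if 0
std::vector<std::vector<std::string>> results;
synonym_index->synonym_reduction({"blue", "sneakers"}, "", results,
                                 /*synonym_prefix=*/false, /*synonym_num_typos=*/0);
// With a two-way synonym between "sneakers" and "running shoes", results could contain:
//   { "blue", "running", "shoes" }
#endif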
Option<bool> SynonymIndex::add_synonym(const std::string & collection_name, const synonym_t& synonym,
bool write_to_store) {
std::unique_lock write_lock(mutex);
if(synonym_ids_index_map.count(synonym.id) != 0) {
write_lock.unlock();
// first we have to delete existing entries so we can upsert
Option<bool> rem_op = remove_synonym(collection_name, synonym.id);
if (!rem_op.ok()) {
return rem_op;
}
write_lock.lock();
}
synonym_definitions[synonym_index] = synonym;
synonym_ids_index_map[synonym.id] = synonym_index;
std::vector<std::string> keys;
if(!synonym.root.empty()) {
auto root_tokens_str = StringUtils::join(synonym.root, " ");
keys.push_back(root_tokens_str);
} else {
for(const auto & syn_tokens : synonym.synonyms) {
auto synonyms_str = StringUtils::join(syn_tokens, " ");
keys.push_back(synonyms_str);
}
}
for(const auto& key : keys) {
art_leaf* exact_leaf = (art_leaf *) art_search(synonym_index_tree, (unsigned char *) key.c_str(), key.size() + 1);
if(exact_leaf) {
auto offset = posting_t::num_ids(exact_leaf->values);
posting_t::upsert(exact_leaf->values, synonym_index, {offset});
} else {
art_document document(synonym_index, synonym_index, {0});
art_insert(synonym_index_tree, (unsigned char *) key.c_str(), key.size() + 1, &document);
}
}
++synonym_index;
write_lock.unlock();
if(write_to_store) {
bool inserted = store->insert(get_synonym_key(collection_name, synonym.id), synonym.to_view_json().dump());
if(!inserted) {
return Option<bool>(500, "Error while storing the synonym on disk.");
}
}
return Option<bool>(true);
}
bool SynonymIndex::get_synonym(const std::string& id, synonym_t& synonym) {
std::shared_lock lock(mutex);
if(synonym_ids_index_map.count(id) != 0) {
auto index = synonym_ids_index_map.at(id);
synonym = synonym_definitions.at(index);
return true;
}
return false;
}
Option<bool> SynonymIndex::remove_synonym(const std::string & collection_name, const std::string &id) {
std::unique_lock lock(mutex);
const auto& syn_iter = synonym_ids_index_map.find(id);
if(syn_iter != synonym_ids_index_map.end()) {
bool removed = store->remove(get_synonym_key(collection_name, id));
if(!removed) {
return Option<bool>(500, "Error while deleting the synonym from disk.");
}
const auto& synonym = synonym_definitions.at(syn_iter->second);
std::vector<std::string> keys;
keys.insert(keys.end(), synonym.root.begin(), synonym.root.end());
for(const auto & syn_tokens : synonym.synonyms) {
auto synonyms_str = StringUtils::join(syn_tokens, " ");
keys.push_back(synonyms_str);
}
for(const auto& key : keys) {
art_leaf* found_leaf = (art_leaf *) art_search(synonym_index_tree, (unsigned char *) key.c_str(), key.size() + 1);
if(found_leaf) {
auto index = syn_iter->second;
posting_t::erase(found_leaf->values, index);
if(posting_t::num_ids(found_leaf->values) == 0) {
void* values = art_delete(synonym_index_tree, (unsigned char*)key.c_str(), key.size() + 1);
posting_t::destroy_list(values);
}
}
}
auto index = synonym_ids_index_map.at(id);
synonym_ids_index_map.erase(id);
synonym_definitions.erase(index);
return Option<bool>(true);
}
return Option<bool>(404, "Could not find that `id`.");
}
Option<std::map<uint32_t, synonym_t*>> SynonymIndex::get_synonyms(uint32_t limit, uint32_t offset) {
std::shared_lock lock(mutex);
std::map<uint32_t, synonym_t*> synonyms_map;
auto synonym_it = synonym_definitions.begin();
if(offset > 0) {
if(offset >= synonym_definitions.size()) {
return Option<std::map<uint32_t, synonym_t*>>(400, "Invalid offset param.");
}
std::advance(synonym_it, offset);
}
auto synonym_end = synonym_definitions.end();
if(limit > 0 && (offset + limit < synonym_definitions.size())) {
synonym_end = synonym_it;
std::advance(synonym_end, limit);
}
while (synonym_it != synonym_end) {
synonyms_map[synonym_it->first] = &synonym_it->second;
synonym_it++;
}
return Option<std::map<uint32_t, synonym_t*>>(synonyms_map);
}
std::string SynonymIndex::get_synonym_key(const std::string & collection_name, const std::string & synonym_id) {
return std::string(COLLECTION_SYNONYM_PREFIX) + "_" + collection_name + "_" + synonym_id;
}
Option<bool> synonym_t::parse(const nlohmann::json& synonym_json, synonym_t& syn) {
if(synonym_json.count("id") == 0) {
return Option<bool>(400, "Missing `id` field.");
}
if(synonym_json.count("synonyms") == 0) {
return Option<bool>(400, "Could not find an array of `synonyms`");
}
if (!synonym_json["synonyms"].is_array() || synonym_json["synonyms"].empty()) {
return Option<bool>(400, "Could not find an array of `synonyms`");
}
if(synonym_json.count("locale") != 0) {
if(!synonym_json["locale"].is_string()) {
return Option<bool>(400, "Synonym `locale` should be a string.");
}
syn.locale = synonym_json["locale"].get<std::string>();
}
if(synonym_json.count("symbols_to_index") != 0) {
if(!synonym_json["symbols_to_index"].is_array() || synonym_json["symbols_to_index"].empty() ||
!synonym_json["symbols_to_index"][0].is_string()) {
return Option<bool>(400, "Synonym `symbols_to_index` should be an array of strings.");
}
auto symbols = synonym_json["symbols_to_index"].get<std::vector<std::string>>();
for(auto symbol: symbols) {
if(symbol.size() != 1) {
return Option<bool>(400, "Synonym `symbols_to_index` should be an array of single character symbols.");
}
syn.symbols.push_back(symbol[0]);
}
}
if(synonym_json.count("root") != 0) {
std::vector<std::string> tokens;
if(synonym_json["root"].is_string()) {
Tokenizer(synonym_json["root"].get<std::string>(), true, false, syn.locale, syn.symbols).tokenize(tokens);
syn.raw_root = synonym_json["root"].get<std::string>();
} else if(synonym_json["root"].is_array()) {
// Typesense 0.23.1 and below incorrectly stored root as array
for(const auto& root_ele: synonym_json["root"]) {
if(!root_ele.is_string()) {
return Option<bool>(400, "Synonym root is not valid.");
}
tokens.push_back(root_ele.get<std::string>());
}
syn.raw_root = StringUtils::join(tokens, " ");
} else {
return Option<bool>(400, "Key `root` should be a string.");
}
syn.root = tokens;
}
for(const auto& synonym: synonym_json["synonyms"]) {
std::vector<std::string> tokens;
if(synonym.is_string()) {
Tokenizer(synonym.get<std::string>(), true, false, syn.locale, syn.symbols).tokenize(tokens);
syn.raw_synonyms.push_back(synonym.get<std::string>());
} else if(synonym.is_array()) {
// Typesense 0.23.1 and below incorrectly stored synonym as array
if(synonym.empty()) {
return Option<bool>(400, "Could not find a valid string array of `synonyms`");
}
for(const auto& ele: synonym) {
if(!ele.is_string() || ele.get<std::string>().empty()) {
return Option<bool>(400, "Could not find a valid string array of `synonyms`");
}
tokens.push_back(ele.get<std::string>());
}
syn.raw_synonyms.push_back(StringUtils::join(tokens, " "));
} else {
return Option<bool>(400, "Could not find a valid string array of `synonyms`");
}
syn.synonyms.push_back(tokens);
}
syn.id = synonym_json["id"];
return Option<bool>(true);
}
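// --- Illustrative sketch (editor's addition, not part of the original source) ---
// A JSON document that synonym_t::parse() above would accept; all field values are made up.
#if 0
nlohmann::json syn_json = nlohmann::json::parse(R"({
    "id": "coat-synonyms",
    "root": "coat",
    "synonyms": ["blazer", "jacket"],
    "locale": "en",
    "symbols_to_index": ["-"]
})");
synonym_t syn;
Option<bool> parse_op = synonym_t::parse(syn_json, syn);
// On success: syn.root == {"coat"}, syn.synonyms == {{"blazer"}, {"jacket"}}
#endif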
nlohmann::json synonym_t::to_view_json() const {
nlohmann::json obj;
obj["id"] = id;
obj["root"] = raw_root;
obj["synonyms"] = nlohmann::json::array();
for(const auto& synonym: raw_synonyms) {
obj["synonyms"].push_back(synonym);
}
if(!locale.empty()) {
obj["locale"] = locale;
}
if(!symbols.empty()) {
obj["symbols_to_index"] = nlohmann::json::array();
for(char c: symbols) {
obj["symbols_to_index"].push_back(std::string(1, c));
}
}
return obj;
}
size: 14,451 | language: C++ | extension: .cpp | total_lines: 294 | avg_line_length: 36.136054 | max_line_length: 132 | alphanum_fraction: 0.545939 | repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near, all sources): false

id: 3,769 | file_name: auth_manager.cpp | file_path: typesense_typesense/src/auth_manager.cpp
#include "auth_manager.h"
#include <openssl/evp.h>
#include <regex>
#include <join.h>
constexpr const char* AuthManager::DOCUMENTS_SEARCH_ACTION;
constexpr const uint64_t api_key_t::FAR_FUTURE_TIMESTAMP;
Option<bool> AuthManager::init(Store* store, const std::string& bootstrap_auth_key) {
// This function must be idempotent, i.e. when called multiple times, must produce the same state without leaks
//LOG(INFO) << "AuthManager::init()";
std::unique_lock lock(mutex);
this->store = store;
this->bootstrap_auth_key = bootstrap_auth_key;
std::string next_api_key_id_str;
StoreStatus next_api_key_id_status = store->get(API_KEY_NEXT_ID_KEY, next_api_key_id_str);
if(next_api_key_id_status == StoreStatus::ERROR) {
return Option<bool>(500, "Error while fetching the next API key id from the store.");
}
if(next_api_key_id_status == StoreStatus::FOUND) {
next_api_key_id = (uint32_t) StringUtils::deserialize_uint32_t(next_api_key_id_str);
} else {
next_api_key_id = 0;
}
std::vector<std::string> api_key_json_strs;
store->scan_fill(std::string(API_KEYS_PREFIX) + "_",
std::string(API_KEYS_PREFIX) + "`",
api_key_json_strs);
LOG(INFO) << "Indexing " << api_key_json_strs.size() << " API key(s) found on disk.";
for(auto & api_key_json_str: api_key_json_strs) {
api_key_t api_key;
Option<bool> load_op = api_key.load(api_key_json_str);
if(!load_op.ok()) {
return Option<bool>(load_op.code(), load_op.error());
}
api_keys.emplace(api_key.value, api_key);
}
return Option<bool>(true);
}
Option<std::vector<api_key_t>> AuthManager::list_keys() const {
std::shared_lock lock(mutex);
std::vector<std::string> api_key_json_strs;
store->scan_fill(std::string(API_KEYS_PREFIX) + "_",
std::string(API_KEYS_PREFIX) + "`", api_key_json_strs);
std::vector<api_key_t> stored_api_keys;
for(const auto& api_key_json_str: api_key_json_strs) {
api_key_t api_key;
Option<bool> load_op = api_key.load(api_key_json_str);
if(!load_op.ok()) {
return Option<std::vector<api_key_t>>(load_op.code(), load_op.error());
}
stored_api_keys.push_back(api_key.truncate_value());
}
return Option<std::vector<api_key_t>>(stored_api_keys);
}
Option<api_key_t> AuthManager::get_key(uint32_t id, bool truncate_value) const {
std::shared_lock lock(mutex);
std::string api_key_store_key = std::string(API_KEYS_PREFIX) + "_" + std::to_string(id);
std::string api_key_json_str;
StoreStatus status = store->get(api_key_store_key, api_key_json_str);
if(status == StoreStatus::FOUND) {
api_key_t api_key;
const Option<bool> & load_op = api_key.load(api_key_json_str);
if(!load_op.ok()) {
return Option<api_key_t>(load_op.code(), load_op.error());
}
if(truncate_value) {
api_key.truncate_value();
}
return Option<api_key_t>(api_key);
}
if(status == StoreStatus::NOT_FOUND) {
return Option<api_key_t>(404, "Not found.");
}
return Option<api_key_t>(500, "Error while fetching key from store.");
}
Option<api_key_t> AuthManager::create_key(api_key_t& api_key) {
//LOG(INFO) << "AuthManager::create_key()";
std::unique_lock lock(mutex);
if(api_keys.count(api_key.value) != 0 || api_key.value == bootstrap_auth_key) {
return Option<api_key_t>(409, "API key generation conflict.");
}
api_key.id = get_next_api_key_id();
std::string api_key_store_key = std::string(API_KEYS_PREFIX) + "_" + std::to_string(api_key.id);
const nlohmann::json & api_key_obj = api_key.to_json();
bool inserted = store->insert(api_key_store_key, api_key_obj.dump());
if(!inserted) {
return Option<api_key_t>(500, "Could not store generated API key.");
}
api_keys.emplace(api_key.value, api_key);
return Option<api_key_t>(api_key);
}
Option<api_key_t> AuthManager::remove_key(uint32_t id) {
Option<api_key_t> key_op = get_key(id, false);
if(!key_op.ok()) {
return Option<api_key_t>(key_op.code(), key_op.error());
}
std::string api_key_store_key = std::string(API_KEYS_PREFIX) + "_" + std::to_string(id);
if(!store->remove(api_key_store_key)) {
return Option<api_key_t>(500, "Could not delete API key.");
}
std::unique_lock lock(mutex);
api_key_t&& key = key_op.get();
api_keys.erase(key.value);
return Option<api_key_t>(key.truncate_value());
}
uint32_t AuthManager::get_next_api_key_id() {
store->increment(std::string(API_KEY_NEXT_ID_KEY), 1);
return next_api_key_id++;
}
bool AuthManager::authenticate(const std::string& action,
const std::vector<collection_key_t>& collection_keys,
std::map<std::string, std::string>& params,
std::vector<nlohmann::json>& embedded_params_vec) const {
std::shared_lock lock(mutex);
//LOG(INFO) << "AuthManager::authenticate()";
size_t num_keys_matched = 0;
for(size_t i = 0; i < collection_keys.size(); i++) {
const auto& coll_key = collection_keys[i];
if(coll_key.api_key.empty()) {
return false;
}
if(coll_key.api_key == bootstrap_auth_key) {
return true;
}
const auto& key_it = api_keys.find(coll_key.api_key);
nlohmann::json embedded_params;
if(key_it != api_keys.end()) {
const api_key_t& api_key = key_it.value();
if(!auth_against_key(coll_key.collection, action, api_key, false)) {
return false;
}
} else {
// could be a scoped API key
Option<bool> auth_op = authenticate_parse_params(coll_key, action, embedded_params);
if(!auth_op.ok()) {
return false;
}
}
num_keys_matched++;
// lengths of embedded_params_vec and collection_keys are guaranteed by upstream to be the same
embedded_params_vec[i] = embedded_params;
}
//LOG(INFO) << "api_keys.size() = " << api_keys.size();
return (num_keys_matched == collection_keys.size());
}
bool AuthManager::regexp_match(const std::string& value, const std::string& regexp) {
try {
return std::regex_match (value, std::regex(regexp));
} catch(const std::exception& e) {
LOG(ERROR) << "Error while matching regexp " << regexp << " against value " << value;
return false;
}
}
bool AuthManager::auth_against_key(const std::string& req_collection, const std::string& action,
const api_key_t& api_key, const bool search_only) const {
if(uint64_t(std::time(0)) > api_key.expires_at) {
LOG(ERROR) << fmt_error("Rejecting expired API key.", api_key.value);
return false;
}
if(search_only) {
// ensure that parent key has only search scope
if(api_key.actions.size() != 1 || api_key.actions[0] != DOCUMENTS_SEARCH_ACTION) {
LOG(ERROR) << fmt_error(std::string("Parent API key must allow only `") + DOCUMENTS_SEARCH_ACTION + "` action.",
api_key.value);
return false;
}
} else {
bool action_is_allowed = false;
for(const std::string& allowed_action: api_key.actions) {
if(allowed_action == "*" || (action != "*" && allowed_action == action)) {
action_is_allowed = true;
break;
}
// e.g. collections:create or documents:create
if (allowed_action.size() >= 2 && allowed_action[allowed_action.size() - 2] == ':' &&
allowed_action.back() == '*') {
std::string allowed_resource = allowed_action.substr(0, allowed_action.size() - 2);
std::vector<std::string> actual_action_parts;
StringUtils::split(action, actual_action_parts, ":");
if(actual_action_parts[0] == allowed_resource) {
action_is_allowed = true;
break;
}
}
}
if(!action_is_allowed) {
return false;
}
}
bool coll_allowed = false;
for(const std::string& allowed_collection: api_key.collections) {
if(allowed_collection == "*" || (allowed_collection == req_collection) || req_collection.empty() ||
regexp_match(req_collection, allowed_collection)) {
coll_allowed = true;
break;
}
}
if(!coll_allowed) {
// even if one collection is not allowed, we reject the entire request
return false;
}
return true;
}
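// --- Illustrative sketch (editor's addition, not part of the original source) ---
// The action check above accepts an exact match, the global "*", or a "<resource>:*" wildcard.
// For example, a key whose actions are {"documents:*"} allows "documents:search" and
// "documents:create" but not "collections:create". A hypothetical standalone version:
#if 0
static bool action_allowed(const std::string& allowed, const std::string& requested) {
    if(allowed == "*" || allowed == requested) {
        return true;
    }
    if(allowed.size() >= 2 && allowed[allowed.size() - 2] == ':' && allowed.back() == '*') {
        // compare only the resource part before ':', e.g. "documents" in "documents:search"
        auto pos = requested.find(':');
        return pos != std::string::npos &&
               requested.substr(0, pos) == allowed.substr(0, allowed.size() - 2);
    }
    return false;
}
#endif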
Option<bool> AuthManager::authenticate_parse_params(const collection_key_t& scoped_api_key, const std::string& action,
nlohmann::json& embedded_params) const {
// allow only searches from scoped keys
if(action != DOCUMENTS_SEARCH_ACTION) {
LOG(ERROR) << "Scoped API keys can only be used for searches.";
return Option<bool>(403, "Forbidden.");
}
const std::string& key_payload = StringUtils::base64_decode(scoped_api_key.api_key);
if(key_payload.size() < HMAC_BASE64_LEN + api_key_t::PREFIX_LEN) {
LOG(ERROR) << "Malformed scoped API key.";
return Option<bool>(403, "Forbidden.");
}
// FORMAT:
// <DIGEST><PARENT_KEY_PREFIX><PARAMS>
const std::string& hmacSHA256 = key_payload.substr(0, HMAC_BASE64_LEN);
const std::string& key_prefix = key_payload.substr(HMAC_BASE64_LEN, api_key_t::PREFIX_LEN);
const std::string& custom_params = key_payload.substr(HMAC_BASE64_LEN + api_key_t::PREFIX_LEN);
// Calculate and verify hmac against matching api key.
// There could be several matching keys since we look up only on a 4-char prefix.
auto prefix_range = api_keys.equal_prefix_range(key_prefix);
for(auto it = prefix_range.first; it != prefix_range.second; ++it) {
const api_key_t& root_api_key = it.value();
// ensure that parent key collection filter matches queried collection
bool auth_success = auth_against_key(scoped_api_key.collection, action, root_api_key, true);
if(!auth_success) {
continue;
}
// finally verify hmac
std::string digest = StringUtils::hmac(root_api_key.value, custom_params);
if(digest == hmacSHA256) {
try {
embedded_params = nlohmann::json::parse(custom_params);
} catch(const std::exception& e) {
continue;
}
if(!embedded_params.is_object()) {
continue;
}
if(embedded_params.count("expires_at") != 0) {
if(!embedded_params["expires_at"].is_number_integer() || embedded_params["expires_at"].get<int64_t>() < 0) {
continue;
}
// if parent key's expiry timestamp is smaller, it takes precedence
uint64_t expiry_ts = std::min(root_api_key.expires_at, embedded_params["expires_at"].get<uint64_t>());
if(uint64_t(std::time(0)) > expiry_ts) {
continue;
}
}
return Option<bool>(true);
}
}
return Option<bool>(403, "Forbidden.");
}
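// --- Illustrative sketch (editor's addition, not part of the original source) ---
// A scoped key that passes the verification above would have to be constructed as
//   base64( hmac_digest + parent_key_prefix + params_json )
// where hmac_digest is the base64-encoded HMAC of params_json keyed by the parent API key and
// parent_key_prefix is the first api_key_t::PREFIX_LEN characters of the parent key. The helper
// names below (hmac_base64, base64_encode) are placeholders, not functions from this codebase.
#if 0
std::string generate_scoped_key(const std::string& parent_key, const std::string& params_json) {
    std::string digest = hmac_base64(parent_key, params_json);           // e.g. HMAC-SHA256, base64
    std::string prefix = parent_key.substr(0, api_key_t::PREFIX_LEN);
    return base64_encode(digest + prefix + params_json);
}
// params_json could be: {"filter_by":"user_id:=42","expires_at":1735689600}
#endif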
std::string AuthManager::fmt_error(std::string&& error, const std::string& key) {
std::stringstream ss;
ss << error << " Key prefix: " << key.substr(0, api_key_t::PREFIX_LEN) << ", SHA256: "
<< StringUtils::hash_sha256(key);
return ss.str();
}
Option<uint32_t> api_key_t::validate(const nlohmann::json &key_obj) {
auto mandatory_keys = {
"description", "actions", "collections"
};
for(auto key: mandatory_keys) {
if(key_obj.count(key) == 0) {
return Option<uint32_t>(400, std::string("Could not find a `") + key + "` key.");
}
}
if(key_obj.count("value") != 0 && !key_obj["value"].is_string()) {
return Option<uint32_t>(400, std::string("Key value must be a string."));
}
if(key_obj.count("description") != 0 && !key_obj["description"].is_string()) {
return Option<uint32_t>(400, std::string("Key description must be a string."));
}
if(!key_obj["actions"].is_array() || key_obj["actions"].empty()) {
return Option<uint32_t>(400,"Wrong format for `actions`. It should be an array of string.");
}
if(!key_obj["collections"].is_array() || key_obj["collections"].empty()) {
return Option<uint32_t>(400,"Wrong format for `collections`. It should be an array of string.");
}
for(const nlohmann::json & item: key_obj["actions"]) {
if(!item.is_string()) {
return Option<uint32_t>(400,"Wrong format for `actions`. It should be an array of string.");
}
}
for(const nlohmann::json & item: key_obj["collections"]) {
if(!item.is_string()) {
return Option<uint32_t>(400,"Wrong format for `collections`. It should be an array of string.");
}
}
if(key_obj.count("expires_at") != 0) {
if(!key_obj["expires_at"].is_number_integer() || key_obj["expires_at"].get<int64_t>() < 0) {
return Option<uint32_t>(400,"Wrong format for `expires_at`. It should be an unsigned integer.");
}
}
return Option<uint32_t>(200);
}
bool AuthManager::add_item_to_params(std::map<std::string, std::string>& req_params,
const nlohmann::detail::iteration_proxy_value<nlohmann::json::iterator>& item,
bool overwrite) {
std::string str_value;
if(item.value().is_string()) {
str_value = item.value().get<std::string>();
} else if(item.value().is_number_integer()) {
str_value = std::to_string(item.value().get<int64_t>());
} else if(item.value().is_number_float()) {
str_value = std::to_string(item.value().get<float>());
} else if(item.value().is_boolean()) {
str_value = item.value().get<bool>() ? "true" : "false";
} else {
return false;
}
if(req_params.count(item.key()) == 0) {
req_params[item.key()] = str_value;
} else if(item.key() == "filter_by") {
auto& embedded_param = str_value;
auto& query_param = req_params[item.key()];
// Join follows the $collection_name(<join_condition>) pattern. There might be false-positive matches with this
// regular expression, like "(field: foo$) || (field: bar)", but that is acceptable.
const std::regex join_pattern(R"(\$.+\(.+\))");
if (std::regex_search(embedded_param, join_pattern) && std::regex_search(query_param, join_pattern) &&
!Join::merge_join_conditions(embedded_param, query_param)) {
return false;
}
if(!req_params[item.key()].empty() && !str_value.empty()) {
req_params[item.key()] = "(" + req_params[item.key()] + ") && (" + str_value + ")";
} else if(req_params[item.key()].empty() && !str_value.empty()) {
req_params[item.key()] = "(" + str_value + ")";
} else if(!req_params[item.key()].empty() && str_value.empty()) {
req_params[item.key()] = "(" + req_params[item.key()] + ")";
}
} else if(overwrite) {
req_params[item.key()] = str_value;
}
return true;
}
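// --- Illustrative sketch (editor's addition, not part of the original source) ---
// When both the request and the embedded (scoped-key) params carry `filter_by`, the branch above
// ANDs them together instead of overwriting. Worked example:
//   request param:  filter_by = status:=active
//   embedded param: filter_by = user_id:=42
//   resulting:      filter_by = (status:=active) && (user_id:=42)
// If only one side is non-empty, that side is kept, wrapped in parentheses.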
void AuthManager::remove_expired_keys() {
const Option<std::vector<api_key_t>>& keys_op = list_keys();
if(!keys_op.ok()) {
LOG(ERROR) << keys_op.error();
return;
}
const std::vector<api_key_t>& keys = keys_op.get();
for(const auto& key : keys) {
if(key.autodelete && (uint64_t(std::time(0)) > key.expires_at)) {
LOG(INFO) << "Deleting expired key " << key.value;
auto delete_op = remove_key(key.id);
if(!delete_op.ok()) {
LOG(ERROR) << delete_op.error();
}
}
}
}
void AuthManager::do_housekeeping() {
remove_expired_keys();
}
std::vector<std::string> AuthManager::get_api_key_collections(const std::string& value) {
if(api_keys.find(value) != api_keys.end()) {
return api_keys.at(value).collections;
}
return std::vector<std::string>();
}
size: 16,517 | language: C++ | extension: .cpp | total_lines: 364 | avg_line_length: 36.675824 | max_line_length: 124 | alphanum_fraction: 0.587393 | repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near, all sources): false

id: 3,770 | file_name: ratelimit_manager.cpp | file_path: typesense_typesense/src/ratelimit_manager.cpp
#include "ratelimit_manager.h"
#include "string_utils.h"
#include "logger.h"
#include <iterator>
RateLimitManager * RateLimitManager::getInstance() {
if(!instance) {
instance = new RateLimitManager();
}
return instance;
}
void RateLimitManager::temp_ban_entity(const rate_limit_entity_t& entity, const uint64_t number_of_hours) {
// lock mutex
std::unique_lock<std::shared_mutex>lock(rate_limit_mutex);
temp_ban_entity_wrapped(entity, number_of_hours);
}
bool RateLimitManager::is_rate_limited(const rate_limit_entity_t& api_key_entity, const rate_limit_entity_t& ip_entity) {
// lock mutex
std::unique_lock<std::shared_mutex>lock(rate_limit_mutex);
std::vector<rate_limit_rule_t*> rules_bucket;
// get wildcard rules
fill_bucket(WILDCARD_IP, api_key_entity, rules_bucket);
fill_bucket(WILDCARD_API_KEY, ip_entity, rules_bucket);
// get rules for the IP entity
fill_bucket(ip_entity, api_key_entity, rules_bucket);
// get rules for the API key entity
fill_bucket(api_key_entity, ip_entity, rules_bucket);
if(rules_bucket.empty()) {
return false;
}
// sort rules_bucket by priority in ascending order
std::sort(rules_bucket.begin(), rules_bucket.end(), [](rate_limit_rule_t* rule1, rate_limit_rule_t* rule2) {
return rule1->priority < rule2->priority;
});
// get the rule with the highest priority
auto& rule = *rules_bucket.front();
// get key for throttling if exists
auto throttle_key = get_throttle_key(ip_entity, api_key_entity);
if(rule.action == RateLimitAction::block) {
return true;
}
else if(rule.action == RateLimitAction::allow) {
return false;
}
// check if any throttle exists and is still valid
while(throttle_key.ok()) {
auto key = throttle_key.get();
// Check if ban duration is not over
if(throttled_entities.at(key).throttling_to > get_current_time()) {
return true;
}
// Remove ban from DB store
std::string ban_key = std::string(BANS_PREFIX) + "_" + std::to_string(throttled_entities.at(key).status_id);
store->remove(ban_key);
// Remove ban
throttled_entities.erase(key);
rate_limit_exceeds.erase(key);
// Reset request counts
auto& request_counts = rate_limit_request_counts.lookup(key);
request_counts.reset();
// Get next throttle key if exists
throttle_key = get_throttle_key(ip_entity, api_key_entity);
}
// get request counter key according to rule type
auto request_counter_key = get_request_counter_key(rule, ip_entity, api_key_entity);
if(!rate_limit_request_counts.contains(request_counter_key)){
rate_limit_request_counts.insert(request_counter_key, request_counter_t{});
}
auto& request_counts = rate_limit_request_counts.lookup(request_counter_key);
// Check if last reset time was more than 1 minute ago
if(request_counts.last_reset_time_minute <= get_current_time() - 60) {
request_counts.previous_requests_count_minute = request_counts.current_requests_count_minute;
request_counts.current_requests_count_minute = 0;
if(request_counts.last_reset_time_minute <= get_current_time() - 120) {
request_counts.previous_requests_count_minute = 0;
}
request_counts.last_reset_time_minute = get_current_time();
}
// Check if last reset time was more than 1 hour ago
if(request_counts.last_reset_time_hour <= get_current_time() - 3600) {
request_counts.previous_requests_count_hour = request_counts.current_requests_count_hour;
request_counts.current_requests_count_hour = 0;
if(request_counts.last_reset_time_hour <= get_current_time() - 7200) {
request_counts.previous_requests_count_hour = 0;
}
request_counts.last_reset_time_hour = get_current_time();
}
// Check if request count is over the limit
auto current_rate_for_minute = (60 - (get_current_time() - request_counts.last_reset_time_minute)) / 60 * request_counts.previous_requests_count_minute;
current_rate_for_minute += request_counts.current_requests_count_minute;
if(rule.max_requests.minute_threshold >= 0 && current_rate_for_minute >= rule.max_requests.minute_threshold) {
bool auto_ban_is_enabled = (rule.auto_ban_1m_threshold > 0 && rule.auto_ban_1m_duration_hours > 0);
// If the key is not in the exceed map, this is a new exceed, not a continued one
if(rate_limit_exceeds.count(request_counter_key) == 0) {
rate_limit_exceeds.insert({request_counter_key, rate_limit_exceed_t{last_throttle_id++, request_counter_key, 1}});
request_counts.threshold_exceed_count_minute++;
} else {
// else it is a continued exceed, so just increment the request count
rate_limit_exceeds[request_counter_key].request_count++;
}
// If auto ban is enabled, check if threshold is exceeded
if(auto_ban_is_enabled) {
if(request_counts.threshold_exceed_count_minute > rule.auto_ban_1m_threshold) {
temp_ban_entity_wrapped(
    request_counter_key.substr(0, request_counter_key.find("_")) == ".*" ? WILDCARD_API_KEY : api_key_entity,
    rule.auto_ban_1m_duration_hours,
    (request_counter_key.substr((request_counter_key.find("_") + 1)) == ".*" && !rule.apply_limit_per_entity) ? nullptr : &ip_entity);
}
}
return true;
}
auto current_rate_for_hour = (3600 - (get_current_time() - request_counts.last_reset_time_hour)) / 3600 * request_counts.previous_requests_count_hour;
current_rate_for_hour += request_counts.current_requests_count_hour;
if(rule.max_requests.hour_threshold >= 0 && current_rate_for_hour >= rule.max_requests.hour_threshold) {
if(rate_limit_exceeds.count(request_counter_key) == 0) {
rate_limit_exceeds.insert({request_counter_key, rate_limit_exceed_t{last_throttle_id++, request_counter_key, 1}});
} else {
rate_limit_exceeds[request_counter_key].request_count++;
}
return true;
}
// Increment request counts
request_counts.current_requests_count_minute++;
request_counts.current_requests_count_hour++;
// If the key is in the exceed map, the limit is no longer being exceeded, so remove it from the map
if(rate_limit_exceeds.count(request_counter_key) > 0) {
rate_limit_exceeds.erase(request_counter_key);
}
return false;
}
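// --- Illustrative sketch (editor's addition, not part of the original source) ---
// The per-minute check above uses a sliding-window estimate: the previous minute's count is
// weighted by the fraction of the window that has not yet elapsed, then the current minute's
// count is added. Worked example (treating the weighting as real-valued):
//   previous_requests_count_minute = 40, current_requests_count_minute = 25,
//   20 seconds elapsed since last_reset_time_minute
//   estimated rate = (60 - 20) / 60 * 40 + 25 = 26.67 + 25 ≈ 51.7 requests per minute
// With rule.max_requests.minute_threshold = 50 this request would be throttled; the hourly
// check applies the same idea over a 3600-second window.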
Option<nlohmann::json> RateLimitManager::find_rule_by_id(const uint64_t id) {
std::shared_lock<std::shared_mutex> lock(rate_limit_mutex);
if(rule_store.count(id) > 0) {
return Option<nlohmann::json>(rule_store.at(id).to_json());
}
return Option<nlohmann::json>(404, "Not Found");
}
bool RateLimitManager::delete_rule_by_id(const uint64_t id) {
std::unique_lock<std::shared_mutex> lock(rate_limit_mutex);
const std::string rule_store_key = get_rule_key(id);
bool deleted = store->remove(rule_store_key);
if(!deleted) {
return false;
}
// Check if a rule exists for the given ID
if(rule_store.count(id) > 0) {
auto rule = &rule_store.at(id);
// Remove rule from rate limit rule pointer
for(auto& entity: rate_limit_entities) {
for(auto it = entity.second.begin(); it != entity.second.end(); ) {
if(*it == rule) {
it = entity.second.erase(it);
} else {
++it;
}
}
}
// Remove rule from rule store
rule_store.erase(id);
for(auto it = rate_limit_entities.begin(); it != rate_limit_entities.end(); ) {
if(it->second.empty()) {
it = rate_limit_entities.erase(it);
} else {
++it;
}
}
return true;
}
return false;
}
const std::vector<rate_limit_rule_t> RateLimitManager::get_all_rules() {
std::shared_lock<std::shared_mutex> lock(rate_limit_mutex);
// Get all rules in a vector
std::vector<rate_limit_rule_t> rules;
for(const auto &rule : rule_store) {
rules.push_back(rule.second);
}
return rules;
}
const std::vector<rate_limit_status_t> RateLimitManager::get_banned_entities(const RateLimitedEntityType entity_type) {
std::shared_lock<std::shared_mutex> lock(rate_limit_mutex);
std::vector <rate_limit_status_t> banned_entities;
for (auto & element: throttled_entities) {
if(element.second.entity.entity_type == entity_type) {
banned_entities.push_back(element.second);
}
if(element.second.and_entity.ok()) {
if(element.second.and_entity.get().entity_type == entity_type) {
banned_entities.push_back(element.second);
}
}
}
// Get permanent bans
for (auto & element: rule_store) {
if(element.second.action == RateLimitAction::block) {
for(const auto& entity: element.second.entities) {
if(entity.entity_type == entity_type) {
banned_entities.push_back(rate_limit_status_t(0, 0, 0, entity));
}
}
}
}
return banned_entities;
}
void RateLimitManager::clear_all() {
std::unique_lock<std::shared_mutex> lock(rate_limit_mutex);
rate_limit_request_counts.clear();
rate_limit_entities.clear();
throttled_entities.clear();
rate_limit_exceeds.clear();
rule_store.clear();
last_rule_id = 0;
last_ban_id = 0;
base_timestamp = 0;
}
void RateLimitManager::temp_ban_entity_wrapped(const rate_limit_entity_t& entity, const uint64_t number_of_hours, const rate_limit_entity_t* and_entity) {
std::string key = entity.entity_id + "_" + (and_entity != nullptr ? and_entity->entity_id : ".*");
// Check if entity is already banned
if(throttled_entities.count(key) > 0) {
return;
}
auto now = get_current_time();
// Add entity to throttled_entities for the given number of hours
rate_limit_status_t status(last_ban_id, now, now + (number_of_hours * 60 * 60), entity, and_entity);
std::string ban_key = get_ban_key(last_ban_id);
store->insert(ban_key, status.to_json().dump());
throttled_entities.insert({key, status});
last_ban_id++;
if(rate_limit_request_counts.contains(key)){
// Reset counters for the given entity
rate_limit_request_counts.lookup(key).current_requests_count_minute = 0;
rate_limit_request_counts.lookup(key).current_requests_count_hour = 0;
}
}
const nlohmann::json RateLimitManager::get_all_rules_json() {
std::shared_lock<std::shared_mutex> lock(rate_limit_mutex);
nlohmann::json rules_json = nlohmann::json::array();
for(const auto &rule : rule_store) {
rules_json.push_back(rule.second.to_json());
}
return rules_json;
}
const nlohmann::json rate_limit_rule_t::to_json() const {
nlohmann::json rule;
nlohmann::json api_keys_json = nlohmann::json::array();
nlohmann::json ip_addresses_json = nlohmann::json::array();
rule["id"] = id;
rule["action"] = magic_enum::enum_name(action);
rule["priority"] = priority;
for(const auto &entity : entities) {
if(entity.entity_type == RateLimitedEntityType::api_key) {
api_keys_json.push_back(entity.entity_id);
} else if(entity.entity_type == RateLimitedEntityType::ip) {
ip_addresses_json.push_back(entity.entity_id);
}
}
if(max_requests.minute_threshold >= 0) {
rule["max_requests"]["minute_threshold"] = max_requests.minute_threshold;
}
if(max_requests.hour_threshold >= 0) {
rule["max_requests"]["hour_threshold"] = max_requests.hour_threshold;
}
if(auto_ban_1m_threshold >= 0) {
rule["auto_ban_1m_threshold"] = auto_ban_1m_threshold;
}
if(auto_ban_1m_duration_hours >= 0) {
rule["auto_ban_1m_duration_hours"] = auto_ban_1m_duration_hours;
}
if(!api_keys_json.empty()) {
rule["api_keys"] = api_keys_json;
}
if(!ip_addresses_json.empty()) {
rule["ip_addresses"] = ip_addresses_json;
}
return rule;
}
const nlohmann::json rate_limit_status_t::to_json() const {
nlohmann::json status;
status["id"] = status_id;
status["throttling_from"] = throttling_from;
status["throttling_to"] = throttling_to;
status["value"] = entity.entity_id;
status["entity_type"] = magic_enum::enum_name(entity.entity_type);
if(and_entity.ok()) {
auto and_entity_value = and_entity.get();
status["and_entity"] = nlohmann::json::object();
status["and_entity"]["value"] = and_entity_value.entity_id;
status["and_entity"]["entity_type"] = magic_enum::enum_name(and_entity_value.entity_type);
}
return status;
}
void rate_limit_status_t::parse_json(const nlohmann::json &json) {
status_id = json["id"];
throttling_from = json["throttling_from"];
throttling_to = json["throttling_to"];
entity.entity_id = json["value"];
entity.entity_type = magic_enum::enum_cast<RateLimitedEntityType>(json["entity_type"].get<std::string>()).value();
if(json.contains("and_entity")) {
and_entity = Option<rate_limit_entity_t>(rate_limit_entity_t{magic_enum::enum_cast<RateLimitedEntityType>(json["and_entity"]["entity_type"].get<std::string>()).value(), json["and_entity"]["value"]});
} else {
and_entity = Option<rate_limit_entity_t>(404, "No and_entity found");
}
}
Option<nlohmann::json> RateLimitManager::add_rule(const nlohmann::json &rule_json) {
std::unique_lock<std::shared_mutex> lock(rate_limit_mutex);
auto rule_validation_result = is_valid_rule(rule_json);
if(!rule_validation_result.ok()) {
return Option<nlohmann::json>(rule_validation_result.code(), rule_validation_result.error());
}
auto parsed_rule_option = parse_rule(rule_json);
if(!parsed_rule_option.ok()) {
return Option<nlohmann::json>(parsed_rule_option.code(), parsed_rule_option.error());
}
rate_limit_rule_t parsed_rule = parsed_rule_option.get();
parsed_rule.id = last_rule_id++;
const std::string rule_store_key = get_rule_key(parsed_rule.id);
bool inserted = store->insert(rule_store_key, parsed_rule.to_json().dump());
if(!inserted) {
return Option<nlohmann::json>(500, "Failed to insert rule into the DB store");
}
store->increment(std::string(RULES_NEXT_ID), 1);
// Insert rule to rule store
lock.unlock();
insert_rule(parsed_rule);
lock.lock();
nlohmann::json response;
response["message"] = "Rule added successfully.";
response["rule"] = parsed_rule.to_json();
return Option<nlohmann::json>(response);
}
Option<nlohmann::json> RateLimitManager::edit_rule(const uint64_t id, const nlohmann::json &rule_json) {
std::unique_lock<std::shared_mutex> lock(rate_limit_mutex);
const auto& rule_option = find_rule_by_id(id);
if(!rule_option.ok()) {
return Option<nlohmann::json>(rule_option.code(), rule_option.error());
}
auto rule_validation_result = is_valid_rule(rule_json);
if(!rule_validation_result.ok()) {
return Option<nlohmann::json>(rule_validation_result.code(), rule_validation_result.error());
}
auto parsed_rule_option = parse_rule(rule_json);
if(!parsed_rule_option.ok()) {
return Option<nlohmann::json>(parsed_rule_option.code(), parsed_rule_option.error());
}
rate_limit_rule_t parsed_rule = parsed_rule_option.get();
parsed_rule.id = id;
const std::string rule_store_key = get_rule_key(parsed_rule.id);
bool inserted = store->insert(rule_store_key, parsed_rule.to_json().dump());
if(!inserted) {
return Option<nlohmann::json>(500, "Failed to update rule in the DB store");
}
auto old_rule = rule_store.at(id);
// Remove rule from rate limit rule pointer
for(const auto &entity : old_rule.entities) {
auto& vec = rate_limit_entities.at(entity);
// erase-remove: drop pointers to the old rule (std::remove_if alone does not shrink the vector)
vec.erase(std::remove_if(vec.begin(), vec.end(), [&](const auto &rule) {
return rule->id == id;
}), vec.end());
}
// Insert new rule to rule store
lock.unlock();
insert_rule(parsed_rule);
lock.lock();
nlohmann::json response;
response["message"] = "Rule updated successfully.";
response["rule"] = parsed_rule.to_json();
return Option<nlohmann::json>(response);
}
Option<bool> RateLimitManager::is_valid_rule(const nlohmann::json &rule_json) {
if(rule_json.count("action") == 0) {
return Option<bool>(400, "Parameter `action` is required.");
}
if(rule_json.count("apply_limit_per_entity") > 0 && rule_json["apply_limit_per_entity"].is_boolean() == false) {
return Option<bool>(400, "Parameter `apply_limit_per_entity` must be a boolean.");
}
if((rule_json.count("ip_addresses") == 0 && rule_json.count("api_keys") == 0)) {
return Option<bool>(400, "Parameter `ip_addresses` or `api_keys` is required.");
}
if(rule_json.count("ip_addresses") > 0 && (!rule_json["ip_addresses"].is_array() || !rule_json["ip_addresses"][0].is_string())) {
return Option<bool>(400, "Parameter `ip_addresses` must be an array of strings.");
}
if(rule_json.count("api_keys") > 0 && (!rule_json["api_keys"].is_array() || !rule_json["api_keys"][0].is_string())) {
return Option<bool>(400, "Parameter `api_keys` must be an array of strings.");
}
if(rule_json.count("api_keys") > 0 && rule_json.count("ip_addresses") > 0 && rule_json["api_keys"].size() == 0 && rule_json["ip_addresses"].size() == 0) {
return Option<bool>(400, "Parameter `ip_addresses` or `api_keys` must have at least one value.");
}
if(rule_json.count("api_keys") > 0 && rule_json.count("ip_addresses") > 0 && rule_json["api_keys"].size() > 1 && rule_json["ip_addresses"].size() > 1) {
return Option<bool>(400, "Many to many rule is not supported.");
}
if(rule_json["action"].is_string() == false) {
return Option<bool>(400, "Parameter `action` must be a string.");
}
if(rule_json["action"] == "allow") {
return Option<bool>(true);
} else if(rule_json["action"] == "block") {
return Option<bool>(true);
} else if(rule_json["action"] == "throttle") {
if(rule_json.count("max_requests_1m") == 0 && rule_json.count("max_requests_1h") == 0) {
return Option<bool>(400, "At least one of `max_requests_1m` or `max_requests_1h` is required.");
}
if(rule_json.count("max_requests_1m") > 0 && !rule_json["max_requests_1m"].is_number_integer()) {
return Option<bool>(400, "Parameter `max_requests_1m` must be an integer.");
}
if(rule_json.count("max_requests_1h") > 0 && !rule_json["max_requests_1h"].is_number_integer()) {
return Option<bool>(400, "Parameter `max_requests_1h` must be an integer.");
}
if((rule_json.count("auto_ban_1m_threshold") > 0 && rule_json.count("auto_ban_1m_duration_hours") == 0) || (rule_json.count("auto_ban_1m_threshold") == 0 && rule_json.count("auto_ban_1m_duration_hours") > 0)) {
return Option<bool>(400, "Both `auto_ban_1m_threshold` and `auto_ban_1m_duration_hours` are required if either is specified.");
}
if(rule_json.count("auto_ban_1m_threshold") > 0 && rule_json.count("auto_ban_1m_duration_hours") > 0) {
if(!rule_json["auto_ban_1m_threshold"].is_number_integer() || !rule_json["auto_ban_1m_duration_hours"].is_number_integer()) {
return Option<bool>(400, "Parameters `auto_ban_1m_threshold` and `auto_ban_1m_duration_hours` must be integers.");
}
if(rule_json["auto_ban_1m_threshold"].get<int>() < 0 || rule_json["auto_ban_1m_duration_hours"].get<int>() < 0) {
return Option<bool>(400, "Both `auto_ban_1m_threshold` and `auto_ban_1m_duration_hours` must be greater than 0.");
}
}
} else {
return Option<bool>(400, "Invalid action.");
}
return Option<bool>(true);
}
Option<rate_limit_rule_t> RateLimitManager::parse_rule(const nlohmann::json &rule_json)
{
rate_limit_rule_t new_rule;
new_rule.action = magic_enum::enum_cast<RateLimitAction>(rule_json["action"].get<std::string>()).value();
if(rule_json.count("ip_addresses") > 0) {
for(const auto& ip: rule_json["ip_addresses"]) {
new_rule.entities.push_back(rate_limit_entity_t{RateLimitedEntityType::ip, ip});
}
}
if(rule_json.count("api_keys") > 0) {
for(const auto& api_key: rule_json["api_keys"]) {
new_rule.entities.push_back(rate_limit_entity_t{RateLimitedEntityType::api_key, api_key});
}
}
if(rule_json.count("max_requests_1m") > 0) {
new_rule.max_requests.minute_threshold = rule_json["max_requests_1m"];
}
if(rule_json.count("max_requests_1h") > 0) {
new_rule.max_requests.hour_threshold = rule_json["max_requests_1h"];
}
if(rule_json.count("auto_ban_1m_threshold") > 0 && rule_json.count("auto_ban_1m_duration_hours") > 0) {
new_rule.auto_ban_1m_threshold = rule_json["auto_ban_1m_threshold"];
new_rule.auto_ban_1m_duration_hours = rule_json["auto_ban_1m_duration_hours"];
}
if(rule_json.count("apply_limit_per_entity") > 0) {
new_rule.apply_limit_per_entity = rule_json["apply_limit_per_entity"].get<bool>();
}
if(rule_json.count("priority") > 0) {
new_rule.priority = rule_json["priority"];
}
return Option<rate_limit_rule_t>(new_rule);
}
void RateLimitManager::insert_rule(const rate_limit_rule_t &rule) {
std::unique_lock<std::shared_mutex> lock(rate_limit_mutex);
rule_store[rule.id] = rule;
for(const auto &entity : rule.entities) {
rate_limit_entities[entity].push_back(&rule_store[rule.id]);
}
}
Option<bool> RateLimitManager::init(Store *store) {
std::unique_lock<std::shared_mutex> lock(rate_limit_mutex);
this->store = store;
// Load rules from database
std::string last_rule_id_str;
StoreStatus last_rule_id_status = store->get(std::string(RULES_NEXT_ID), last_rule_id_str);
if(last_rule_id_status == StoreStatus::ERROR) {
return Option<bool>(500, "Error while fetching rule next id from database.");
}
else if(last_rule_id_status == StoreStatus::FOUND) {
last_rule_id = StringUtils::deserialize_uint32_t(last_rule_id_str);
}
else {
last_rule_id = 0;
}
std::vector<std::string> rule_json_strs;
store->scan_fill(std::string(RULES_PREFIX) + "_", std::string(RULES_PREFIX) + "`", rule_json_strs);
for(const auto& rule_json_str: rule_json_strs) {
nlohmann::json rule_json = nlohmann::json::parse(rule_json_str);
Option<rate_limit_rule_t> rule_option = parse_rule(rule_json);
if(!rule_option.ok()) {
return Option<bool>(rule_option.code(), rule_option.error());
}
auto rule = rule_option.get();
rule.id = rule_json["id"];
lock.unlock();
insert_rule(rule);
lock.lock();
}
// Load bans from database
std::string last_ban_id_str;
StoreStatus last_ban_id_status = store->get(BANS_NEXT_ID, last_ban_id_str);
if(last_ban_id_status == StoreStatus::ERROR) {
return Option<bool>(500, "Error while fetching ban next id from database.");
}
else if(last_ban_id_status == StoreStatus::FOUND) {
last_ban_id = StringUtils::deserialize_uint32_t(last_ban_id_str);
}
else {
last_ban_id = 0;
}
std::vector<std::string> ban_json_strs;
store->scan_fill(std::string(BANS_PREFIX) + "_", std::string(BANS_PREFIX) + "`", ban_json_strs);
for(const auto& ban_json_str: ban_json_strs) {
nlohmann::json ban_json = nlohmann::json::parse(ban_json_str);
rate_limit_status_t ban_status;
ban_status.parse_json(ban_json);
std::string key = ban_status.entity.entity_id + "_" + (ban_status.and_entity.ok() ? ban_status.and_entity.get().entity_id : ".*");
throttled_entities.insert({key, ban_status});
}
LOG(INFO) << "Loaded " << rule_store.size() << " rate limit rules.";
LOG(INFO) << "Loaded " << throttled_entities.size() << " rate limit bans.";
return Option<bool>(true);
}
std::string RateLimitManager::get_rule_key(const uint32_t id) {
return std::string(RULES_PREFIX) + "_" + std::to_string(id);
}
std::string RateLimitManager::get_ban_key(const uint32_t id) {
return std::string(BANS_PREFIX) + "_" + std::to_string(id);
}
time_t RateLimitManager::get_current_time() {
return base_timestamp + std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
}
void RateLimitManager::_set_base_timestamp(const time_t& timestamp) {
base_timestamp = timestamp;
}
Option<std::string> RateLimitManager::get_throttle_key(const rate_limit_entity_t& ip_entity, const rate_limit_entity_t& api_key_entity) {
if(throttled_entities.count(api_key_entity.entity_id + "_" + ip_entity.entity_id) > 0) {
return Option<std::string>(api_key_entity.entity_id + "_" + ip_entity.entity_id);
}
else if(throttled_entities.count(api_key_entity.entity_id + "_.*") > 0) {
return Option<std::string>(api_key_entity.entity_id + "_.*");
}
else if(throttled_entities.count(".*_" + ip_entity.entity_id) > 0) {
return Option<std::string>(".*_" + ip_entity.entity_id);
}
else if(throttled_entities.count(".*_.*") > 0) {
return Option<std::string>(".*_.*");
}
return Option<std::string>(404, "No throttle found.");
}
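// --- Illustrative sketch (editor's addition, not part of the original source) ---
// Throttle entries are keyed as "<api_key>_<ip>", with ".*" standing for "any". For a request
// using api key "abc" from 1.2.3.4, the lookups above are tried in this order:
//   "abc_1.2.3.4"   ban on this key from this IP
//   "abc_.*"        ban on this key from any IP
//   ".*_1.2.3.4"    ban on this IP for any key
//   ".*_.*"         global ban
// The first key present in throttled_entities wins; otherwise a 404 Option is returned.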
const std::string RateLimitManager::get_request_counter_key(const rate_limit_rule_t& rule, const rate_limit_entity_t& ip_entity, const rate_limit_entity_t& api_key_entity) {
bool has_api_key = false, has_ip = false, has_wildcard_ip = false, has_wildcard_api_key = false;
for(const auto& entity: rule.entities) {
if(entity.entity_type == RateLimitedEntityType::ip) {
has_ip = true;
if(entity.entity_id == ".*") {
has_wildcard_ip = true;
}
}
else if(entity.entity_type == RateLimitedEntityType::api_key) {
has_api_key = true;
if(entity.entity_id == ".*") {
has_wildcard_api_key = true;
}
}
}
std::string key;
if(!has_api_key || has_wildcard_api_key) {
key += ".*";
} else {
key += api_key_entity.entity_id;
}
key += "_";
if((!has_ip || has_wildcard_ip) && !rule.apply_limit_per_entity) {
key += ".*";
} else {
key += ip_entity.entity_id;
}
return key;
}
const nlohmann::json RateLimitManager::get_exceeded_entities_json() {
std::shared_lock<std::shared_mutex> lock(rate_limit_mutex);
nlohmann::json exceeded_entities_json = nlohmann::json::array();
for(const auto& entity: rate_limit_exceeds) {
exceeded_entities_json.push_back(entity.second.to_json());
}
return exceeded_entities_json;
}
const nlohmann::json RateLimitManager::get_throttled_entities_json() {
std::shared_lock<std::shared_mutex> lock(rate_limit_mutex);
nlohmann::json throttled_entities_json = nlohmann::json::array();
for(const auto& entity: throttled_entities) {
auto json = entity.second.to_json();
json[json["entity_type"].get<std::string>() == "ip" ? "ip_address" : "api_key"] = json["value"];
json.erase("entity_type");
json.erase("value");
if(json["and_entity"].is_object()) {
json[json["and_entity"]["entity_type"].get<std::string>() == "ip" ? "ip_address" : "api_key"] = json["and_entity"]["value"];
json.erase("and_entity");
}
if(json["api_key"] == ".*") {
json.erase("api_key");
}
throttled_entities_json.push_back(json);
}
return throttled_entities_json;
}
bool RateLimitManager::delete_ban_by_id(const uint64_t id) {
std::unique_lock<std::shared_mutex> lock(rate_limit_mutex);
std::string ban_key = get_ban_key(id);
bool deleted = store->remove(ban_key);
if(!deleted) {
return false;
}
auto ban = std::find_if(throttled_entities.begin(), throttled_entities.end(), [id](const auto& ban) {
return ban.second.status_id == id;
});
if(ban != throttled_entities.end()) {
throttled_entities.erase(ban);
} else {
return false;
}
return true;
}
void RateLimitManager::fill_bucket(const rate_limit_entity_t& target_entity, const rate_limit_entity_t& other_entity, std::vector<rate_limit_rule_t*> &rules_bucket) {
auto it = rate_limit_entities.find(target_entity);
if(it == rate_limit_entities.end()) {
return;
}
for(const auto& rule: it->second) {
// Skip if rule already exists in bucket
if(std::find(rules_bucket.begin(), rules_bucket.end(), rule) != rules_bucket.end()) {
continue;
}
// Add the rule only if:
// A. it has no entity with the type of other_entity
// B. it has an entity with the type of other_entity and its value is equal to other_entity's value
// C. it has an entity with the type of other_entity and its value is ".*"
bool has_other_entity = false;
for(const auto& entity: rule->entities) {
if(entity.entity_type == other_entity.entity_type) {
has_other_entity = true;
if(entity.entity_id == other_entity.entity_id || entity.entity_id == ".*") {
rules_bucket.push_back(rule);
}
}
}
if(!has_other_entity) {
rules_bucket.push_back(rule);
}
}
}
bool RateLimitManager::delete_throttle_by_id(const uint32_t id) {
std::unique_lock<std::shared_mutex> lock(rate_limit_mutex);
bool flag = false;
decltype(rate_limit_exceeds)::iterator iterator;
for(auto it = rate_limit_exceeds.begin(); it != rate_limit_exceeds.end(); it++) {
if(it->second.rule_id == id) {
flag = true;
iterator = it;
break;
}
}
if(!flag) {
return false;
}
rate_limit_request_counts.erase(iterator->first);
rate_limit_exceeds.erase(iterator);
return true;
}
size: 30,524 | language: C++ | extension: .cpp | total_lines: 659 | avg_line_length: 39.373293 | max_line_length: 309 | alphanum_fraction: 0.634275 | repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near, all sources): false

id: 3,771 | file_name: or_iterator.cpp | file_path: typesense_typesense/src/or_iterator.cpp
#include "or_iterator.h"
#include "filter.h"
bool or_iterator_t::at_end(const std::vector<or_iterator_t>& its) {
// if any iterator is invalid, we stop
for(const auto& it : its) {
if(!it.valid()) {
return true;
}
}
return false;
}
bool or_iterator_t::at_end2(const std::vector<or_iterator_t>& its) {
// if any iterator is invalid, we stop
return !its[0].valid() || !its[1].valid();
}
bool or_iterator_t::equals(std::vector<or_iterator_t>& its) {
for(int i = 0; i < int(its.size()) - 1; i++) {
if(its[i].id() != its[i+1].id()) {
return false;
}
}
return true;
}
bool or_iterator_t::equals2(std::vector<or_iterator_t>& its) {
return its[0].id() == its[1].id();
}
void or_iterator_t::advance_all(std::vector<or_iterator_t>& its) {
for(size_t i = 0; i < its.size(); i++) {
auto& it = its[i];
bool valid = it.next();
if(!valid) {
its.erase(its.begin() + i);
}
}
}
void or_iterator_t::advance_all2(std::vector<or_iterator_t>& its) {
bool valid0 = its[0].next();
bool valid1 = its[1].next();
if(!valid0) {
its.erase(its.begin() + 0);
if(!valid1) {
// the element at index 1 has now shifted down to index 0
its.erase(its.begin() + 0);
}
} else if(!valid1) {
its.erase(its.begin() + 1);
}
}
void or_iterator_t::advance_non_largest(std::vector<or_iterator_t>& its) {
// find the iterator with the greatest id and advance the rest until their ids catch up
uint32_t greatest_value = 0;
for(size_t i = 0; i < its.size(); i++) {
if(its[i].id() > greatest_value) {
greatest_value = its[i].id();
}
}
for(size_t i = 0; i < its.size(); i++) {
if(its[i].id() != greatest_value) {
bool valid = its[i].skip_to(greatest_value);
if(!valid) {
its.erase(its.begin() + i);
i--;
}
}
}
}
void or_iterator_t::advance_non_largest2(std::vector<or_iterator_t>& its) {
if(its[0].id() > its[1].id()) {
bool valid = its[1].skip_to(its[0].id());
if(!valid) {
its.erase(its.begin() + 1);
}
} else {
bool valid = its[0].skip_to(its[1].id());
if(!valid) {
its.erase(its.begin() + 0);
}
}
}
bool or_iterator_t::valid() const {
return !its.empty();
}
bool or_iterator_t::next() {
size_t num_lists = its.size();
switch (num_lists) {
case 0:
break;
case 2:
if(!posting_list_t::all_ended2(its)) {
advance_smallest();
}
break;
default:
if(!posting_list_t::all_ended(its)) {
advance_smallest();
}
break;
}
return !its.empty();
}
void or_iterator_t::advance_smallest() {
// advance all iterators positioned at the smallest id and point curr_index to the next smallest id across the lists
auto smallest_value = its[curr_index].id();
curr_index = 0;
for(int i = 0; i < int(its.size()); i++) {
if(its[i].id() == smallest_value) {
its[i].next();
}
if(!its[i].valid()) {
its[i].reset_cache();
its.erase(its.cbegin() + i);
i--;
}
}
uint32_t new_smallest_value = UINT32_MAX;
for(int i = 0; i < int(its.size()); i++) {
if(its[i].id() < new_smallest_value) {
curr_index = i;
new_smallest_value = its[i].id();
}
}
}
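// --- Illustrative sketch (editor's addition, not part of the original source) ---
// Walkthrough of advance_smallest() on three posting-list iterators (remaining ids shown):
//   A: [3, 9]   B: [3, 5]   C: [7]
// curr_index points at id 3. advance_smallest() moves every iterator sitting on 3, so A -> 9
// and B -> 5 while C stays on 7; the new smallest id is 5, so curr_index now points at B.
// Repeated calls therefore enumerate the union 3, 5, 7, 9 in ascending order, which is what
// or_iterator_t::next() and id() rely on.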
bool or_iterator_t::skip_to(uint32_t id) {
auto current_value = UINT32_MAX;
curr_index = 0;
for(size_t i = 0; i < its.size(); i++) {
auto& it = its[i];
it.skip_to(id);
if(!it.valid()) {
its[i].reset_cache();
its.erase(its.begin() + i);
i--;
} else {
if(it.id() < current_value) {
curr_index = i;
current_value = it.id();
}
}
}
return !its.empty();
}
uint32_t or_iterator_t::id() const {
return its[curr_index].id();
}
bool or_iterator_t::take_id(result_iter_state_t& istate, uint32_t id, bool& is_excluded) {
is_excluded = false;
// decide if this result id should be excluded
if(istate.excluded_result_ids_size != 0) {
if (std::binary_search(istate.excluded_result_ids,
istate.excluded_result_ids + istate.excluded_result_ids_size, id)) {
is_excluded = true;
return false;
}
}
// decide if this result should be matched against the filter results
if(istate.filter_ids_length != 0) {
if(istate.filter_ids_index >= istate.filter_ids_length) {
return false;
}
// Returns an iterator to the first element that is >= the value, or last if no such element is found.
size_t found_index = std::lower_bound(istate.filter_ids + istate.filter_ids_index,
istate.filter_ids + istate.filter_ids_length, id) - istate.filter_ids;
if(found_index == istate.filter_ids_length) {
// all remaining filter ids are less than this id, so we can stop looking
istate.filter_ids_index = found_index + 1;
return false;
} else {
if(istate.filter_ids[found_index] == id) {
istate.filter_ids_index = found_index + 1;
return true;
}
istate.filter_ids_index = found_index;
}
return false;
}
if (istate.fit != nullptr && istate.fit->approx_filter_ids_length > 0) {
return istate.fit->is_valid(id) == 1;
}
return true;
}
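// --- Illustrative sketch (editor's addition, not part of the original source) ---
// Example of the filter intersection above: with filter_ids = [4, 9, 15] and filter_ids_index = 0,
//   take_id(istate, 7)  -> lower_bound finds index 1 (id 9); 9 != 7, index stays at 1, returns false
//   take_id(istate, 9)  -> lower_bound finds index 1 (id 9); match, index advances to 2, returns true
//   take_id(istate, 20) -> lower_bound returns 3 (== length); no further ids can match, returns false
// Because both filter_ids and the candidate ids arrive in ascending order, each array is scanned once.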
bool or_iterator_t::take_id(result_iter_state_t& istate, uint32_t id, bool& is_excluded,
single_filter_result_t& filter_result) {
is_excluded = false;
// decide if this result id should be excluded
if(istate.excluded_result_ids_size != 0) {
if (std::binary_search(istate.excluded_result_ids,
istate.excluded_result_ids + istate.excluded_result_ids_size, id)) {
is_excluded = true;
return false;
}
}
// decide if this result should be matched against the filter results
if(istate.filter_ids_length != 0) {
if(istate.filter_ids_index >= istate.filter_ids_length) {
return false;
}
// Returns an iterator to the first element that is >= the value, or last if no such element is found.
size_t found_index = std::lower_bound(istate.filter_ids + istate.filter_ids_index,
istate.filter_ids + istate.filter_ids_length, id) - istate.filter_ids;
if(found_index == istate.filter_ids_length) {
// all remaining filter ids are less than this id, so we can stop looking
istate.filter_ids_index = found_index + 1;
return false;
} else {
if(istate.filter_ids[found_index] == id) {
filter_result.seq_id = id;
istate.filter_ids_index = found_index + 1;
return true;
}
istate.filter_ids_index = found_index;
}
return false;
}
if (istate.fit != nullptr && istate.fit->approx_filter_ids_length > 0) {
if (istate.fit->is_valid(id) == 1) {
filter_result.seq_id = id;
filter_result.reference_filter_results = std::move(istate.fit->reference);
istate.fit->next();
return true;
}
return false;
}
filter_result.seq_id = id;
return true;
}
or_iterator_t::or_iterator_t(std::vector<posting_list_t::iterator_t>& its): its(std::move(its)) {
curr_index = 0;
for(size_t i = 1; i < this->its.size(); i++) {
if(this->its[i].id() < this->its[curr_index].id()) {
curr_index = i;
}
}
}
or_iterator_t::or_iterator_t(or_iterator_t&& rhs) noexcept {
its = std::move(rhs.its);
curr_index = rhs.curr_index;
}
or_iterator_t& or_iterator_t::operator=(or_iterator_t&& rhs) noexcept {
its = std::move(rhs.its);
curr_index = rhs.curr_index;
return *this;
}
const std::vector<posting_list_t::iterator_t>& or_iterator_t::get_its() const {
return its;
}
or_iterator_t::~or_iterator_t() noexcept {
for(auto& it: its) {
it.reset_cache();
}
}
bool or_iterator_t::contains_atleast_one(std::vector<or_iterator_t>& its, result_iter_state_t&& istate) {
size_t it_size = its.size();
bool is_excluded;
switch (its.size()) {
case 0:
break;
case 1:
if(istate.is_filter_provided() && istate.is_filter_valid()) {
its[0].skip_to(istate.get_filter_id());
}
while(its.size() == it_size && its[0].valid()) {
auto id = its[0].id();
if(take_id(istate, id, is_excluded)) {
return true;
}
if(istate.is_filter_provided() && !is_excluded) {
if(istate.is_filter_valid()) {
// skip iterator till next id available in filter
its[0].skip_to(istate.get_filter_id());
} else {
break;
}
} else {
its[0].next();
}
}
break;
case 2:
if(istate.is_filter_provided() && istate.is_filter_valid()) {
its[0].skip_to(istate.get_filter_id());
its[1].skip_to(istate.get_filter_id());
}
while(its.size() == it_size && !at_end2(its)) {
if(equals2(its)) {
auto id = its[0].id();
if(take_id(istate, id, is_excluded)) {
return true;
}
if(istate.is_filter_provided() && !is_excluded) {
if(istate.is_filter_valid()) {
// skip iterator till next id available in filter
its[0].skip_to(istate.get_filter_id());
its[1].skip_to(istate.get_filter_id());
} else {
break;
}
} else {
advance_all2(its);
}
} else {
advance_non_largest2(its);
}
}
break;
default:
if(istate.is_filter_provided() && istate.is_filter_valid()) {
for(auto& it: its) {
it.skip_to(istate.get_filter_id());
}
}
while(its.size() == it_size && !at_end(its)) {
if(equals(its)) {
auto id = its[0].id();
if(take_id(istate, id, is_excluded)) {
return true;
}
if(istate.is_filter_provided() && !is_excluded) {
if(istate.is_filter_valid()) {
// skip iterator till next id available in filter
for(auto& it: its) {
it.skip_to(istate.get_filter_id());
}
} else {
break;
}
} else {
advance_all(its);
}
} else {
advance_non_largest(its);
}
}
}
return false;
}
size: 11,829 | language: C++ | extension: .cpp | total_lines: 335 | avg_line_length: 24.376119 | max_line_length: 116 | alphanum_fraction: 0.494227
repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: false | near_duplicates_stackv1: false | near_duplicates_stackv2: false

id: 3,772 | file_name: text_embedder_remote.cpp | file_path: typesense_typesense/src/text_embedder_remote.cpp
#include <http_proxy.h>
#include "text_embedder_remote.h"
#include "embedder_manager.h"
Option<bool> RemoteEmbedder::validate_string_properties(const nlohmann::json& model_config, const std::vector<std::string>& properties) {
for(auto& property : properties) {
if(model_config.count(property) == 0 || !model_config[property].is_string()) {
return Option<bool>(400, "Property `embed.model_config." + property + " is missing or is not a string.");
}
}
return Option<bool>(true);
}
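// If no raft leader URL is available, the request is sent directly through the local
// HttpProxy; otherwise it is forwarded to the leader's proxy endpoint. The forwarded
// call's timeout is sized as (per_call_timeout_ms * num_try) plus a small buffer so
// the proxy has time to exhaust its own retries.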
long RemoteEmbedder::call_remote_api(const std::string& method, const std::string& url, const std::string& req_body,
std::string& res_body,
std::map<std::string, std::string>& res_headers,
std::unordered_map<std::string, std::string>& req_headers) {
if(raft_server == nullptr || raft_server->get_leader_url().empty()) {
// call proxy's internal send() directly
if(method == "GET" || method == "POST") {
auto proxy_res = HttpProxy::get_instance().send(url, method, req_body, req_headers);
res_body = std::move(proxy_res.body);
res_headers = std::move(proxy_res.headers);
return proxy_res.status_code;
} else {
return 400;
}
}
auto proxy_url = raft_server->get_leader_url() + "proxy";
nlohmann::json proxy_req_body;
proxy_req_body["method"] = method;
proxy_req_body["url"] = url;
proxy_req_body["body"] = req_body;
proxy_req_body["headers"] = req_headers;
size_t per_call_timeout_ms = HttpProxy::default_timeout_ms;
size_t num_try = HttpProxy::default_num_try;
if(req_headers.find("timeout_ms") != req_headers.end()){
per_call_timeout_ms = std::stoul(req_headers.at("timeout_ms"));
}
if(req_headers.find("num_try") != req_headers.end()){
num_try = std::stoul(req_headers.at("num_try"));
}
size_t proxy_call_timeout_ms = (per_call_timeout_ms * num_try) + 1000;
return HttpClient::get_instance().post_response(proxy_url, proxy_req_body.dump(), res_body, res_headers, {},
proxy_call_timeout_ms, true);
}
const std::string RemoteEmbedder::get_model_key(const nlohmann::json& model_config) {
const std::string model_namespace = EmbedderManager::get_model_namespace(model_config["model_name"].get<std::string>());
if(model_namespace == "openai") {
return OpenAIEmbedder::get_model_key(model_config);
} else if(model_namespace == "google") {
return GoogleEmbedder::get_model_key(model_config);
} else if(model_namespace == "gcp") {
return GCPEmbedder::get_model_key(model_config);
} else {
return "";
}
}
OpenAIEmbedder::OpenAIEmbedder(const std::string& openai_model_path, const std::string& api_key, const size_t num_dims, const bool has_custom_dims, const std::string& openai_url) : api_key(api_key), openai_model_path(openai_model_path),
num_dims(num_dims), has_custom_dims(has_custom_dims){
if(openai_url.empty()) {
this->openai_url = "https://api.openai.com";
} else {
this->openai_url = openai_url;
}
}
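// Validates the model config by issuing a one-off embedding request for the text
// "typesense"; on success, num_dims is set from the size of the returned vector.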
Option<bool> OpenAIEmbedder::is_model_valid(const nlohmann::json& model_config, size_t& num_dims) {
auto validate_properties = validate_string_properties(model_config, {"model_name", "api_key"});
if (!validate_properties.ok()) {
return validate_properties;
}
const std::string openai_url = model_config.count("url") > 0 ? model_config["url"].get<std::string>() : "https://api.openai.com";
auto model_name = model_config["model_name"].get<std::string>();
auto api_key = model_config["api_key"].get<std::string>();
if(EmbedderManager::get_model_namespace(model_name) != "openai") {
return Option<bool>(400, "Property `embed.model_config.model_name` malformed.");
}
std::unordered_map<std::string, std::string> headers;
std::map<std::string, std::string> res_headers;
headers["Authorization"] = "Bearer " + api_key;
std::string res;
nlohmann::json req_body;
req_body["input"] = "typesense";
// remove "openai/" prefix
auto model_name_without_namespace = EmbedderManager::get_model_name_without_namespace(model_name);
req_body["model"] = model_name_without_namespace;
if(num_dims > 0) {
req_body["dimensions"] = num_dims;
}
std::string embedding_res;
headers["Content-Type"] = "application/json";
auto res_code = call_remote_api("POST", get_openai_create_embedding_url(openai_url), req_body.dump(), embedding_res, res_headers, headers);
if(res_code == 408) {
return Option<bool>(408, "OpenAI API timeout.");
}
if (res_code != 200) {
nlohmann::json json_res;
try {
json_res = nlohmann::json::parse(embedding_res);
} catch (const std::exception& e) {
return Option<bool>(400, "OpenAI API error: " + embedding_res);
}
if(json_res.count("error") == 0 || json_res["error"].count("message") == 0) {
return Option<bool>(400, "OpenAI API error: " + embedding_res);
}
return Option<bool>(400, "OpenAI API error: " + json_res["error"]["message"].get<std::string>());
}
std::vector<float> embedding;
try {
embedding = nlohmann::json::parse(embedding_res)["data"][0]["embedding"].get<std::vector<float>>();
} catch (const std::exception& e) {
return Option<bool>(400, "Got malformed response from OpenAI API.");
}
num_dims = embedding.size();
return Option<bool>(true);
}
embedding_res_t OpenAIEmbedder::Embed(const std::string& text, const size_t remote_embedder_timeout_ms, const size_t remote_embedding_num_tries) {
std::shared_lock<std::shared_mutex> lock(mutex);
std::unordered_map<std::string, std::string> headers;
std::map<std::string, std::string> res_headers;
headers["Authorization"] = "Bearer " + api_key;
headers["Content-Type"] = "application/json";
headers["timeout_ms"] = std::to_string(remote_embedder_timeout_ms);
headers["num_try"] = std::to_string(remote_embedding_num_tries);
std::string res;
nlohmann::json req_body;
req_body["input"] = std::vector<std::string>{text};
if(has_custom_dims) {
req_body["dimensions"] = num_dims;
}
// remove "openai/" prefix
req_body["model"] = EmbedderManager::get_model_name_without_namespace(openai_model_path);
auto res_code = call_remote_api("POST", get_openai_create_embedding_url(openai_url), req_body.dump(), res, res_headers, headers);
if (res_code != 200) {
return embedding_res_t(res_code, get_error_json(req_body, res_code, res));
}
try {
embedding_res_t embedding_res = embedding_res_t(nlohmann::json::parse(res)["data"][0]["embedding"].get<std::vector<float>>());
return embedding_res;
} catch (const std::exception& e) {
return embedding_res_t(500, get_error_json(req_body, res_code, res));
}
}
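// Embeds a batch of inputs with a single API call. If the batch is larger than
// remote_embedding_batch_size it is split into chunks and this function recurses
// on each chunk, concatenating the per-chunk results.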
std::vector<embedding_res_t> OpenAIEmbedder::batch_embed(const std::vector<std::string>& inputs, const size_t remote_embedding_batch_size,
const size_t remote_embedding_timeout_ms, const size_t remote_embedding_num_tries) {
// recurse in chunks when the number of inputs exceeds remote_embedding_batch_size
if(inputs.size() > remote_embedding_batch_size) {
std::vector<embedding_res_t> outputs;
for(size_t i = 0; i < inputs.size(); i += remote_embedding_batch_size) {
auto batch = std::vector<std::string>(inputs.begin() + i, inputs.begin() + std::min(i + remote_embedding_batch_size, inputs.size()));
auto batch_outputs = batch_embed(batch, remote_embedding_batch_size, remote_embedding_timeout_ms, remote_embedding_num_tries);
outputs.insert(outputs.end(), batch_outputs.begin(), batch_outputs.end());
}
return outputs;
}
nlohmann::json req_body;
req_body["input"] = inputs;
if(has_custom_dims) {
req_body["dimensions"] = num_dims;
}
// remove "openai/" prefix
req_body["model"] = openai_model_path.substr(7);
std::unordered_map<std::string, std::string> headers;
headers["Authorization"] = "Bearer " + api_key;
headers["Content-Type"] = "application/json";
headers["timeout_ms"] = std::to_string(remote_embedding_timeout_ms);
headers["num_try"] = std::to_string(remote_embedding_num_tries);
std::map<std::string, std::string> res_headers;
std::string res;
auto res_code = call_remote_api("POST", get_openai_create_embedding_url(openai_url), req_body.dump(), res, res_headers, headers);
if(res_code != 200) {
std::vector<embedding_res_t> outputs;
nlohmann::json embedding_res = get_error_json(req_body, res_code, res);
for(size_t i = 0; i < inputs.size(); i++) {
embedding_res["request"]["body"]["input"][0] = inputs[i];
outputs.push_back(embedding_res_t(res_code, embedding_res));
}
return outputs;
}
nlohmann::json res_json;
try {
res_json = nlohmann::json::parse(res);
} catch (const std::exception& e) {
nlohmann::json embedding_res = get_error_json(req_body, res_code, res);
std::vector<embedding_res_t> outputs;
for(size_t i = 0; i < inputs.size(); i++) {
embedding_res["request"]["body"]["input"][0] = inputs[i];
outputs.push_back(embedding_res_t(500, embedding_res));
}
return outputs;
}
if(res_json.count("data") == 0 || !res_json["data"].is_array() || res_json["data"].size() != inputs.size()) {
std::vector<embedding_res_t> outputs;
for(size_t i = 0; i < inputs.size(); i++) {
outputs.push_back(embedding_res_t(500, "Got malformed response from OpenAI API."));
}
return outputs;
}
std::vector<embedding_res_t> outputs;
for(auto& data : res_json["data"]) {
if(data.count("embedding") == 0 || !data["embedding"].is_array() || data["embedding"].size() == 0) {
outputs.push_back(embedding_res_t(500, "Got malformed response from OpenAI API."));
continue;
}
outputs.push_back(embedding_res_t(data["embedding"].get<std::vector<float>>()));
}
return outputs;
}
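// Builds the error payload returned to callers: the parsed (or raw) API response,
// the request that was sent (input truncated to one element to keep the payload
// small), and a human-readable error message.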
nlohmann::json OpenAIEmbedder::get_error_json(const nlohmann::json& req_body, long res_code, const std::string& res_body) {
nlohmann::json json_res;
try {
json_res = nlohmann::json::parse(res_body);
} catch (const std::exception& e) {
json_res = nlohmann::json::object();
json_res["error"] = "Malformed response from OpenAI API.";
}
nlohmann::json embedding_res = nlohmann::json::object();
embedding_res["response"] = json_res;
embedding_res["request"] = nlohmann::json::object();
embedding_res["request"]["url"] = get_openai_create_embedding_url(openai_url);
embedding_res["request"]["method"] = "POST";
embedding_res["request"]["body"] = req_body;
if(embedding_res["request"]["body"].count("input") > 0 && embedding_res["request"]["body"]["input"].get<std::vector<std::string>>().size() > 1) {
auto vec = embedding_res["request"]["body"]["input"].get<std::vector<std::string>>();
vec.resize(1);
embedding_res["request"]["body"]["input"] = vec;
}
if(json_res.count("error") != 0 && json_res["error"].count("message") != 0) {
embedding_res["error"] = "OpenAI API error: " + json_res["error"]["message"].get<std::string>();
}
if(res_code == 408) {
embedding_res["error"] = "OpenAI API timeout.";
}
return embedding_res;
}
std::string OpenAIEmbedder::get_model_key(const nlohmann::json& model_config) {
return model_config["model_name"].get<std::string>() + ":" + model_config["api_key"].get<std::string>();
}
GoogleEmbedder::GoogleEmbedder(const std::string& google_api_key) : google_api_key(google_api_key) {
}
Option<bool> GoogleEmbedder::is_model_valid(const nlohmann::json& model_config, size_t& num_dims) {
auto validate_properties = validate_string_properties(model_config, {"model_name", "api_key"});
if (!validate_properties.ok()) {
return validate_properties;
}
auto model_name = model_config["model_name"].get<std::string>();
auto api_key = model_config["api_key"].get<std::string>();
if(EmbedderManager::get_model_namespace(model_name) != "google") {
return Option<bool>(400, "Property `embed.model_config.model_name` malformed.");
}
if(EmbedderManager::get_model_name_without_namespace(model_name) != std::string(SUPPORTED_MODEL)) {
return Option<bool>(400, "Property `embed.model_config.model_name` is not a supported Google model.");
}
std::unordered_map<std::string, std::string> headers;
std::map<std::string, std::string> res_headers;
headers["Content-Type"] = "application/json";
std::string res;
nlohmann::json req_body;
req_body["text"] = "test";
auto res_code = call_remote_api("POST", std::string(GOOGLE_CREATE_EMBEDDING) + api_key, req_body.dump(), res, res_headers, headers);
if(res_code != 200) {
nlohmann::json json_res;
try {
json_res = nlohmann::json::parse(res);
} catch (const std::exception& e) {
json_res = nlohmann::json::object();
json_res["error"] = "Malformed response from Google API.";
}
if(res_code == 408) {
return Option<bool>(408, "Google API timeout.");
}
if(json_res.count("error") == 0 || json_res["error"].count("message") == 0) {
return Option<bool>(400, "Google API error: " + res);
}
return Option<bool>(400, "Google API error: " + json_res["error"]["message"].get<std::string>());
}
try {
num_dims = nlohmann::json::parse(res)["embedding"]["value"].get<std::vector<float>>().size();
} catch (const std::exception& e) {
return Option<bool>(500, "Got malformed response from Google API.");
}
return Option<bool>(true);
}
embedding_res_t GoogleEmbedder::Embed(const std::string& text, const size_t remote_embedder_timeout_ms, const size_t remote_embedding_num_tries) {
std::shared_lock<std::shared_mutex> lock(mutex);
std::unordered_map<std::string, std::string> headers;
std::map<std::string, std::string> res_headers;
headers["Content-Type"] = "application/json";
headers["timeout_ms"] = std::to_string(remote_embedder_timeout_ms);
headers["num_try"] = std::to_string(remote_embedding_num_tries);
std::string res;
nlohmann::json req_body;
req_body["text"] = text;
auto res_code = call_remote_api("POST", std::string(GOOGLE_CREATE_EMBEDDING) + google_api_key, req_body.dump(), res, res_headers, headers);
if(res_code != 200) {
return embedding_res_t(res_code, get_error_json(req_body, res_code, res));
}
try {
return embedding_res_t(nlohmann::json::parse(res)["embedding"]["value"].get<std::vector<float>>());
} catch (const std::exception& e) {
return embedding_res_t(500, get_error_json(req_body, res_code, res));
}
}
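// Embeds the inputs one at a time via Embed(); two consecutive timeouts abort the
// whole batch with a 408 result for every input.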
std::vector<embedding_res_t> GoogleEmbedder::batch_embed(const std::vector<std::string>& inputs, const size_t remote_embedding_batch_size,
const size_t remote_embedding_timeout_ms, const size_t remote_embedding_num_tries) {
std::vector<embedding_res_t> outputs;
bool timeout_prev = false;
for(auto& input : inputs) {
auto res = Embed(input, remote_embedding_timeout_ms, remote_embedding_num_tries);
if(res.status_code == 408) {
if(timeout_prev) {
// fail the whole batch after two consecutive timeouts
nlohmann::json req_body;
req_body["text"] = input;
return std::vector<embedding_res_t>(inputs.size(), embedding_res_t(408, get_error_json(req_body, 408, "")));
}
timeout_prev = true;
} else {
timeout_prev = false;
}
outputs.push_back(res);
}
return outputs;
}
nlohmann::json GoogleEmbedder::get_error_json(const nlohmann::json& req_body, long res_code, const std::string& res_body) {
nlohmann::json json_res;
try {
json_res = nlohmann::json::parse(res_body);
} catch (const std::exception& e) {
json_res = nlohmann::json::object();
json_res["error"] = "Malformed response from Google API.";
}
nlohmann::json embedding_res = nlohmann::json::object();
embedding_res["response"] = json_res;
embedding_res["request"] = nlohmann::json::object();
embedding_res["request"]["url"] = GOOGLE_CREATE_EMBEDDING;
embedding_res["request"]["method"] = "POST";
embedding_res["request"]["body"] = req_body;
if(json_res.count("error") != 0 && json_res["error"].count("message") != 0) {
embedding_res["error"] = "Google API error: " + json_res["error"]["message"].get<std::string>();
}
if(res_code == 408) {
embedding_res["error"] = "Google API timeout.";
}
return embedding_res;
}
std::string GoogleEmbedder::get_model_key(const nlohmann::json& model_config) {
return model_config["model_name"].get<std::string>() + ":" + model_config["api_key"].get<std::string>();
}
GCPEmbedder::GCPEmbedder(const std::string& project_id, const std::string& model_name, const std::string& access_token,
const std::string& refresh_token, const std::string& client_id, const std::string& client_secret) :
project_id(project_id), access_token(access_token), refresh_token(refresh_token), client_id(client_id), client_secret(client_secret) {
this->model_name = EmbedderManager::get_model_name_without_namespace(model_name);
}
Option<bool> GCPEmbedder::is_model_valid(const nlohmann::json& model_config, size_t& num_dims) {
auto validate_properties = validate_string_properties(model_config, {"model_name", "project_id", "access_token", "refresh_token", "client_id", "client_secret"});
if (!validate_properties.ok()) {
return validate_properties;
}
auto model_name = model_config["model_name"].get<std::string>();
auto project_id = model_config["project_id"].get<std::string>();
auto access_token = model_config["access_token"].get<std::string>();
auto refresh_token = model_config["refresh_token"].get<std::string>();
auto client_id = model_config["client_id"].get<std::string>();
auto client_secret = model_config["client_secret"].get<std::string>();
if(EmbedderManager::get_model_namespace(model_name) != "gcp") {
return Option<bool>(400, "Invalid GCP model name");
}
auto model_name_without_namespace = EmbedderManager::get_model_name_without_namespace(model_name);
std::unordered_map<std::string, std::string> headers;
std::map<std::string, std::string> res_headers;
headers["Content-Type"] = "application/json";
headers["Authorization"] = "Bearer " + access_token;
std::string res;
nlohmann::json req_body;
req_body["instances"] = nlohmann::json::array();
nlohmann::json instance;
instance["content"] = "typesense";
req_body["instances"].push_back(instance);
auto res_code = call_remote_api("POST", get_gcp_embedding_url(project_id, model_name_without_namespace), req_body.dump(), res, res_headers, headers);
if(res_code != 200) {
nlohmann::json json_res;
try {
json_res = nlohmann::json::parse(res);
} catch (const std::exception& e) {
return Option<bool>(400, "Got malformed response from GCP API.");
}
if(res_code == 408) {
return Option<bool>(408, "GCP API timeout.");
}
if(json_res.count("error") == 0 || json_res["error"].count("message") == 0) {
return Option<bool>(400, "GCP API error: " + res);
}
return Option<bool>(400, "GCP API error: " + json_res["error"]["message"].get<std::string>());
}
nlohmann::json res_json;
try {
res_json = nlohmann::json::parse(res);
} catch (const std::exception& e) {
return Option<bool>(400, "Got malformed response from GCP API.");
}
if(res_json.count("predictions") == 0 || res_json["predictions"].size() == 0 || res_json["predictions"][0].count("embeddings") == 0) {
LOG(INFO) << "Invalid response from GCP API: " << res_json.dump();
return Option<bool>(400, "GCP API error: Invalid response");
}
auto generate_access_token_res = generate_access_token(refresh_token, client_id, client_secret);
if(!generate_access_token_res.ok()) {
return Option<bool>(400, "Invalid client_id, client_secret or refresh_token in `embed.model config'.");
}
num_dims = res_json["predictions"][0]["embeddings"]["values"].size();
return Option<bool>(true);
}
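// Embeds a single text via the GCP prediction endpoint. On a 401 response the OAuth
// access token is refreshed via generate_access_token() and the request is retried
// once with the new token.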
embedding_res_t GCPEmbedder::Embed(const std::string& text, const size_t remote_embedder_timeout_ms, const size_t remote_embedding_num_tries) {
std::shared_lock<std::shared_mutex> lock(mutex);
nlohmann::json req_body;
req_body["instances"] = nlohmann::json::array();
nlohmann::json instance;
instance["content"] = text;
req_body["instances"].push_back(instance);
std::unordered_map<std::string, std::string> headers;
headers["Authorization"] = "Bearer " + access_token;
headers["Content-Type"] = "application/json";
headers["timeout_ms"] = std::to_string(remote_embedder_timeout_ms);
headers["num_try"] = std::to_string(remote_embedding_num_tries);
std::map<std::string, std::string> res_headers;
std::string res;
auto res_code = call_remote_api("POST", get_gcp_embedding_url(project_id, model_name), req_body.dump(), res, res_headers, headers);
if(res_code != 200) {
if(res_code == 401) {
auto refresh_op = generate_access_token(refresh_token, client_id, client_secret);
if(!refresh_op.ok()) {
nlohmann::json embedding_res = nlohmann::json::object();
embedding_res["error"] = refresh_op.error();
return embedding_res_t(refresh_op.code(), embedding_res);
}
access_token = refresh_op.get();
// retry
headers["Authorization"] = "Bearer " + access_token;
res_code = call_remote_api("POST", get_gcp_embedding_url(project_id, model_name), req_body.dump(), res, res_headers, headers);
}
}
if(res_code != 200) {
return embedding_res_t(res_code, get_error_json(req_body, res_code, res));
}
nlohmann::json res_json;
try {
res_json = nlohmann::json::parse(res);
} catch (const std::exception& e) {
return embedding_res_t(500, get_error_json(req_body, res_code, res));
}
return embedding_res_t(res_json["predictions"][0]["embeddings"]["values"].get<std::vector<float>>());
}
std::vector<embedding_res_t> GCPEmbedder::batch_embed(const std::vector<std::string>& inputs, const size_t remote_embedding_batch_size,
const size_t remote_embedding_timeout_ms, const size_t remote_embedding_num_tries) {
// GCP API has a limit of 5 instances per request
if(inputs.size() > 5) {
std::vector<embedding_res_t> res;
for(size_t i = 0; i < inputs.size(); i += 5) {
auto batch_res = batch_embed(std::vector<std::string>(inputs.begin() + i, inputs.begin() + std::min(i + 5, inputs.size())), remote_embedding_batch_size, remote_embedding_timeout_ms, remote_embedding_num_tries);
res.insert(res.end(), batch_res.begin(), batch_res.end());
}
return res;
}
nlohmann::json req_body;
req_body["instances"] = nlohmann::json::array();
for(const auto& input : inputs) {
nlohmann::json instance;
instance["content"] = input;
req_body["instances"].push_back(instance);
}
std::unordered_map<std::string, std::string> headers;
headers["Authorization"] = "Bearer " + access_token;
headers["Content-Type"] = "application/json";
headers["timeout_ms"] = std::to_string(remote_embedding_timeout_ms);
headers["num_try"] = std::to_string(remote_embedding_num_tries);
std::map<std::string, std::string> res_headers;
std::string res;
auto res_code = call_remote_api("POST", get_gcp_embedding_url(project_id, model_name), req_body.dump(), res, res_headers, headers);
if(res_code != 200) {
if(res_code == 401) {
auto refresh_op = generate_access_token(refresh_token, client_id, client_secret);
if(!refresh_op.ok()) {
nlohmann::json embedding_res = nlohmann::json::object();
embedding_res["error"] = refresh_op.error();
std::vector<embedding_res_t> outputs;
for(size_t i = 0; i < inputs.size(); i++) {
outputs.push_back(embedding_res_t(refresh_op.code(), embedding_res));
}
return outputs;
}
access_token = refresh_op.get();
// retry
headers["Authorization"] = "Bearer " + access_token;
res_code = call_remote_api("POST", get_gcp_embedding_url(project_id, model_name), req_body.dump(), res, res_headers, headers);
}
}
if(res_code != 200) {
auto embedding_res = get_error_json(req_body, res_code, res);
std::vector<embedding_res_t> outputs;
for(size_t i = 0; i < inputs.size(); i++) {
outputs.push_back(embedding_res_t(res_code, embedding_res));
}
return outputs;
}
nlohmann::json res_json;
try {
res_json = nlohmann::json::parse(res);
} catch (const std::exception& e) {
nlohmann::json embedding_res = get_error_json(req_body, res_code, res);
std::vector<embedding_res_t> outputs;
for(size_t i = 0; i < inputs.size(); i++) {
outputs.push_back(embedding_res_t(400, embedding_res));
}
return outputs;
}
std::vector<embedding_res_t> outputs;
if(res_json.count("predictions") == 0 || !res_json["predictions"].is_array() || res_json["predictions"].size() != inputs.size()) {
std::vector<embedding_res_t> outputs;
for(size_t i = 0; i < inputs.size(); i++) {
outputs.push_back(embedding_res_t(500, "Got malformed response from GCP API."));
}
return outputs;
}
for(const auto& prediction : res_json["predictions"]) {
if(prediction.count("embeddings") == 0 || !prediction["embeddings"].is_object() || prediction["embeddings"].count("values") == 0 || !prediction["embeddings"]["values"].is_array() || prediction["embeddings"]["values"].size() == 0) {
outputs.push_back(embedding_res_t(500, "Got malformed response from GCP API."));
continue;
}
outputs.push_back(embedding_res_t(prediction["embeddings"]["values"].get<std::vector<float>>()));
}
return outputs;
}
nlohmann::json GCPEmbedder::get_error_json(const nlohmann::json& req_body, long res_code, const std::string& res_body) {
nlohmann::json json_res;
try {
json_res = nlohmann::json::parse(res_body);
} catch (const std::exception& e) {
json_res = nlohmann::json::object();
json_res["error"] = "Malformed response from GCP API.";
}
nlohmann::json embedding_res = nlohmann::json::object();
embedding_res["response"] = json_res;
embedding_res["request"] = nlohmann::json::object();
embedding_res["request"]["url"] = get_gcp_embedding_url(project_id, model_name);
embedding_res["request"]["method"] = "POST";
embedding_res["request"]["body"] = req_body;
if(json_res.count("error") != 0 && json_res["error"].count("message") != 0) {
embedding_res["error"] = "GCP API error: " + json_res["error"]["message"].get<std::string>();
} else {
embedding_res["error"] = "Malformed response from GCP API.";
}
if(res_code == 408) {
embedding_res["error"] = "GCP API timeout.";
}
return embedding_res;
}
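// Exchanges the stored OAuth refresh token for a new access token by posting a
// standard refresh_token grant to GCP_AUTH_TOKEN_URL.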
Option<std::string> GCPEmbedder::generate_access_token(const std::string& refresh_token, const std::string& client_id, const std::string& client_secret) {
std::unordered_map<std::string, std::string> headers;
headers["Content-Type"] = "application/x-www-form-urlencoded";
std::map<std::string, std::string> res_headers;
std::string res;
std::string req_body;
req_body = "grant_type=refresh_token&client_id=" + client_id + "&client_secret=" + client_secret + "&refresh_token=" + refresh_token;
auto res_code = call_remote_api("POST", GCP_AUTH_TOKEN_URL, req_body, res, res_headers, headers);
if(res_code != 200) {
nlohmann::json json_res;
try {
json_res = nlohmann::json::parse(res);
} catch (const std::exception& e) {
return Option<std::string>(400, "Got malformed response from GCP API.");
}
if(json_res.count("error") == 0 || json_res["error"].count("message") == 0) {
return Option<std::string>(400, "GCP API error: " + res);
}
if(res_code == 408) {
return Option<std::string>(408, "GCP API timeout.");
}
return Option<std::string>(400, "GCP API error: " + json_res["error"]["message"].get<std::string>());
}
nlohmann::json res_json;
try {
res_json = nlohmann::json::parse(res);
} catch (const std::exception& e) {
return Option<std::string>(400, "Got malformed response from GCP API.");
}
std::string access_token = res_json["access_token"].get<std::string>();
return Option<std::string>(access_token);
}
std::string GCPEmbedder::get_model_key(const nlohmann::json& model_config) {
return model_config["model_name"].get<std::string>() + ":" + model_config["project_id"].get<std::string>() + ":" + model_config["client_secret"].get<std::string>();
}
size: 30,291 | language: C++ | extension: .cpp | total_lines: 588 | avg_line_length: 43.644558 | max_line_length: 239 | alphanum_fraction: 0.622346
repo_name: typesense/typesense | repo_stars: 20,571 | repo_forks: 633 | repo_open_issues: 548 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: false | near_duplicates_stackv1: false | near_duplicates_stackv2: false