Search documents by semantic similarity
Semantic search endpoint with batch processing capabilities
Initiate a search operation with a query text of up to 400 words and receive the most semantically similar responses from the stored knowledge. For question-answering, convert your question into an ideal answer and submit it to receive similar real answers.
A sentence of up to 400 words for which you wish to find semantically similar chunks of knowledge.
Number of semantically similar chunks of text to return. Use 'n=3' for quick, high-relevance results and 'n=10' for more comprehensive information. If you do not receive enough information, consider trying again with a larger 'n' value.
Start of the time range for documents to be searched, in ISO 8601 format.
End of the time range for documents to be searched, in ISO 8601 format.
Successful retrieval of documents
Bad request
Unauthorized
Not found
Internal server error
GET /api/v1/documents/search/ HTTP/1.1
Host: api.rememberizer.ai
Accept: */*
{
  "data_sources": [
    {
      "name": "text",
      "documents": 1
    }
  ],
  "matched_chunks": [
    {
      "document": {
        "id": 18,
        "document_id": "text",
        "name": "text",
        "type": "text",
        "path": "text",
        "url": "text",
        "size": 1,
        "created_time": "2025-11-15T16:03:28.998Z",
        "modified_time": "2025-11-15T16:03:28.998Z",
        "indexed_on": "2025-11-15T16:03:28.998Z",
        "integration": {
          "id": 1,
          "integration_type": "text"
        }
      },
      "matched_content": "text",
      "distance": 1
    }
  ]
}

Example Request
curl -X GET \
"https://api.rememberizer.ai/api/v1/documents/search/?q=如何将Rememberizer与自定义应用程序集成&n=5&from=2023-01-01T00:00:00Z&to=2023-12-31T23:59:59Z" \
-H "Authorization: Bearer YOUR_JWT_TOKEN"const searchDocuments = async (query, numResults = 5, from = null, to = null) => {
const url = new URL('https://api.rememberizer.ai/api/v1/documents/search/');
url.searchParams.append('q', query);
url.searchParams.append('n', numResults);
if (from) {
url.searchParams.append('from', from);
}
if (to) {
url.searchParams.append('to', to);
}
const response = await fetch(url.toString(), {
method: 'GET',
headers: {
'Authorization': 'Bearer YOUR_JWT_TOKEN'
}
});
const data = await response.json();
console.log(data);
};
searchDocuments('如何将Rememberizer与自定义应用程序集成', 5);import requests
def search_documents(query, num_results=5, from_date=None, to_date=None):
    headers = {
        "Authorization": "Bearer YOUR_JWT_TOKEN"
    }
    params = {
        "q": query,
        "n": num_results
    }
    if from_date:
        params["from"] = from_date
    if to_date:
        params["to"] = to_date
    response = requests.get(
        "https://api.rememberizer.ai/api/v1/documents/search/",
        headers=headers,
        params=params
    )
    data = response.json()
    print(data)

search_documents("How to integrate Rememberizer with custom applications", 5)

require 'net/http'
require 'uri'
require 'json'

def search_documents(query, num_results = 5, from_date = nil, to_date = nil)
  uri = URI('https://api.rememberizer.ai/api/v1/documents/search/')
  params = {
    q: query,
    n: num_results
  }
  params[:from] = from_date if from_date
  params[:to] = to_date if to_date
  uri.query = URI.encode_www_form(params)

  request = Net::HTTP::Get.new(uri)
  request['Authorization'] = 'Bearer YOUR_JWT_TOKEN'

  http = Net::HTTP.new(uri.host, uri.port)
  http.use_ssl = true
  response = http.request(request)

  data = JSON.parse(response.body)
  puts data
end

search_documents("How to integrate Rememberizer with custom applications", 5)

Query Parameters
q (string): Required. The search query text (up to 400 words).
n (integer): Number of results to return. Default: 3. Use a higher value (e.g., 10) for more comprehensive results.
from (string): Start of the time range for documents to be searched, in ISO 8601 format.
to (string): End of the time range for documents to be searched, in ISO 8601 format.
prev_chunks (integer): Number of preceding chunks to include for context. Default: 2.
next_chunks (integer): Number of following chunks to include for context. Default: 2.
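The prev_chunks and next_chunks parameters are not exercised by the examples on this page; below is a minimal Python sketch of a context-expanded search, assuming only the parameters in the list above (the query text and YOUR_JWT_TOKEN are placeholders):

import requests

# Ask for extra surrounding chunks so each match arrives with more context.
# prev_chunks and next_chunks follow the query-parameter list above.
response = requests.get(
    "https://api.rememberizer.ai/api/v1/documents/search/",
    headers={"Authorization": "Bearer YOUR_JWT_TOKEN"},
    params={
        "q": "How does Rememberizer handle document chunking?",
        "n": 5,
        "prev_chunks": 3,  # three chunks before each match (default: 2)
        "next_chunks": 3,  # three chunks after each match (default: 2)
    },
)
print(response.json())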
Response Format
{
  "data_sources": [
    {
      "name": "Google Drive",
      "documents": 3
    },
    {
      "name": "Slack",
      "documents": 2
    }
  ],
  "matched_chunks": [
    {
      "document": {
        "id": 12345,
        "document_id": "1aBcD2efGhIjK3lMnOpQrStUvWxYz",
        "name": "Rememberizer API Documentation.pdf",
        "type": "application/pdf",
        "path": "/Documents/Rememberizer/API Documentation.pdf",
        "url": "https://drive.google.com/file/d/1aBcD2efGhIjK3lMnOpQrStUvWxYz/view",
        "size": 250000,
        "created_time": "2023-05-10T14:30:00Z",
        "modified_time": "2023-06-15T09:45:00Z",
        "indexed_on": "2023-06-15T10:30:00Z",
        "integration": {
          "id": 101,
          "integration_type": "google_drive"
        }
      },
      "matched_content": "To integrate Rememberizer with custom applications, you can use the OAuth2 authentication flow to authorize your application to access a user's Rememberizer data. Once authorized, your application can use the Rememberizer API to search documents, retrieve content, and more.",
      "distance": 0.123
    },
    // ... more matched chunks
  ],
  "message": "Search completed successfully",
  "code": "success"
}

Search Optimization Tips
For Question-Answering
When looking for the answer to a question, try phrasing your query as an ideal answer. For example:
Instead of: "What are vector embeddings?"
Try: "A vector embedding is a technique that converts text into numerical vectors in a high-dimensional space."
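As a small illustration, both phrasings can be run through the search_documents helper defined earlier; the improvement is a tendency of semantic matching, not a guarantee:

# Question phrased as a question: tends to match other questions.
search_documents("What are vector embeddings?")

# Question rephrased as an ideal answer: tends to match actual answers.
search_documents(
    "A vector embedding is a technique that converts text into "
    "numerical vectors in a high-dimensional space."
)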
Adjusting the Number of Results
Start with n=3 for quick, high-relevance results.
Increase to n=10 or higher for more comprehensive information.
If a search returns too little information, try again with a larger n parameter.
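One way to automate that escalation is sketched below in Python; the 3-then-10 schedule and the min_chunks threshold are illustrative choices, not API-defined behavior:

import requests

def search_with_escalation(query, jwt_token, min_chunks=3):
    """Start with n=3; retry once with n=10 if too few chunks come back."""
    headers = {"Authorization": f"Bearer {jwt_token}"}
    for n in (3, 10):
        response = requests.get(
            "https://api.rememberizer.ai/api/v1/documents/search/",
            headers=headers,
            params={"q": query, "n": n},
        )
        data = response.json()
        # min_chunks is an arbitrary stand-in for "enough information".
        if len(data.get("matched_chunks", [])) >= min_chunks:
            return data
    return data  # best effort: return the n=10 results regardless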
Time-Based Filtering
Use the from and to parameters to focus the search on documents from a specific time period:
Recent documents: set from to a recent date.
Historical analysis: specify a precise date range.
Excluding outdated information: set an appropriate to date.
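For example, limiting a search to documents from 2023 takes only the two extra parameters, mirroring the curl example above (YOUR_JWT_TOKEN is a placeholder):

import requests

# Search only documents from calendar year 2023 using from/to (ISO 8601).
response = requests.get(
    "https://api.rememberizer.ai/api/v1/documents/search/",
    headers={"Authorization": "Bearer YOUR_JWT_TOKEN"},
    params={
        "q": "best practices for semantic search",
        "n": 5,
        "from": "2023-01-01T00:00:00Z",
        "to": "2023-12-31T23:59:59Z",
    },
)
print(response.json())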
Batch Operations
To handle large numbers of search queries efficiently, Rememberizer supports batch operations that optimize performance and reduce API call overhead.
Batch Search
import requests
import time
from concurrent.futures import ThreadPoolExecutor

def batch_search_documents(queries, num_results=5, batch_size=10):
    """
    Perform batch searches for multiple queries.

    Args:
        queries: List of search query strings
        num_results: Number of results to return per query
        batch_size: Number of queries to process in parallel

    Returns:
        List of search results, one per query
    """
    headers = {
        "Authorization": "Bearer YOUR_JWT_TOKEN",
        "Content-Type": "application/json"
    }
    results = []

    # Process the queries in batches
    for i in range(0, len(queries), batch_size):
        batch = queries[i:i+batch_size]

        # Create a thread pool to send the requests in parallel
        with ThreadPoolExecutor(max_workers=batch_size) as executor:
            futures = []
            for query in batch:
                params = {
                    "q": query,
                    "n": num_results
                }
                future = executor.submit(
                    requests.get,
                    "https://api.rememberizer.ai/api/v1/documents/search/",
                    headers=headers,
                    params=params
                )
                futures.append(future)

            # Collect the completed results
            for future in futures:
                response = future.result()
                results.append(response.json())

        # Rate limiting: pause between batches to avoid API throttling
        if i + batch_size < len(queries):
            time.sleep(1)

    return results

# Example usage
queries = [
    "How to use OAuth with Rememberizer",
    "Vector database configuration options",
    "Best practices for semantic search",
    # Add more queries as needed
]

results = batch_search_documents(queries, num_results=3, batch_size=5)

/**
 * Perform batch searches for multiple queries
 *
 * @param {string[]} queries - List of search query strings
 * @param {number} numResults - Number of results to return per query
 * @param {number} batchSize - Number of queries to process in parallel
 * @param {number} delayBetweenBatches - Milliseconds to wait between batches
 * @returns {Promise<Array>} - List of search results, one per query
 */
async function batchSearchDocuments(queries, numResults = 5, batchSize = 10, delayBetweenBatches = 1000) {
  const results = [];

  // Process the queries in batches
  for (let i = 0; i < queries.length; i += batchSize) {
    const batch = queries.slice(i, i + batchSize);

    // Create an array of promises for the concurrent requests
    const batchPromises = batch.map(query => {
      const url = new URL('https://api.rememberizer.ai/api/v1/documents/search/');
      url.searchParams.append('q', query);
      url.searchParams.append('n', numResults);

      return fetch(url.toString(), {
        method: 'GET',
        headers: {
          'Authorization': 'Bearer YOUR_JWT_TOKEN'
        }
      }).then(response => response.json());
    });

    // Wait for every request in the batch to complete
    const batchResults = await Promise.all(batchPromises);
    results.push(...batchResults);

    // Rate limiting: pause between batches to avoid API throttling
    if (i + batchSize < queries.length) {
      await new Promise(resolve => setTimeout(resolve, delayBetweenBatches));
    }
  }

  return results;
}

// Example usage
const queries = [
  "How to use OAuth with Rememberizer",
  "Vector database configuration options",
  "Best practices for semantic search",
  // Add more queries as needed
];

batchSearchDocuments(queries, 3, 5)
  .then(results => console.log(results))
  .catch(error => console.error('Batch search error:', error));

require 'net/http'
require 'uri'
require 'json'
require 'concurrent'

# Perform batch searches for multiple queries
#
# @param queries [Array<String>] List of search query strings
# @param num_results [Integer] Number of results to return per query
# @param batch_size [Integer] Number of queries to process in parallel
# @param delay_between_batches [Float] Seconds to wait between batches
# @return [Array] List of search results, one per query
def batch_search_documents(queries, num_results = 5, batch_size = 10, delay_between_batches = 1.0)
  results = []

  # Process the queries in batches
  queries.each_slice(batch_size).with_index do |batch, batch_index|
    # Create a thread pool for the concurrent requests
    pool = Concurrent::FixedThreadPool.new(batch_size)
    futures = []

    batch.each do |query|
      futures << Concurrent::Future.execute(executor: pool) do
        uri = URI('https://api.rememberizer.ai/api/v1/documents/search/')
        params = {
          q: query,
          n: num_results
        }
        uri.query = URI.encode_www_form(params)

        request = Net::HTTP::Get.new(uri)
        request['Authorization'] = 'Bearer YOUR_JWT_TOKEN'

        http = Net::HTTP.new(uri.host, uri.port)
        http.use_ssl = true
        response = http.request(request)
        JSON.parse(response.body)
      end
    end

    # Collect the results from all threads
    batch_results = futures.map(&:value)
    results.concat(batch_results)

    # Shut down this batch's thread pool while it is still in scope
    pool.shutdown

    # Rate limiting: pause between batches to avoid API throttling
    if batch_index < (queries.length / batch_size.to_f).ceil - 1
      sleep(delay_between_batches)
    end
  end

  results
end

# Example usage
queries = [
  "How to use OAuth with Rememberizer",
  "Vector database configuration options",
  "Best practices for semantic search",
  # Add more queries as needed
]

results = batch_search_documents(queries, 3, 5)
puts results

Performance Considerations
When implementing batch operations, consider the following best practices:
Optimal batch size: Start with a batch size of 5-10 queries and adjust based on your application's performance characteristics.
Rate limiting: Include a delay between batches to prevent API throttling. A good starting point is a 1-second wait between batches.
Error handling: Implement robust error handling to manage failed requests within a batch.
Resource management: Monitor client-side resource usage, especially with large batch sizes, to prevent excessive memory consumption.
Response processing: Process batch results asynchronously where possible to improve the user experience.
For high-traffic applications, consider implementing a queue system to manage large volumes of search requests efficiently.
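As one way to combine the error-handling and rate-limiting advice above, here is a hedged Python sketch of a retry wrapper; treating HTTP 429 and 5xx responses as retryable is an assumption, not documented API behavior:

import time
import requests

def search_with_retries(query, jwt_token, n=5, max_retries=3):
    """Retry transient search failures with exponential backoff.

    Treating HTTP 429 and 5xx as retryable is an assumption about
    the API, not documented Rememberizer behavior.
    """
    for attempt in range(max_retries):
        try:
            response = requests.get(
                "https://api.rememberizer.ai/api/v1/documents/search/",
                headers={"Authorization": f"Bearer {jwt_token}"},
                params={"q": query, "n": n},
                timeout=10,
            )
        except requests.RequestException:
            if attempt == max_retries - 1:
                raise  # network error on the final attempt: give up
            time.sleep(2 ** attempt)  # back off 1s, 2s, 4s, ...
            continue
        if response.status_code == 200:
            return response.json()
        if response.status_code == 429 or response.status_code >= 500:
            time.sleep(2 ** attempt)  # throttled or server error: retry
            continue
        response.raise_for_status()  # other 4xx errors are not retryable
    raise RuntimeError(f"Search failed after {max_retries} attempts")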
This endpoint provides powerful semantic search across your entire knowledge base. It uses vector embeddings to find content based on meaning rather than exact keyword matching.