index
int64 | repo_id
string | file_path
string | content
string |
|---|---|---|---|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/domain/DictionaryItemDocument.java
|
package ai.driftkit.context.spring.domain;
import ai.driftkit.common.domain.DictionaryItem;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
import org.springframework.data.mongodb.core.index.Indexed;
/**
 * MongoDB persistence wrapper for {@link DictionaryItem}, stored in the
 * "dictionary_items" collection.
 *
 * <p>The getters below add no logic: they re-declare inherited accessors solely
 * so Spring Data mapping annotations can be attached in this module without
 * touching the core domain class.
 */
@Document(collection = "dictionary_items")
public class DictionaryItemDocument extends DictionaryItem {

    // Marks the inherited id as the Mongo document _id.
    @Id
    @Override
    public String getId() {
        return super.getId();
    }

    // Re-declared for symmetry/mapping visibility; delegates to the parent.
    // NOTE(review): the Indexed import suggests this was meant to carry
    // @Indexed — confirm whether a groupId index should be declared here.
    @Override
    public String getGroupId() {
        return super.getGroupId();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/repository/PromptRepository.java
|
package ai.driftkit.context.spring.repository;
import ai.driftkit.common.domain.Prompt;
import ai.driftkit.common.domain.Language;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
import java.util.List;
import java.util.Optional;
@Repository
public interface PromptRepository extends MongoRepository<Prompt, String> {

    // All queries below are Spring Data derived queries: the method NAMES are
    // parsed to build the Mongo query, so renaming them changes behavior.

    /** All prompt versions registered under the given method name. */
    List<Prompt> findByMethod(String method);

    /** All prompts whose method name is in the given list. */
    List<Prompt> findByMethodIsIn(List<String> method);

    /** Prompts for any of the given methods, filtered by lifecycle state. */
    List<Prompt> findByMethodIsInAndState(List<String> method, Prompt.State state);

    /** All prompts in the given lifecycle state, across methods. */
    List<Prompt> findByState(Prompt.State state);

    /** Prompts for one method in the given state (may span languages). */
    List<Prompt> findByMethodAndState(String method, Prompt.State state);

    /**
     * The single prompt for a (method, language, state) triple — callers rely on
     * at most one CURRENT prompt existing per method/language.
     */
    Optional<Prompt> findByMethodAndLanguageAndState(String method, Language language, Prompt.State state);
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/repository/SpringDictionaryGroupRepository.java
|
package ai.driftkit.context.spring.repository;
import ai.driftkit.common.domain.DictionaryGroup;
import ai.driftkit.common.domain.Language;
import ai.driftkit.context.core.service.DictionaryGroupRepository;
import ai.driftkit.context.spring.domain.DictionaryGroupDocument;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * Spring Data MongoDB repository for dictionary-group documents.
 *
 * <p>Implements the framework-agnostic {@link DictionaryGroupRepository} port by
 * letting Spring Data derive the query from the method name.
 */
@Repository
public interface SpringDictionaryGroupRepository extends MongoRepository<DictionaryGroupDocument, String>, DictionaryGroupRepository<DictionaryGroupDocument> {

    /** Derived query: all groups stored for the given language. */
    @Override
    List<DictionaryGroupDocument> findDictionaryGroupsByLanguage(Language language);
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/repository/SpringDictionaryItemRepository.java
|
package ai.driftkit.context.spring.repository;
import ai.driftkit.common.domain.Language;
import ai.driftkit.context.core.service.DictionaryItemRepository;
import ai.driftkit.context.spring.domain.DictionaryItemDocument;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * Spring Data MongoDB repository for dictionary-item documents.
 *
 * <p>Implements the framework-agnostic {@link DictionaryItemRepository} port;
 * both methods are Spring Data derived queries, so their names must not change.
 */
@Repository
public interface SpringDictionaryItemRepository extends MongoRepository<DictionaryItemDocument, String>, DictionaryItemRepository<DictionaryItemDocument> {

    /** Derived query: all items stored for the given language. */
    @Override
    List<DictionaryItemDocument> findDictionaryItemsByLanguage(Language language);

    /** Derived query: all items belonging to the given dictionary group. */
    @Override
    List<DictionaryItemDocument> findDictionaryItemsByGroupId(String groupId);
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/service/FolderService.java
|
package ai.driftkit.context.spring.service;
import ai.driftkit.context.spring.testsuite.domain.Folder;
import ai.driftkit.context.spring.testsuite.domain.FolderType;
import ai.driftkit.context.spring.testsuite.domain.TestSet;
import ai.driftkit.context.spring.testsuite.repository.FolderRepository;
import ai.driftkit.context.spring.testsuite.repository.TestSetRepository;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Optional;
/**
 * CRUD service for test-suite folders.
 *
 * <p>Folders group {@link TestSet}s; deleting a folder detaches its test sets
 * rather than deleting them.
 */
@Service
@RequiredArgsConstructor
public class FolderService {

    private final FolderRepository folderRepository;
    private final TestSetRepository testSetRepository;

    /** Returns every folder, newest first. */
    public List<Folder> getAllFolders() {
        return folderRepository.findAllByOrderByCreatedAtDesc();
    }

    /** Returns folders of the given type, newest first. */
    public List<Folder> getFoldersByType(FolderType type) {
        return folderRepository.findByTypeOrderByCreatedAtDesc(type);
    }

    /** Looks up a single folder by id. */
    public Optional<Folder> getFolderById(String id) {
        return folderRepository.findById(id);
    }

    /**
     * Persists a new folder. The id is cleared so the store assigns one, and
     * the creation timestamp is always set server-side.
     */
    public Folder createFolder(Folder folder) {
        folder.setId(null);
        folder.setCreatedAt(System.currentTimeMillis());
        return folderRepository.save(folder);
    }

    /**
     * Updates an existing folder, returning {@code Optional.empty()} when the
     * id is unknown.
     *
     * <p>Fix: the previous implementation saved the client payload as-is, which
     * overwrote the server-assigned {@code createdAt} (typically with null/0)
     * and corrupted the createdAt-ordered listings above. The stored creation
     * timestamp is now preserved.
     */
    public Optional<Folder> updateFolder(String id, Folder folder) {
        return folderRepository.findById(id).map(existing -> {
            folder.setId(id);
            // createdAt is server-managed; never trust the incoming payload.
            folder.setCreatedAt(existing.getCreatedAt());
            return folderRepository.save(folder);
        });
    }

    /**
     * Deletes a folder and clears the folder reference from any test set that
     * pointed at it.
     *
     * @return false when the folder does not exist (nothing is modified)
     */
    public boolean deleteFolder(String id) {
        if (!folderRepository.existsById(id)) {
            return false;
        }
        // NOTE(review): scans every test set because TestSetRepository exposes
        // no findByFolderId query; consider adding one if test-set volume grows.
        for (TestSet testSet : testSetRepository.findAll()) {
            if (id.equals(testSet.getFolderId())) {
                testSet.setFolderId(null);
                testSetRepository.save(testSet);
            }
        }
        folderRepository.deleteById(id);
        return true;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/service/MongodbPromptService.java
|
package ai.driftkit.context.spring.service;
import ai.driftkit.common.domain.Prompt;
import ai.driftkit.common.domain.Prompt.State;
import ai.driftkit.common.domain.Language;
import ai.driftkit.context.core.service.PromptServiceBase;
import ai.driftkit.context.spring.config.ApplicationContextProvider;
import ai.driftkit.context.spring.repository.PromptRepository;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
/**
 * MongoDB-backed implementation of {@link PromptServiceBase}.
 *
 * <p>Created by {@code PromptServiceFactory} outside the Spring context, so the
 * repository bean is resolved lazily through {@code ApplicationContextProvider}
 * rather than injected.
 */
@Slf4j
public class MongodbPromptService implements PromptServiceBase {

    // Lazily resolved; see getPromptRepository().
    private PromptRepository promptRepository;

    /**
     * Eagerly resolves the repository so misconfiguration fails at configure
     * time. The config map itself is unused by this backend.
     */
    @Override
    public void configure(Map<String, String> config) {
        getPromptRepository();
    }

    /** Selected by the factory when the configured backend name is "mongodb". */
    @Override
    public boolean supportsName(String name) {
        return "mongodb".equals(name);
    }

    /**
     * Resolves the repository bean from the application context on first use.
     *
     * @throws IllegalStateException if called before the Spring context exists
     */
    private PromptRepository getPromptRepository() {
        if (this.promptRepository == null) {
            if (ApplicationContextProvider.getApplicationContext() != null) {
                this.promptRepository = ApplicationContextProvider.getApplicationContext().getBean(PromptRepository.class);
            } else {
                throw new IllegalStateException("ApplicationContext is not initialized yet.");
            }
        }
        return this.promptRepository;
    }

    @Override
    public Optional<Prompt> getPromptById(String id) {
        return getPromptRepository().findById(id);
    }

    @Override
    public List<Prompt> getPromptsByIds(List<String> ids) {
        return getPromptRepository().findAllById(ids);
    }

    @Override
    public List<Prompt> getPromptsByMethods(List<String> methods) {
        return getPromptRepository().findByMethodIsIn(methods);
    }

    @Override
    public List<Prompt> getPromptsByMethodsAndState(List<String> methods, State state) {
        return getPromptRepository().findByMethodIsInAndState(methods, state);
    }

    @Override
    public List<Prompt> getPrompts() {
        return getPromptRepository().findAll();
    }

    /**
     * Saves a prompt with versioning semantics against the CURRENT prompt for
     * the same (method, language):
     * <ul>
     *   <li>no CURRENT prompt, or incoming id is null → treat as a new version
     *       (fresh id + createdTime);</li>
     *   <li>message text unchanged vs CURRENT → reuse the CURRENT id, i.e. an
     *       in-place update, no new version;</li>
     *   <li>message text changed → demote the old prompt to REPLACED and save
     *       the incoming one alongside it.</li>
     * </ul>
     * The statement ORDER below matters: the id assigned in the first branch is
     * deliberately overwritten when the message turns out to be identical.
     */
    @Override
    public Prompt savePrompt(Prompt prompt) {
        Optional<Prompt> currentPromptOpt = getCurrentPrompt(prompt.getMethod(), prompt.getLanguage());
        if (currentPromptOpt.isEmpty() || prompt.getId() == null) {
            prompt.setId(UUID.randomUUID().toString());
            prompt.setCreatedTime(System.currentTimeMillis());
        }
        if (currentPromptOpt.isPresent()) {
            Prompt currentPrompt = currentPromptOpt.get();
            if (prompt.getMessage().equals(currentPrompt.getMessage())) {
                // Same text: collapse onto the existing CURRENT record.
                prompt.setId(currentPrompt.getId());
            } else {
                // New text: archive the previous version before saving.
                currentPrompt.setState(State.REPLACED);
                getPromptRepository().save(currentPrompt);
            }
        }
        if (prompt.getState() == null) {
            prompt.setState(State.CURRENT);
        }
        prompt.setUpdatedTime(System.currentTimeMillis());
        return getPromptRepository().save(prompt);
    }

    /**
     * Deletes a prompt by id.
     *
     * @return the prompt that was deleted, or null if it did not exist
     */
    @Override
    public Prompt deletePrompt(String id) {
        Optional<Prompt> prompt = getPromptById(id);
        getPromptRepository().deleteById(id);
        return prompt.orElse(null);
    }

    /** True once the repository has been resolved (i.e. configure() succeeded). */
    @Override
    public boolean isConfigured() {
        return promptRepository != null;
    }

    /** The single CURRENT prompt for a method/language pair, if any. */
    public Optional<Prompt> getCurrentPrompt(String method, Language language) {
        return getPromptRepository().findByMethodAndLanguageAndState(method, language, State.CURRENT);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/service/PromptServiceSpringAdapter.java
|
package ai.driftkit.context.spring.service;
import ai.driftkit.config.EtlConfig;
import ai.driftkit.config.EtlConfig.PromptServiceConfig;
import ai.driftkit.common.domain.Prompt;
import ai.driftkit.context.core.service.DictionaryItemService;
import ai.driftkit.context.core.service.PromptService;
import ai.driftkit.context.core.service.PromptServiceBase;
import ai.driftkit.context.core.service.PromptServiceFactory;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.event.ApplicationReadyEvent;
import org.springframework.context.annotation.Primary;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Service;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * Primary Spring bean exposing {@link PromptService}.
 *
 * <p>The real backend is chosen from {@code EtlConfig} only after the
 * application is fully started (some backends need the refreshed context to
 * resolve their beans). Until then the adapter delegates to a no-op
 * {@link PlaceholderPromptService} so early callers fail soft instead of NPEing.
 */
@Slf4j
@Service
@Primary
public class PromptServiceSpringAdapter extends PromptService {

    // Field-injected because the constructor runs before config is needed.
    @Autowired
    private EtlConfig etlConfig;

    // volatile: init() runs on the event thread, reads may come from others.
    private volatile boolean initialized = false;

    public PromptServiceSpringAdapter(@Autowired DictionaryItemService dictionaryItemService) {
        // Start with the placeholder; swapped for the real backend in init().
        super(new PlaceholderPromptService(), dictionaryItemService);
    }

    /**
     * Swaps the placeholder for the configured backend once the application is
     * ready. @SneakyThrows covers the checked exception from the factory.
     */
    @SneakyThrows
    @EventListener(ApplicationReadyEvent.class)
    public void init() {
        if (!initialized) {
            PromptServiceConfig promptServiceConfig = etlConfig.getPromptService();
            PromptServiceBase actualPromptService = PromptServiceFactory.fromName(
                promptServiceConfig.getName(),
                promptServiceConfig.getConfig()
            );
            this.promptService = actualPromptService;
            initialized = true;
        }
    }

    /**
     * Inert stand-in used before initialization: reads return empty results,
     * writes are no-ops (savePrompt echoes its argument), and isConfigured()
     * is false so callers can detect the not-ready state.
     */
    private static class PlaceholderPromptService implements PromptServiceBase {
        @Override
        public void configure(Map<String, String> config) {}
        @Override
        public boolean supportsName(String name) { return false; }
        @Override
        public Optional<Prompt> getPromptById(String id) {
            return Optional.empty();
        }
        @Override
        public List<Prompt> getPromptsByIds(List<String> ids) {
            return Collections.emptyList();
        }
        @Override
        public List<Prompt> getPromptsByMethods(List<String> methods) {
            return Collections.emptyList();
        }
        @Override
        public List<Prompt> getPromptsByMethodsAndState(List<String> methods, Prompt.State state) {
            return Collections.emptyList();
        }
        @Override
        public List<Prompt> getPrompts() {
            return Collections.emptyList();
        }
        @Override
        public Prompt savePrompt(Prompt prompt) {
            return prompt;
        }
        @Override
        public Prompt deletePrompt(String id) {
            return null;
        }
        @Override
        public boolean isConfigured() { return false; }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/service/SpringDictionaryGroupService.java
|
package ai.driftkit.context.spring.service;
import ai.driftkit.common.domain.DictionaryGroup;
import ai.driftkit.common.domain.Language;
import ai.driftkit.context.core.service.DictionaryGroupService;
import ai.driftkit.context.spring.domain.DictionaryGroupDocument;
import ai.driftkit.context.spring.repository.SpringDictionaryGroupRepository;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
/**
 * Spring Data MongoDB adapter for {@link DictionaryGroupService}.
 *
 * <p>Bridges core-domain {@link DictionaryGroup} objects and their
 * {@link DictionaryGroupDocument} persistence representation: incoming domain
 * objects are converted to documents, results are widened back to the domain
 * type (documents subclass the domain class, so widening is a plain cast).
 */
@Service
@RequiredArgsConstructor
public class SpringDictionaryGroupService implements DictionaryGroupService {

    private final SpringDictionaryGroupRepository repository;

    @Override
    public Optional<DictionaryGroup> findById(String id) {
        return repository.findById(id).map(DictionaryGroup.class::cast);
    }

    @Override
    public List<DictionaryGroup> findByLanguage(Language language) {
        return upcast(repository.findDictionaryGroupsByLanguage(language));
    }

    @Override
    public DictionaryGroup save(DictionaryGroup group) {
        return repository.save(toDocument(group));
    }

    @Override
    public List<DictionaryGroup> saveAll(List<DictionaryGroup> groups) {
        List<DictionaryGroupDocument> documents = groups.stream()
                .map(this::toDocument)
                .collect(Collectors.toList());
        return upcast(repository.saveAll(documents));
    }

    @Override
    public void deleteById(String id) {
        repository.deleteById(id);
    }

    @Override
    public boolean existsById(String id) {
        return repository.existsById(id);
    }

    @Override
    public List<DictionaryGroup> findAll() {
        return upcast(repository.findAll());
    }

    /** Widens a list of documents to the core domain type. */
    private List<DictionaryGroup> upcast(List<DictionaryGroupDocument> documents) {
        return documents.stream()
                .map(DictionaryGroup.class::cast)
                .collect(Collectors.toList());
    }

    /**
     * Returns the argument unchanged when it is already a document; otherwise
     * copies it field-by-field into a fresh document instance.
     */
    private DictionaryGroupDocument toDocument(DictionaryGroup group) {
        if (group instanceof DictionaryGroupDocument) {
            return (DictionaryGroupDocument) group;
        }
        DictionaryGroupDocument document = new DictionaryGroupDocument();
        document.setId(group.getId());
        document.setName(group.getName());
        document.setLanguage(group.getLanguage());
        document.setCreatedAt(group.getCreatedAt());
        document.setUpdatedAt(group.getUpdatedAt());
        return document;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/service/SpringDictionaryItemService.java
|
package ai.driftkit.context.spring.service;
import ai.driftkit.common.domain.DictionaryItem;
import ai.driftkit.common.domain.Language;
import ai.driftkit.context.core.service.DictionaryItemService;
import ai.driftkit.context.spring.domain.DictionaryItemDocument;
import ai.driftkit.context.spring.repository.SpringDictionaryItemRepository;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
/**
 * Spring Data MongoDB adapter for {@link DictionaryItemService}.
 *
 * <p>Bridges core-domain {@link DictionaryItem} objects and their
 * {@link DictionaryItemDocument} persistence representation: incoming domain
 * objects are converted to documents, results are widened back to the domain
 * type (documents subclass the domain class, so widening is a plain cast).
 */
@Service
@RequiredArgsConstructor
public class SpringDictionaryItemService implements DictionaryItemService {

    private final SpringDictionaryItemRepository repository;

    @Override
    public Optional<DictionaryItem> findById(String id) {
        return repository.findById(id).map(DictionaryItem.class::cast);
    }

    @Override
    public List<DictionaryItem> findByLanguage(Language language) {
        return upcast(repository.findDictionaryItemsByLanguage(language));
    }

    @Override
    public List<DictionaryItem> findByGroupId(String groupId) {
        return upcast(repository.findDictionaryItemsByGroupId(groupId));
    }

    @Override
    public DictionaryItem save(DictionaryItem item) {
        return repository.save(toDocument(item));
    }

    @Override
    public List<DictionaryItem> saveAll(List<DictionaryItem> items) {
        List<DictionaryItemDocument> documents = items.stream()
                .map(this::toDocument)
                .collect(Collectors.toList());
        return upcast(repository.saveAll(documents));
    }

    @Override
    public void deleteById(String id) {
        repository.deleteById(id);
    }

    @Override
    public boolean existsById(String id) {
        return repository.existsById(id);
    }

    @Override
    public List<DictionaryItem> findAll() {
        return upcast(repository.findAll());
    }

    /** Widens a list of documents to the core domain type. */
    private List<DictionaryItem> upcast(List<DictionaryItemDocument> documents) {
        return documents.stream()
                .map(DictionaryItem.class::cast)
                .collect(Collectors.toList());
    }

    /**
     * Returns the argument unchanged when it is already a document; otherwise
     * copies it field-by-field into a fresh document instance.
     */
    private DictionaryItemDocument toDocument(DictionaryItem item) {
        if (item instanceof DictionaryItemDocument) {
            return (DictionaryItemDocument) item;
        }
        DictionaryItemDocument document = new DictionaryItemDocument();
        document.setId(item.getId());
        document.setGroupId(item.getGroupId());
        document.setLanguage(item.getLanguage());
        document.setSamples(item.getSamples());
        document.setMarkers(item.getMarkers());
        document.setCreatedAt(item.getCreatedAt());
        document.setUpdatedAt(item.getUpdatedAt());
        return document;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/controller/EvaluationController.java
|
package ai.driftkit.context.spring.testsuite.controller;
import ai.driftkit.common.domain.RestResponse;
import ai.driftkit.context.spring.testsuite.domain.Evaluation;
import ai.driftkit.context.spring.testsuite.domain.EvaluationResult;
import ai.driftkit.context.spring.testsuite.domain.EvaluationRun;
import ai.driftkit.context.spring.testsuite.service.EvaluationService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.web.bind.annotation.*;
import java.util.List;
import java.util.Optional;
/**
 * REST controller for evaluations, evaluation runs, and their results.
 *
 * <p>Two scopes exist: "global" evaluations (testSetId == null) usable as
 * templates, and test-set-scoped evaluations. Errors are reported in-band via
 * {@link RestResponse} success flags rather than HTTP status codes.
 */
@Slf4j
@RestController
@RequiredArgsConstructor
@RequestMapping("/data/v1.0/admin")
public class EvaluationController {

    private final EvaluationService evaluationService;

    /**
     * Get all global evaluations (not tied to a specific test set)
     */
    @GetMapping("/evaluations/global")
    public RestResponse<List<Evaluation>> getGlobalEvaluations() {
        List<Evaluation> evaluations = evaluationService.getGlobalEvaluations();
        return new RestResponse<>(true, evaluations);
    }

    /**
     * Create a new global evaluation
     */
    @PostMapping("/evaluations/global")
    public RestResponse<Evaluation> createGlobalEvaluation(@RequestBody Evaluation evaluation) {
        try {
            log.info("Received global evaluation create request: {}", evaluation);
            // Ensure testSetId is null for global evaluations
            evaluation.setTestSetId(null);
            Evaluation created = evaluationService.createEvaluation(evaluation);
            return new RestResponse<>(true, created);
        } catch (Exception e) {
            log.error("Error creating global evaluation: {}", e.getMessage(), e);
            return new RestResponse<>(false, null, "Error creating global evaluation: " + e.getMessage());
        }
    }

    /**
     * Add a global evaluation to a test set (creates a copy, so later edits to
     * the global template do not affect the test set's copy)
     */
    @PostMapping("/test-sets/{testSetId}/evaluations/add/{evaluationId}")
    public RestResponse<Evaluation> addEvaluationToTestSet(
            @PathVariable String testSetId,
            @PathVariable String evaluationId) {
        try {
            log.info("Adding evaluation {} to test set {}", evaluationId, testSetId);
            Evaluation added = evaluationService.copyEvaluation(evaluationId, testSetId);
            return new RestResponse<>(true, added);
        } catch (Exception e) {
            log.error("Error adding evaluation to test set: {}", e.getMessage(), e);
            return new RestResponse<>(false, null, "Error adding evaluation: " + e.getMessage());
        }
    }

    /**
     * Get all evaluations for a test set
     */
    @GetMapping("/test-sets/{testSetId}/evaluations")
    public RestResponse<List<Evaluation>> getEvaluationsForTestSet(@PathVariable String testSetId) {
        List<Evaluation> evaluations = evaluationService.getEvaluationsForTestSet(testSetId);
        return new RestResponse<>(true, evaluations);
    }

    /**
     * Get a specific evaluation by id
     */
    @GetMapping("/evaluations/{id}")
    public RestResponse<Evaluation> getEvaluation(@PathVariable String id) {
        Optional<Evaluation> evaluation = evaluationService.getEvaluation(id);
        return evaluation.map(value -> new RestResponse<Evaluation>(true, value))
                .orElseGet(() -> new RestResponse<Evaluation>(false, null, "Evaluation not found"));
    }

    /**
     * Create a new evaluation scoped to a test set (the path's testSetId
     * overrides whatever the body carries)
     */
    @PostMapping("/test-sets/{testSetId}/evaluations")
    public RestResponse<Evaluation> createEvaluation(@PathVariable String testSetId, @RequestBody Evaluation evaluation) {
        try {
            log.info("Received evaluation create request: {}", evaluation);
            evaluation.setTestSetId(testSetId);
            Evaluation created = evaluationService.createEvaluation(evaluation);
            return new RestResponse<>(true, created);
        } catch (Exception e) {
            log.error("Error creating evaluation: {}", e.getMessage(), e);
            return new RestResponse<Evaluation>(false, null, "Error creating evaluation: " + e.getMessage());
        }
    }

    /**
     * Copy an evaluation to another test set
     */
    @PostMapping("/test-sets/{targetTestSetId}/evaluations/copy/{evaluationId}")
    public RestResponse<Evaluation> copyEvaluation(@PathVariable String targetTestSetId, @PathVariable String evaluationId) {
        try {
            log.info("Copying evaluation {} to test set {}", evaluationId, targetTestSetId);
            Evaluation copied = evaluationService.copyEvaluation(evaluationId, targetTestSetId);
            return new RestResponse<>(true, copied);
        } catch (Exception e) {
            log.error("Error copying evaluation: {}", e.getMessage(), e);
            return new RestResponse<Evaluation>(false, null, "Error copying evaluation: " + e.getMessage());
        }
    }

    /**
     * Update an evaluation (IllegalArgumentException from the service signals
     * a not-found / invalid id)
     */
    @PutMapping("/evaluations/{id}")
    public RestResponse<Evaluation> updateEvaluation(@PathVariable String id, @RequestBody Evaluation evaluation) {
        try {
            Evaluation updated = evaluationService.updateEvaluation(id, evaluation);
            return new RestResponse<>(true, updated);
        } catch (IllegalArgumentException e) {
            return new RestResponse<Evaluation>(false, null, e.getMessage());
        }
    }

    /**
     * Update a test set evaluation
     */
    @PutMapping("/test-sets/{testSetId}/evaluations/{id}")
    public RestResponse<Evaluation> updateTestSetEvaluation(
            @PathVariable String testSetId,
            @PathVariable String id,
            @RequestBody Evaluation evaluation) {
        try {
            log.info("Updating evaluation {} for test set {}", id, testSetId);
            // Ensure we maintain the test set association
            evaluation.setTestSetId(testSetId);
            Evaluation updated = evaluationService.updateEvaluation(id, evaluation);
            return new RestResponse<>(true, updated);
        } catch (IllegalArgumentException e) {
            return new RestResponse<Evaluation>(false, null, e.getMessage());
        } catch (Exception e) {
            log.error("Error updating test set evaluation: {}", e.getMessage(), e);
            return new RestResponse<Evaluation>(false, null, "Error updating evaluation: " + e.getMessage());
        }
    }

    /**
     * Delete an evaluation
     */
    @DeleteMapping("/evaluations/{id}")
    public RestResponse<Void> deleteEvaluation(@PathVariable String id) {
        evaluationService.deleteEvaluation(id);
        return new RestResponse<>(true, null);
    }

    /**
     * Get all runs for a test set
     */
    @GetMapping("/test-sets/{testSetId}/runs")
    public RestResponse<List<EvaluationRun>> getRunsForTestSet(@PathVariable String testSetId) {
        List<EvaluationRun> runs = evaluationService.getRunsForTestSet(testSetId);
        return new RestResponse<>(true, runs);
    }

    /**
     * Get a specific run by id
     */
    @GetMapping("/runs/{id}")
    public RestResponse<EvaluationRun> getRun(@PathVariable String id) {
        Optional<EvaluationRun> run = evaluationService.getRun(id);
        return run.map(value -> new RestResponse<EvaluationRun>(true, value))
                .orElseGet(() -> new RestResponse<EvaluationRun>(false, null, "Run not found"));
    }

    /**
     * Create a new run. A run may execute via a model OR a workflow, never
     * both, so the request is rejected when both are set.
     */
    @PostMapping("/test-sets/{testSetId}/runs")
    public RestResponse<EvaluationRun> createRun(@PathVariable String testSetId, @RequestBody EvaluationRun run) {
        try {
            log.info("Creating evaluation run for test set {}: {}", testSetId, run);
            // Validate the request parameters
            if (StringUtils.isNotBlank(run.getModelId()) && StringUtils.isNotBlank(run.getWorkflow())) {
                return new RestResponse<>(false, null,
                        "Cannot specify both modelId and workflow. Choose one execution method.");
            }
            run.setTestSetId(testSetId);
            EvaluationRun created = evaluationService.createEvaluationRun(run);
            return new RestResponse<>(true, created);
        } catch (IllegalArgumentException e) {
            log.warn("Invalid run configuration: {}", e.getMessage());
            return new RestResponse<>(false, null, e.getMessage());
        } catch (Exception e) {
            log.error("Error creating evaluation run: {}", e.getMessage(), e);
            return new RestResponse<>(false, null, "Error creating run: " + e.getMessage());
        }
    }

    /**
     * Delete a run
     */
    @DeleteMapping("/test-sets/runs/{id}")
    public RestResponse<Void> deleteRun(@PathVariable String id) {
        evaluationService.deleteRun(id);
        return new RestResponse<>(true, null);
    }

    /**
     * Get all results for a run
     */
    @GetMapping("/test-sets/runs/{runId}/results")
    public RestResponse<List<EvaluationResult>> getResultsForRun(@PathVariable String runId) {
        List<EvaluationResult> results = evaluationService.getResultsForRun(runId);
        return new RestResponse<>(true, results);
    }

    /**
     * Start a previously created run
     */
    @PostMapping("/test-sets/runs/{runId}/start")
    public RestResponse<Void> startRun(@PathVariable String runId) {
        evaluationService.executeEvaluationRun(runId);
        return new RestResponse<Void>(true, null, "Run started");
    }

    /**
     * Quick run - create and start a new run for a test set in one call
     */
    @PostMapping("/test-sets/{testSetId}/quick-run")
    public RestResponse<EvaluationRun> quickRun(@PathVariable String testSetId) {
        try {
            EvaluationRun run = evaluationService.createAndExecuteRun(testSetId);
            return new RestResponse<>(true, run, "Run created and started");
        } catch (Exception e) {
            log.error("Error creating quick run: {}", e.getMessage(), e);
            return new RestResponse<EvaluationRun>(false, null, "Error creating run: " + e.getMessage());
        }
    }

    /**
     * Get all runs across all test sets
     */
    @GetMapping("/test-sets/all-runs")
    public RestResponse<List<EvaluationRun>> getAllRuns() {
        try {
            List<EvaluationRun> runs = evaluationService.getAllRuns();
            return new RestResponse<>(true, runs);
        } catch (Exception e) {
            log.error("Error fetching all runs: {}", e.getMessage(), e);
            return new RestResponse<List<EvaluationRun>>(false, null, "Error fetching runs: " + e.getMessage());
        }
    }

    /**
     * Update the status of a manual evaluation result (human review verdict)
     */
    @PostMapping("/evaluation-results/{resultId}/manual-review")
    public RestResponse<EvaluationResult> updateManualEvaluationStatus(
            @PathVariable String resultId,
            @RequestBody ManualReviewRequest request) {
        try {
            EvaluationResult result = evaluationService.updateManualEvaluationStatus(
                    resultId, request.isPassed(), request.getFeedback());
            return new RestResponse<>(true, result);
        } catch (IllegalArgumentException e) {
            log.warn("Invalid manual review update: {}", e.getMessage());
            return new RestResponse<>(false, null, e.getMessage());
        } catch (Exception e) {
            log.error("Error updating manual evaluation status: {}", e.getMessage(), e);
            return new RestResponse<>(false, null, "Error updating status: " + e.getMessage());
        }
    }

    /**
     * Request body for manual review updates
     */
    @lombok.Data
    @lombok.NoArgsConstructor
    @lombok.AllArgsConstructor
    public static class ManualReviewRequest {
        // Reviewer verdict for the result.
        private boolean passed;
        // Optional free-text reviewer feedback.
        private String feedback;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/controller/TestSetController.java
|
package ai.driftkit.context.spring.testsuite.controller;
import ai.driftkit.context.spring.testsuite.domain.TestSet;
import ai.driftkit.context.spring.testsuite.domain.TestSetItem;
import ai.driftkit.context.spring.testsuite.service.TestSetService;
import ai.driftkit.workflows.spring.domain.ModelRequestTrace;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import java.util.List;
import java.util.Optional;
@RestController
@RequestMapping("/data/v1.0/admin/test-sets")
@RequiredArgsConstructor
public class TestSetController {
private final TestSetService testSetService;
@GetMapping
public ResponseEntity<List<TestSet>> getAllTestSets() {
List<TestSet> testSets = testSetService.getAllTestSets();
return ResponseEntity.ok(testSets);
}
@GetMapping("/folder/{folderId}")
public ResponseEntity<List<TestSet>> getTestSetsByFolder(@PathVariable String folderId) {
List<TestSet> testSets = testSetService.getTestSetsByFolder(folderId);
return ResponseEntity.ok(testSets);
}
@GetMapping("/folder")
public ResponseEntity<List<TestSet>> getTestSetsWithoutFolder() {
List<TestSet> testSets = testSetService.getTestSetsByFolder(null);
return ResponseEntity.ok(testSets);
}
@GetMapping("/{id}")
public ResponseEntity<TestSet> getTestSetById(@PathVariable String id) {
Optional<TestSet> testSet = testSetService.getTestSetById(id);
return testSet.map(ResponseEntity::ok)
.orElse(ResponseEntity.notFound().build());
}
@GetMapping("/{id}/items")
public ResponseEntity<List<TestSetItem>> getTestSetItems(@PathVariable String id) {
List<TestSetItem> items = testSetService.getTestSetItems(id);
return ResponseEntity.ok(items);
}
@PostMapping
public ResponseEntity<TestSet> createTestSet(@RequestBody TestSet testSet) {
TestSet created = testSetService.createTestSet(testSet);
return ResponseEntity.ok(created);
}
@PutMapping("/{id}")
public ResponseEntity<TestSet> updateTestSet(@PathVariable String id, @RequestBody TestSet testSet) {
Optional<TestSet> updated = testSetService.updateTestSet(id, testSet);
return updated.map(ResponseEntity::ok)
.orElse(ResponseEntity.notFound().build());
}
@DeleteMapping("/{id}")
public ResponseEntity<Void> deleteTestSet(@PathVariable String id) {
if (testSetService.deleteTestSet(id)) {
return ResponseEntity.noContent().build();
}
return ResponseEntity.notFound().build();
}
@PostMapping("/{id}/items")
public ResponseEntity<TestSetService.AddItemsResult> addItemsToTestSet(
@PathVariable String id,
@RequestBody AddItemsRequest request) {
TestSetService.AddItemsResult result = testSetService.addItemsToTestSet(
request.getMessageTaskIds(),
request.getTraceSteps() != null ?
request.getTraceSteps().stream()
.map(step -> {
TestSetService.TraceStep traceStep = new TestSetService.TraceStep();
traceStep.setTraceId(step.getTraceId());
return traceStep;
}).toList() : null,
request.getImageTaskIds(),
id
);
return ResponseEntity.ok(result);
}
@DeleteMapping("/{testSetId}/items/{itemId}")
public ResponseEntity<Void> deleteTestSetItem(
@PathVariable String testSetId,
@PathVariable String itemId) {
if (testSetService.deleteTestSetItem(testSetId, itemId)) {
return ResponseEntity.noContent().build();
}
return ResponseEntity.notFound().build();
}
@PostMapping("/move-to-folder")
public ResponseEntity<String> moveTestSetsToFolder(@RequestBody MoveToFolderRequest request) {
boolean success = testSetService.moveTestSetsToFolder(request.getTestSetIds(), request.getFolderId());
if (success) {
return ResponseEntity.ok("TestSets moved successfully");
} else {
return ResponseEntity.badRequest().body("Failed to move TestSets");
}
}
    /**
     * Request body for {@code POST /{id}/items}: sources of items to add.
     * Each list is optional; absent lists contribute no items.
     */
    @Data
    public static class AddItemsRequest {
        // Ids of message tasks to add as test set items
        private List<String> messageTaskIds;
        // Trace steps to convert into test set items (mapped to the service type)
        private List<TraceStep> traceSteps;
        // Ids of image tasks to add as test set items
        private List<String> imageTaskIds;
    }
    /**
     * Controller-level DTO identifying a single trace step by its trace id.
     */
    @Data
    public static class TraceStep {
        private String traceId;
    }
    /**
     * Request body for {@code POST /move-to-folder}.
     */
    @Data
    public static class MoveToFolderRequest {
        // Test sets to move
        private List<String> testSetIds;
        // Destination folder id
        private String folderId;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/ArrayLengthEvalConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.jayway.jsonpath.JsonPath;
import com.jayway.jsonpath.PathNotFoundException;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
/**
 * Configuration for evaluations that check array length in JSON responses.
 *
 * <p>The evaluation extracts an array from the actual result via {@link #jsonPath}
 * and validates its size against either {@link #exactLength} or the inclusive
 * [{@link #minLength}, {@link #maxLength}] range.
 */
@Slf4j
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class ArrayLengthEvalConfig extends EvaluationConfig {

    // ObjectMapper is thread-safe and expensive to construct; share a single
    // instance instead of allocating a new one on every validity check.
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    /**
     * JSON path to the array (e.g., "$.results", "$.data.items")
     */
    private String jsonPath;

    /**
     * Minimum length required (inclusive); null means no lower bound
     */
    private Integer minLength;

    /**
     * Maximum length allowed (inclusive); null means no upper bound
     */
    private Integer maxLength;

    /**
     * Exact length required (overrides min/max if specified)
     */
    private Integer exactLength;

    /**
     * Result of array length evaluation
     */
    @Data
    @Builder
    public static class ArrayLengthEvalResult {
        private boolean passed;
        private String message;
        private Integer length;
        private String jsonPath;
        private Integer minLength;
        private Integer maxLength;
        private Integer exactLength;
        private String error;
    }

    @Override
    public EvaluationResult.EvaluationOutput evaluate(EvaluationContext context) {
        String result = context.getActualResult();

        // A JSON path is mandatory; without it there is nothing to evaluate.
        if (jsonPath == null || jsonPath.isEmpty()) {
            return createOutputFromResult(
                    failure("No JSON path specified for array length check", null, null));
        }
        try {
            // Reject responses that are not parseable JSON before applying the path.
            if (!isValidJson(result)) {
                return createOutputFromResult(
                        failure("Response is not valid JSON", "Invalid JSON", null));
            }
            try {
                Object array = JsonPath.read(result, jsonPath);
                if (!(array instanceof List)) {
                    return createOutputFromResult(
                            failure("JSON path does not point to an array", "Not an array", null));
                }
                int length = ((List<?>) array).size();

                // Exact length takes precedence over the min/max range.
                if (exactLength != null) {
                    boolean passed = length == exactLength;
                    String message = passed
                            ? "Array length is exactly " + length
                            : "Array length is " + length + ", expected exactly " + exactLength;
                    return createOutputFromResult(lengthResult(passed, message, length));
                }

                // Otherwise check against the optional min/max bounds.
                boolean passedMin = minLength == null || length >= minLength;
                boolean passedMax = maxLength == null || length <= maxLength;
                boolean passed = passedMin && passedMax;

                StringBuilder message = new StringBuilder();
                message.append("Array length is ").append(length);
                if (!passed) {
                    message.append(" - ");
                    if (!passedMin) {
                        message.append("expected at least ").append(minLength);
                    }
                    if (!passedMin && !passedMax) {
                        message.append(" and ");
                    }
                    if (!passedMax) {
                        message.append("expected at most ").append(maxLength);
                    }
                }
                return createOutputFromResult(lengthResult(passed, message.toString(), length));
            } catch (PathNotFoundException e) {
                return createOutputFromResult(
                        failure("JSON path not found: " + jsonPath, "Path not found", null));
            }
        } catch (Exception e) {
            log.error("Error evaluating array length: {}", e.getMessage(), e);
            return createOutputFromResult(
                    failure("Error evaluating array length: " + e.getMessage(), e.getMessage(), null));
        }
    }

    /**
     * Builds a failed result carrying this config's constraints and an optional error tag.
     */
    private ArrayLengthEvalResult failure(String message, String error, Integer length) {
        return ArrayLengthEvalResult.builder()
                .passed(false)
                .message(message)
                .length(length)
                .jsonPath(jsonPath)
                .minLength(minLength)
                .maxLength(maxLength)
                .exactLength(exactLength)
                .error(error)
                .build();
    }

    /**
     * Builds a result for a completed length comparison.
     */
    private ArrayLengthEvalResult lengthResult(boolean passed, String message, int length) {
        return ArrayLengthEvalResult.builder()
                .passed(passed)
                .message(message)
                .length(length)
                .jsonPath(jsonPath)
                .minLength(minLength)
                .maxLength(maxLength)
                .exactLength(exactLength)
                .build();
    }

    /**
     * Returns true when the given string parses as JSON.
     */
    private boolean isValidJson(String json) {
        try {
            OBJECT_MAPPER.readTree(json);
            return true;
        } catch (JsonProcessingException e) {
            return false;
        }
    }

    /**
     * Converts an eval result into the generic evaluation output, applying negation.
     */
    private EvaluationResult.EvaluationOutput createOutputFromResult(ArrayLengthEvalResult evalResult) {
        EvaluationResult.EvaluationOutput output = EvaluationResult.EvaluationOutput.builder()
                .status(evalResult.isPassed() ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                .message(evalResult.getMessage())
                .details(evalResult)
                .build();
        return applyNegation(output);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/ContainsKeywordsEvalConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import lombok.extern.slf4j.Slf4j;
import java.util.ArrayList;
import java.util.List;
/**
 * Configuration for evaluations that check for keywords in responses.
 */
@Slf4j
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class ContainsKeywordsEvalConfig extends EvaluationConfig {

    /**
     * List of keywords to check for in the response
     */
    private List<String> keywords;

    /**
     * Match type for keywords
     */
    private MatchType matchType;

    /**
     * If true, matching is case-sensitive
     */
    private boolean caseSensitive;

    /**
     * Different types of keyword matching
     */
    public enum MatchType {
        ALL, // All keywords must be present
        ANY, // At least one keyword must be present
        EXACTLY, // Exactly all these keywords must be present, no more, no less
        NONE, // None of the keywords should be present
        MAJORITY // More than half of the keywords must be present
    }

    /**
     * Result of keywords evaluation
     */
    @Data
    @Builder
    public static class KeywordsEvalResult {
        private boolean passed;
        private String message;
        private List<String> foundKeywords;
        private List<String> missingKeywords;
        private MatchType matchType;
    }

    @Override
    public EvaluationResult.EvaluationOutput evaluate(EvaluationContext context) {
        String result = context.getActualResult();
        List<String> foundKeywords = new ArrayList<>();
        List<String> missingKeywords = new ArrayList<>();

        // Keywords are mandatory; without them there is nothing to evaluate.
        if (keywords == null || keywords.isEmpty()) {
            return createOutputFromResult(buildResult(false,
                    "No keywords specified for evaluation", foundKeywords, missingKeywords));
        }

        // Guard: a missing response is treated as empty text so every keyword is
        // reported missing instead of throwing an NPE.
        // Normalize the haystack once, not once per keyword.
        String haystack = result == null ? "" : result;
        if (!isCaseSensitive()) {
            haystack = haystack.toLowerCase();
        }

        // Classify each keyword as found or missing.
        for (String keyword : keywords) {
            String needle = isCaseSensitive() ? keyword : keyword.toLowerCase();
            if (haystack.contains(needle)) {
                foundKeywords.add(keyword);
            } else {
                missingKeywords.add(keyword);
            }
        }

        // Apply the match-type rule. A null matchType would make switch() throw
        // an NPE, so it is routed to the "unknown" outcome explicitly.
        boolean passed = false;
        String message;
        if (matchType == null) {
            message = "Unknown match type: " + matchType;
        } else {
            switch (matchType) {
                case ALL:
                    passed = missingKeywords.isEmpty();
                    message = passed
                            ? "All keywords found"
                            : "Missing keywords: " + String.join(", ", missingKeywords);
                    break;
                case ANY:
                    passed = !foundKeywords.isEmpty();
                    message = passed
                            ? "Found keywords: " + String.join(", ", foundKeywords)
                            : "No keywords found";
                    break;
                case EXACTLY:
                    passed = missingKeywords.isEmpty() && foundKeywords.size() == keywords.size();
                    message = passed
                            ? "Found exactly all keywords"
                            : missingKeywords.isEmpty()
                            ? "Found additional keywords"
                            : "Missing keywords: " + String.join(", ", missingKeywords);
                    break;
                case NONE:
                    passed = foundKeywords.isEmpty();
                    message = passed
                            ? "No keywords found (as expected)"
                            : "Found unexpected keywords: " + String.join(", ", foundKeywords);
                    break;
                case MAJORITY:
                    // Strictly more than half of the configured keywords must appear.
                    passed = foundKeywords.size() > keywords.size() / 2;
                    message = passed
                            ? "Found majority of keywords (" + foundKeywords.size() + "/" + keywords.size() + ")"
                            : "Found only " + foundKeywords.size() + " of " + keywords.size() + " keywords";
                    break;
                default:
                    message = "Unknown match type: " + matchType;
                    passed = false;
            }
        }
        return createOutputFromResult(buildResult(passed, message, foundKeywords, missingKeywords));
    }

    /**
     * Builds a keywords eval result with the configured match type attached.
     */
    private KeywordsEvalResult buildResult(boolean passed, String message,
                                           List<String> foundKeywords, List<String> missingKeywords) {
        return KeywordsEvalResult.builder()
                .passed(passed)
                .message(message)
                .foundKeywords(foundKeywords)
                .missingKeywords(missingKeywords)
                .matchType(matchType)
                .build();
    }

    /**
     * Converts an eval result into the generic evaluation output, applying negation.
     */
    private EvaluationResult.EvaluationOutput createOutputFromResult(KeywordsEvalResult evalResult) {
        EvaluationResult.EvaluationOutput output = EvaluationResult.EvaluationOutput.builder()
                .status(evalResult.isPassed() ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                .message(evalResult.getMessage())
                .details(evalResult)
                .build();
        return applyNegation(output);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/Evaluation.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
/**
 * Base Evaluation class for TestSets.
 * Defines the common properties for all types of evaluations.
 */
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@JsonInclude(JsonInclude.Include.NON_NULL)
@Document(collection = "evaluations")
public class Evaluation {
    @Id
    private String id;

    // Test set this evaluation is attached to
    private String testSetId;
    private String name;
    private String description;
    // High-level evaluation kind; should correspond to the config subtype below
    private EvaluationType type;
    private Long createdAt;
    private Long updatedAt;

    /**
     * Polymorphic evaluation configuration. This field-level subtype list
     * overrides the one declared on {@link EvaluationConfig}, so it must stay
     * in sync with it — any configType registered only on the base class would
     * fail to deserialize through this field.
     */
    @JsonTypeInfo(
            use = JsonTypeInfo.Id.NAME,
            include = JsonTypeInfo.As.PROPERTY,
            property = "configType")
    @JsonSubTypes({
            @JsonSubTypes.Type(value = JsonSchemaEvalConfig.class, name = "JSON_SCHEMA"),
            @JsonSubTypes.Type(value = ContainsKeywordsEvalConfig.class, name = "CONTAINS_KEYWORDS"),
            @JsonSubTypes.Type(value = ExactMatchEvalConfig.class, name = "EXACT_MATCH"),
            @JsonSubTypes.Type(value = LlmEvalConfig.class, name = "LLM_EVALUATION"),
            @JsonSubTypes.Type(value = WordCountEvalConfig.class, name = "WORD_COUNT"),
            @JsonSubTypes.Type(value = ArrayLengthEvalConfig.class, name = "ARRAY_LENGTH"),
            @JsonSubTypes.Type(value = FieldValueEvalConfig.class, name = "FIELD_VALUE_CHECK"),
            @JsonSubTypes.Type(value = RegexMatchEvalConfig.class, name = "REGEX_MATCH"),
            // Was missing here although registered on EvaluationConfig, which made
            // IMAGE_EVALUATION configs undeserializable through this field.
            @JsonSubTypes.Type(value = ImageEvalConfig.class, name = "IMAGE_EVALUATION"),
            @JsonSubTypes.Type(value = ManualEvalConfig.class, name = "MANUAL_EVALUATION")
    })
    private EvaluationConfig config;

    /**
     * Types of evaluations that can be applied to test sets.
     * NOTE(review): there is no IMAGE_EVALUATION constant although an
     * ImageEvalConfig subtype exists — confirm whether one should be added.
     */
    public enum EvaluationType {
        JSON_SCHEMA, // Validate response against a JSON schema
        CONTAINS_KEYWORDS, // Check for specific keywords in response
        EXACT_MATCH, // Exact string matching
        LLM_EVALUATION, // Use an LLM to evaluate the response
        WORD_COUNT, // Count occurrences of words
        ARRAY_LENGTH, // Check array length in JSON
        FIELD_VALUE_CHECK, // Check specific field value in JSON
        REGEX_MATCH, // Match response against a regex pattern
        MANUAL_EVALUATION // Manual review by a human evaluator
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/EvaluationConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
/**
 * Base configuration class for evaluations.
 */
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonTypeInfo(
        use = JsonTypeInfo.Id.NAME,
        include = JsonTypeInfo.As.PROPERTY,
        property = "configType")
@JsonSubTypes({
        @JsonSubTypes.Type(value = JsonSchemaEvalConfig.class, name = "JSON_SCHEMA"),
        @JsonSubTypes.Type(value = ContainsKeywordsEvalConfig.class, name = "CONTAINS_KEYWORDS"),
        @JsonSubTypes.Type(value = ExactMatchEvalConfig.class, name = "EXACT_MATCH"),
        @JsonSubTypes.Type(value = LlmEvalConfig.class, name = "LLM_EVALUATION"),
        @JsonSubTypes.Type(value = WordCountEvalConfig.class, name = "WORD_COUNT"),
        @JsonSubTypes.Type(value = ArrayLengthEvalConfig.class, name = "ARRAY_LENGTH"),
        @JsonSubTypes.Type(value = FieldValueEvalConfig.class, name = "FIELD_VALUE_CHECK"),
        @JsonSubTypes.Type(value = RegexMatchEvalConfig.class, name = "REGEX_MATCH"),
        @JsonSubTypes.Type(value = ImageEvalConfig.class, name = "IMAGE_EVALUATION"),
        @JsonSubTypes.Type(value = ManualEvalConfig.class, name = "MANUAL_EVALUATION")
})
public abstract class EvaluationConfig {

    // If true, negates the evaluation result (PASS becomes FAIL and vice versa)
    private boolean negateResult;

    /**
     * Evaluate a result against this configuration.
     *
     * @param context The evaluation context containing test set item, original and actual result
     * @return The evaluation result
     */
    public abstract EvaluationResult.EvaluationOutput evaluate(EvaluationContext context);

    /**
     * Applies {@link #negateResult} to an output: PASSED and FAILED are swapped
     * and the message is prefixed with "Negated: "; any other status (ERROR,
     * SKIPPED, PENDING) is passed through with the prefix only. A null output
     * or a disabled flag returns the output unchanged.
     */
    protected EvaluationResult.EvaluationOutput applyNegation(EvaluationResult.EvaluationOutput output) {
        if (output == null || !isNegateResult()) {
            return output;
        }
        EvaluationResult.EvaluationStatus status = output.getStatus();
        EvaluationResult.EvaluationStatus flipped = status;
        if (status == EvaluationResult.EvaluationStatus.PASSED) {
            flipped = EvaluationResult.EvaluationStatus.FAILED;
        } else if (status == EvaluationResult.EvaluationStatus.FAILED) {
            flipped = EvaluationResult.EvaluationStatus.PASSED;
        }
        return EvaluationResult.EvaluationOutput.builder()
                .status(flipped)
                .message("Negated: " + output.getMessage())
                .details(output.getDetails())
                .build();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/EvaluationContext.java
|
package ai.driftkit.context.spring.testsuite.domain;
import ai.driftkit.context.spring.testsuite.domain.TestSetItem;
import ai.driftkit.workflows.spring.service.AIService;
import lombok.Builder;
import lombok.Data;
/**
 * Context for evaluation execution. Carries everything an
 * {@code EvaluationConfig.evaluate} implementation may need.
 */
@Data
@Builder
public class EvaluationContext {
    /**
     * The test set item being evaluated
     */
    private TestSetItem testSetItem;
    /**
     * The original result from the test set item
     */
    private String originalResult;
    /**
     * The actual result to evaluate (may be different if using alternative prompt);
     * this is the value evaluations compare against
     */
    private String actualResult;
    /**
     * The AIService instance (for LLM evaluations)
     */
    private AIService aiService;
    /**
     * Additional context data for evaluation (if needed)
     */
    private Object additionalContext;
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/EvaluationResult.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
/**
 * Results of evaluation runs. One document per (evaluation, test set item, run)
 * combination, grouped by {@link #runId}.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@JsonInclude(JsonInclude.Include.NON_NULL)
@Document(collection = "evaluation_results")
public class EvaluationResult {
    @Id
    private String id;
    // Evaluation definition that produced this result
    private String evaluationId;
    // Item the evaluation was applied to
    private String testSetItemId;
    private String runId; // To group results from the same run
    private EvaluationStatus status;
    private String message;
    private Object details; // Additional details specific to evaluation type
    private Long createdAt;
    // Enhanced data for better debugging and analysis
    private String originalPrompt; // The prompt used for the request
    private String modelResult; // The actual model response
    private Object promptVariables; // Variables used in the prompt
    private Long processingTimeMs; // Time taken to process this evaluation
    private String errorDetails; // Detailed error information if available
    /**
     * Status of the evaluation result
     */
    public enum EvaluationStatus {
        PASSED,
        FAILED,
        ERROR,
        SKIPPED,
        PENDING // Used for manual evaluations awaiting human review
    }
    /**
     * Output of an evaluation, to be stored in the result.
     * Mirrors the result fields so an evaluation can populate them
     * without touching persistence concerns.
     */
    @Data
    @Builder
    @NoArgsConstructor
    @AllArgsConstructor
    @JsonInclude(JsonInclude.Include.NON_NULL)
    public static class EvaluationOutput {
        private EvaluationStatus status;
        private String message;
        private Object details;
        private String originalPrompt;
        private String modelResult;
        private Object promptVariables;
        private Long processingTimeMs;
        private String errorDetails;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/EvaluationRun.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
import java.util.Map;
/**
 * Represents a test run for evaluations, optionally with an alternative prompt.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@JsonInclude(JsonInclude.Include.NON_NULL)
@Document(collection = "evaluation_runs")
public class EvaluationRun {
    @Id
    private String id;
    // Test set this run executes against
    private String testSetId;
    private String name;
    private String description;
    // If specified, this prompt will be used instead of the original one
    private String alternativePromptId;
    private String alternativePromptTemplate;
    // Either modelId OR workflow should be specified, not both
    private String modelId; // Direct model ID (like "gpt-4-turbo")
    private String workflow; // Workflow ID for more advanced processing
    private Double temperature;
    // Run status
    private RunStatus status;
    // Timestamps (Long; presumably epoch millis — TODO confirm with writers)
    private Long startedAt;
    private Long completedAt;
    // Image Test Settings
    @Builder.Default
    private boolean regenerateImages = true; // If true, will regenerate images instead of using existing ones
    // Statistics
    private Map<String, Integer> statusCounts; // Map of status -> count
    /**
     * Status of the evaluation run
     */
    public enum RunStatus {
        QUEUED,
        RUNNING,
        COMPLETED,
        FAILED,
        CANCELLED,
        PENDING // Used when manual evaluations are waiting for human review
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/ExactMatchEvalConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import lombok.extern.slf4j.Slf4j;
/**
 * Configuration for exact text matching evaluations.
 */
@Slf4j
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class ExactMatchEvalConfig extends EvaluationConfig {

    /**
     * The exact text to match against
     */
    private String expectedText;

    /**
     * If true, matching is case-sensitive
     */
    private boolean caseSensitive;

    /**
     * If true, ignores whitespace differences: runs of whitespace collapse to a
     * single space and leading/trailing whitespace is trimmed before comparing
     */
    private boolean ignoreWhitespace;

    /**
     * Result of exact match evaluation
     */
    @Data
    @Builder
    public static class ExactMatchEvalResult {
        private boolean passed;
        private String message;
        private String expectedText;
        private String actualText;
        private boolean caseSensitive;
        private boolean ignoreWhitespace;
    }

    @Override
    public EvaluationResult.EvaluationOutput evaluate(EvaluationContext context) {
        String result = context.getActualResult();

        // Expected text is mandatory; without it there is nothing to compare.
        if (expectedText == null || expectedText.isEmpty()) {
            return createOutputFromResult(
                    buildResult(false, "No expected text specified for evaluation", result));
        }

        // Guard: a missing actual result can never match and would otherwise
        // cause an NPE during normalization below.
        if (result == null) {
            return createOutputFromResult(
                    buildResult(false, "No actual result to compare", null));
        }

        String processedExpected = expectedText;
        String processedResult = result;

        // Apply case sensitivity
        if (!caseSensitive) {
            processedExpected = processedExpected.toLowerCase();
            processedResult = processedResult.toLowerCase();
        }

        // Apply whitespace handling
        if (ignoreWhitespace) {
            processedExpected = processedExpected.replaceAll("\\s+", " ").trim();
            processedResult = processedResult.replaceAll("\\s+", " ").trim();
        }

        boolean matches = processedResult.equals(processedExpected);
        String message = matches
                ? "Result matches expected text exactly"
                : "Result does not match expected text";
        return createOutputFromResult(buildResult(matches, message, result));
    }

    /**
     * Builds an eval result carrying the configured comparison settings and the
     * raw (unprocessed) actual text for debugging.
     */
    private ExactMatchEvalResult buildResult(boolean passed, String message, String actualText) {
        return ExactMatchEvalResult.builder()
                .passed(passed)
                .message(message)
                .expectedText(expectedText)
                .actualText(actualText)
                .caseSensitive(caseSensitive)
                .ignoreWhitespace(ignoreWhitespace)
                .build();
    }

    /**
     * Converts an eval result into the generic evaluation output, applying negation.
     */
    private EvaluationResult.EvaluationOutput createOutputFromResult(ExactMatchEvalResult evalResult) {
        EvaluationResult.EvaluationOutput output = EvaluationResult.EvaluationOutput.builder()
                .status(evalResult.isPassed() ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                .message(evalResult.getMessage())
                .details(evalResult)
                .build();
        return applyNegation(output);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/FieldValueEvalConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.jayway.jsonpath.JsonPath;
import com.jayway.jsonpath.PathNotFoundException;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
import java.util.Map;
/**
* Configuration for evaluations that check specific field values in JSON responses
*/
@Slf4j
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class FieldValueEvalConfig extends EvaluationConfig {
/**
* JSON path to the field (e.g., "$.name", "$.user.email")
*/
private String jsonPath;
/**
* Expected value of the field
*/
private String expectedValue;
/**
* Type of expected value
*/
private ValueType valueType;
/**
* Comparison operator to use
*/
private ComparisonOperator operator;
/**
* Types of values that can be compared
*/
public enum ValueType {
STRING,
NUMBER,
BOOLEAN,
NULL,
ARRAY,
OBJECT
}
/**
* Comparison operators
*/
public enum ComparisonOperator {
EQUALS, // ==
NOT_EQUALS, // !=
GREATER_THAN, // >
LESS_THAN, // <
GREATER_THAN_EQUALS, // >=
LESS_THAN_EQUALS, // <=
CONTAINS, // contains
STARTS_WITH, // starts with
ENDS_WITH, // ends with
MATCHES_REGEX, // matches regex
EXISTS // field exists
}
/**
* Result of field value evaluation
*/
@Data
@Builder
public static class FieldValueEvalResult {
private boolean passed;
private String message;
private String jsonPath;
private Object actualValue;
private String expectedValue;
private ValueType valueType;
private ComparisonOperator operator;
private String error;
}
@Override
public EvaluationResult.EvaluationOutput evaluate(EvaluationContext context) {
String result = context.getActualResult();
// Check if JSON path is empty
if (jsonPath == null || jsonPath.isEmpty()) {
FieldValueEvalResult evalResult = FieldValueEvalResult.builder()
.passed(false)
.message("No JSON path specified for field value check")
.jsonPath(jsonPath)
.expectedValue(expectedValue)
.valueType(valueType)
.operator(operator)
.error("Missing JSON path")
.build();
return createOutputFromResult(evalResult);
}
try {
// First, check if the result is valid JSON
if (!isValidJson(result)) {
FieldValueEvalResult evalResult = FieldValueEvalResult.builder()
.passed(false)
.message("Response is not valid JSON")
.jsonPath(jsonPath)
.expectedValue(expectedValue)
.valueType(valueType)
.operator(operator)
.error("Invalid JSON")
.build();
return createOutputFromResult(evalResult);
}
// For EXISTS operator, just check if the path exists
if (operator == ComparisonOperator.EXISTS) {
try {
JsonPath.read(result, jsonPath);
FieldValueEvalResult evalResult = FieldValueEvalResult.builder()
.passed(true)
.message("Field exists: " + jsonPath)
.jsonPath(jsonPath)
.operator(operator)
.build();
return createOutputFromResult(evalResult);
} catch (PathNotFoundException e) {
FieldValueEvalResult evalResult = FieldValueEvalResult.builder()
.passed(false)
.message("Field does not exist: " + jsonPath)
.jsonPath(jsonPath)
.operator(operator)
.error("Path not found")
.build();
return createOutputFromResult(evalResult);
}
}
// For all other operators, we need expected value and value type
if (expectedValue == null && operator != ComparisonOperator.EXISTS) {
FieldValueEvalResult evalResult = FieldValueEvalResult.builder()
.passed(false)
.message("No expected value specified for field value check")
.jsonPath(jsonPath)
.expectedValue(expectedValue)
.valueType(valueType)
.operator(operator)
.error("Missing expected value")
.build();
return createOutputFromResult(evalResult);
}
if (valueType == null && operator != ComparisonOperator.EXISTS) {
FieldValueEvalResult evalResult = FieldValueEvalResult.builder()
.passed(false)
.message("No value type specified for field value check")
.jsonPath(jsonPath)
.expectedValue(expectedValue)
.operator(operator)
.error("Missing value type")
.build();
return createOutputFromResult(evalResult);
}
// Extract the field value
try {
Object actualValue = JsonPath.read(result, jsonPath);
// Compare values based on type and operator
boolean passed = compareValues(actualValue, expectedValue, valueType, operator);
String message = passed
? "Field value matches expectation"
: "Field value does not match expectation";
FieldValueEvalResult evalResult = FieldValueEvalResult.builder()
.passed(passed)
.message(message)
.jsonPath(jsonPath)
.actualValue(actualValue)
.expectedValue(expectedValue)
.valueType(valueType)
.operator(operator)
.build();
return createOutputFromResult(evalResult);
} catch (PathNotFoundException e) {
FieldValueEvalResult evalResult = FieldValueEvalResult.builder()
.passed(false)
.message("JSON path not found: " + jsonPath)
.jsonPath(jsonPath)
.expectedValue(expectedValue)
.valueType(valueType)
.operator(operator)
.error("Path not found")
.build();
return createOutputFromResult(evalResult);
}
} catch (Exception e) {
log.error("Error evaluating field value: {}", e.getMessage(), e);
FieldValueEvalResult evalResult = FieldValueEvalResult.builder()
.passed(false)
.message("Error evaluating field value: " + e.getMessage())
.jsonPath(jsonPath)
.expectedValue(expectedValue)
.valueType(valueType)
.operator(operator)
.error(e.getMessage())
.build();
return createOutputFromResult(evalResult);
}
}
private boolean isValidJson(String json) {
try {
new ObjectMapper().readTree(json);
return true;
} catch (JsonProcessingException e) {
return false;
}
}
/**
* Compare values based on type and operator
*/
private boolean compareValues(Object actual, String expected, ValueType valueType, ComparisonOperator operator) {
if (actual == null) {
return valueType == ValueType.NULL;
}
switch (valueType) {
case STRING:
return compareStringValues(actual.toString(), expected, operator);
case NUMBER:
try {
double actualNum = Double.parseDouble(actual.toString());
double expectedNum = Double.parseDouble(expected);
return compareNumberValues(actualNum, expectedNum, operator);
} catch (NumberFormatException e) {
return false;
}
case BOOLEAN:
boolean actualBool = Boolean.parseBoolean(actual.toString());
boolean expectedBool = Boolean.parseBoolean(expected);
return actualBool == expectedBool;
case NULL:
return actual == null;
case ARRAY:
if (!(actual instanceof List)) {
return false;
}
// For arrays, we only support EQUALS and NOT_EQUALS
if (operator == ComparisonOperator.EQUALS) {
return actual.toString().equals(expected);
} else if (operator == ComparisonOperator.NOT_EQUALS) {
return !actual.toString().equals(expected);
}
return false;
case OBJECT:
if (!(actual instanceof Map)) {
return false;
}
// For objects, we only support EQUALS and NOT_EQUALS
if (operator == ComparisonOperator.EQUALS) {
return actual.toString().equals(expected);
} else if (operator == ComparisonOperator.NOT_EQUALS) {
return !actual.toString().equals(expected);
}
return false;
default:
return false;
}
}
/**
 * Applies a string comparison operator to the actual and expected values.
 * Numeric-only operators (e.g. GREATER_THAN) yield {@code false}.
 */
private boolean compareStringValues(String actual, String expected, ComparisonOperator operator) {
    boolean matched;
    switch (operator) {
        case EQUALS:
            matched = actual.equals(expected);
            break;
        case NOT_EQUALS:
            matched = !actual.equals(expected);
            break;
        case CONTAINS:
            matched = actual.contains(expected);
            break;
        case STARTS_WITH:
            matched = actual.startsWith(expected);
            break;
        case ENDS_WITH:
            matched = actual.endsWith(expected);
            break;
        case MATCHES_REGEX:
            // Note: String.matches requires the regex to match the whole string.
            matched = actual.matches(expected);
            break;
        default:
            matched = false;
            break;
    }
    return matched;
}
/**
 * Applies a numeric comparison operator to the actual and expected values.
 * String-only operators (e.g. CONTAINS) yield {@code false}.
 */
private boolean compareNumberValues(double actual, double expected, ComparisonOperator operator) {
    boolean matched;
    switch (operator) {
        case EQUALS:
            matched = actual == expected;
            break;
        case NOT_EQUALS:
            matched = actual != expected;
            break;
        case GREATER_THAN:
            matched = actual > expected;
            break;
        case LESS_THAN:
            matched = actual < expected;
            break;
        case GREATER_THAN_EQUALS:
            matched = actual >= expected;
            break;
        case LESS_THAN_EQUALS:
            matched = actual <= expected;
            break;
        default:
            matched = false;
            break;
    }
    return matched;
}
/**
 * Wraps a field-value evaluation result into a standard evaluation output,
 * honoring the negation flag configured on this evaluation.
 */
private EvaluationResult.EvaluationOutput createOutputFromResult(FieldValueEvalResult evalResult) {
    EvaluationResult.EvaluationStatus status = evalResult.isPassed()
            ? EvaluationResult.EvaluationStatus.PASSED
            : EvaluationResult.EvaluationStatus.FAILED;
    return applyNegation(EvaluationResult.EvaluationOutput.builder()
            .status(status)
            .message(evalResult.getMessage())
            .details(evalResult)
            .build());
}
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/Folder.java
|
package ai.driftkit.context.spring.testsuite.domain;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
/**
 * Organizational folder grouping either test sets or test-set runs.
 * Persisted in the {@code folders} Mongo collection.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Document(collection = "folders")
public class Folder {
    @Id
    private String id;
    // Human-readable folder name shown in listings.
    private String name;
    private String description;
    // Creation timestamp — presumably epoch millis; confirm against writers.
    private long createdAt;
    // What this folder contains; defaults to TEST_SET when built via the builder.
    @Builder.Default
    private FolderType type = FolderType.TEST_SET;
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/FolderType.java
|
package ai.driftkit.context.spring.testsuite.domain;
/**
 * Discriminates what a {@code Folder} contains.
 */
public enum FolderType {
    TEST_SET,      // folder holds test set definitions
    TEST_SET_RUNS  // folder holds executed test-set runs
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/ImageEvalConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import ai.driftkit.common.domain.ImageMessageTask;
import ai.driftkit.context.spring.testsuite.domain.EvaluationResult.EvaluationStatus;
import com.fasterxml.jackson.annotation.JsonInclude;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import org.springframework.util.StringUtils;
/**
 * Configuration for evaluating image generation results.
 *
 * <p>The evaluation reads the generated {@link ImageMessageTask} from the
 * evaluation context's additional context and checks a single configured
 * {@link ImageEvaluationCriteria}. A null criteria falls back to
 * {@link ImageEvaluationCriteria#BASIC_VALIDATION}.
 */
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class ImageEvalConfig extends EvaluationConfig {
    /**
     * The criteria to evaluate (what aspect of the image to check).
     * Treated as BASIC_VALIDATION when null.
     */
    private ImageEvaluationCriteria criteria;
    /**
     * Expected number of images to be generated.
     */
    private Integer expectedImageCount;
    /**
     * Expected minimum file size in bytes.
     */
    private Long minImageSize;
    /**
     * Expected maximum file size in bytes.
     */
    private Long maxImageSize;
    /**
     * Expected image aspect ratio (width/height, e.g. 1.0 for square).
     * Allows for some tolerance (+/- 0.1).
     */
    private Double expectedAspectRatio;
    /**
     * Whether to check that revised prompt is provided.
     */
    private Boolean hasRevisedPrompt;

    @Override
    public EvaluationResult.EvaluationOutput evaluate(EvaluationContext context) {
        // Image evaluation needs the ImageMessageTask carried in additionalContext.
        // (instanceof is null-safe, so no separate null check is required.)
        if (!(context.getAdditionalContext() instanceof ImageMessageTask)) {
            return EvaluationResult.EvaluationOutput.builder()
                    .status(EvaluationResult.EvaluationStatus.ERROR)
                    .message("Image task not available in evaluation context")
                    .build();
        }
        ImageMessageTask imageTask = (ImageMessageTask) context.getAdditionalContext();
        if (imageTask.getImages() == null || imageTask.getImages().isEmpty()) {
            return EvaluationResult.EvaluationOutput.builder()
                    .status(EvaluationResult.EvaluationStatus.FAILED)
                    .message("No images generated")
                    .build();
        }
        // Fix: guard against a null criteria (e.g. builder used without setting
        // it) instead of throwing an NPE from the switch — fall back to the
        // documented BASIC_VALIDATION default.
        ImageEvaluationCriteria effectiveCriteria =
                criteria != null ? criteria : ImageEvaluationCriteria.BASIC_VALIDATION;
        switch (effectiveCriteria) {
            case IMAGE_COUNT:
                return evaluateImageCount(imageTask);
            case IMAGE_SIZE:
                return evaluateImageSize(imageTask);
            case ASPECT_RATIO:
                return evaluateAspectRatio(imageTask);
            case REVISED_PROMPT:
                return evaluateRevisedPrompt(imageTask);
            case BASIC_VALIDATION:
            default:
                return evaluateBasicValidation(imageTask);
        }
    }

    /** Checks the number of generated images against {@link #expectedImageCount} (null = any count passes). */
    private EvaluationResult.EvaluationOutput evaluateImageCount(ImageMessageTask imageTask) {
        int actualCount = imageTask.getImages().size();
        if (expectedImageCount != null && actualCount != expectedImageCount) {
            return applyNegation(EvaluationResult.EvaluationOutput.builder()
                    .status(EvaluationResult.EvaluationStatus.FAILED)
                    .message("Expected " + expectedImageCount + " images but got " + actualCount)
                    .build());
        }
        return applyNegation(EvaluationResult.EvaluationOutput.builder()
                .status(EvaluationResult.EvaluationStatus.PASSED)
                .message("Image count: " + actualCount)
                .build());
    }

    /**
     * Checks each image's byte size against the optional {@link #minImageSize}
     * and {@link #maxImageSize} bounds; all images must be in range to pass.
     */
    private EvaluationResult.EvaluationOutput evaluateImageSize(ImageMessageTask imageTask) {
        boolean allValid = true;
        StringBuilder message = new StringBuilder("Image sizes: ");
        for (int i = 0; i < imageTask.getImages().size(); i++) {
            ImageMessageTask.GeneratedImage img = imageTask.getImages().get(i);
            if (img.getData() == null) {
                message.append("Image #").append(i + 1).append(" has no data. ");
                allValid = false;
                continue;
            }
            long size = img.getData().length;
            message.append(size).append(" bytes");
            if (minImageSize != null && size < minImageSize) {
                message.append(" (too small, min: ").append(minImageSize).append(")");
                allValid = false;
            }
            if (maxImageSize != null && size > maxImageSize) {
                message.append(" (too large, max: ").append(maxImageSize).append(")");
                allValid = false;
            }
            if (i < imageTask.getImages().size() - 1) {
                message.append(", ");
            }
        }
        return applyNegation(EvaluationResult.EvaluationOutput.builder()
                .status(allValid ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                .message(message.toString())
                .build());
    }

    /**
     * Aspect-ratio checking would require decoding image dimensions, which is
     * not implemented — always reports ERROR rather than a false PASS/FAIL.
     */
    private EvaluationResult.EvaluationOutput evaluateAspectRatio(ImageMessageTask imageTask) {
        return applyNegation(EvaluationResult.EvaluationOutput.builder()
                .status(EvaluationStatus.ERROR)
                .message("Aspect ratio evaluation requires image dimension extraction, which is not implemented")
                .build());
    }

    /**
     * Checks whether every image carries a revised prompt and compares that
     * against {@link #hasRevisedPrompt} (null = informational pass).
     */
    private EvaluationResult.EvaluationOutput evaluateRevisedPrompt(ImageMessageTask imageTask) {
        boolean allHaveRevised = true;
        StringBuilder message = new StringBuilder("Revised prompts: ");
        for (int i = 0; i < imageTask.getImages().size(); i++) {
            ImageMessageTask.GeneratedImage img = imageTask.getImages().get(i);
            boolean hasRevised = StringUtils.hasText(img.getRevisedPrompt());
            if (!hasRevised) {
                allHaveRevised = false;
                message.append("Image #").append(i + 1).append(" has no revised prompt. ");
            } else {
                message.append("Image #").append(i + 1).append(" has revised prompt. ");
            }
        }
        boolean passed = hasRevisedPrompt == null || allHaveRevised == hasRevisedPrompt;
        return applyNegation(EvaluationResult.EvaluationOutput.builder()
                .status(passed ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                .message(message.toString())
                .build());
    }

    /** Sanity check: every generated image must carry either raw data or a URL. */
    private EvaluationResult.EvaluationOutput evaluateBasicValidation(ImageMessageTask imageTask) {
        boolean valid = true;
        StringBuilder message = new StringBuilder("Image validation: ");
        for (int i = 0; i < imageTask.getImages().size(); i++) {
            ImageMessageTask.GeneratedImage img = imageTask.getImages().get(i);
            boolean hasData = img.getData() != null && img.getData().length > 0;
            boolean hasUrl = StringUtils.hasText(img.getUrl());
            if (!hasData && !hasUrl) {
                valid = false;
                message.append("Image #").append(i + 1).append(" has no data or URL. ");
            }
        }
        if (valid) {
            message.append("All images have valid data or URLs.");
        }
        return applyNegation(EvaluationResult.EvaluationOutput.builder()
                .status(valid ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                .message(message.toString())
                .build());
    }

    /** Aspects of a generated image that can be checked. */
    public enum ImageEvaluationCriteria {
        BASIC_VALIDATION, // Check that images exist and have basic data
        IMAGE_COUNT,      // Check number of images generated
        IMAGE_SIZE,       // Check image file size
        ASPECT_RATIO,     // Check image dimensions/ratio
        REVISED_PROMPT    // Check if revised prompts are provided
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/JsonSchemaEvalConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.networknt.schema.JsonSchema;
import com.networknt.schema.JsonSchemaFactory;
import com.networknt.schema.SpecVersion;
import com.networknt.schema.ValidationMessage;
import lombok.*;
import lombok.experimental.SuperBuilder;
import lombok.extern.slf4j.Slf4j;
import java.util.Set;
import java.util.stream.Collectors;
/**
 * Configuration for JSON Schema validation evaluations.
 *
 * <p>Validates the actual result against a draft-07 JSON Schema, or — when
 * {@link #validateJsonOnly} is set or no schema is configured — merely checks
 * that the result is well-formed JSON with an object or array root.
 */
@Slf4j
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class JsonSchemaEvalConfig extends EvaluationConfig {

    // ObjectMapper is thread-safe and expensive to construct; share a single
    // instance across evaluations instead of allocating one per call.
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    /**
     * The JSON schema to validate against, in standard JSON Schema format.
     * Example:
     * {
     *   "$schema": "http://json-schema.org/draft-07/schema#",
     *   "type": "object",
     *   "properties": {
     *     "name": { "type": "string" },
     *     "age": { "type": "integer", "minimum": 0 }
     *   },
     *   "required": ["name"]
     * }
     */
    private String jsonSchema;

    /**
     * If true, only validates that the response is valid JSON, ignoring the schema.
     */
    private boolean validateJsonOnly;

    /**
     * Result of JSON Schema validation.
     */
    @Data
    @Builder
    public static class JsonSchemaEvalResult {
        private boolean passed;
        private String message;
        private Set<String> validationErrors;
        private boolean isValidJson;
    }

    @Override
    public EvaluationResult.EvaluationOutput evaluate(EvaluationContext context) {
        String result = context.getActualResult();
        JsonSchemaEvalResult evalResult;
        try {
            if (!isValidJson(result)) {
                evalResult = JsonSchemaEvalResult.builder()
                        .passed(false)
                        .isValidJson(false)
                        .message("Response is not valid JSON")
                        .build();
            }
            // JSON-format-only mode, or no schema configured to check against.
            else if (isValidateJsonOnly() || getJsonSchema() == null || getJsonSchema().trim().isEmpty()) {
                evalResult = JsonSchemaEvalResult.builder()
                        .passed(true)
                        .isValidJson(true)
                        .message("Response is valid JSON")
                        .build();
            }
            // Validate against the configured schema.
            else {
                JsonNode jsonNode = OBJECT_MAPPER.readTree(result);
                JsonSchemaFactory factory = JsonSchemaFactory.getInstance(SpecVersion.VersionFlag.V7);
                // The schema is guaranteed non-blank here (checked above), so
                // no empty-schema fallback is needed.
                JsonSchema schema = factory.getSchema(getJsonSchema());
                Set<ValidationMessage> validationMessages = schema.validate(jsonNode);
                boolean passed = validationMessages.isEmpty();
                Set<String> errorMessages = validationMessages.stream()
                        .map(ValidationMessage::getMessage)
                        .collect(Collectors.toSet());
                evalResult = JsonSchemaEvalResult.builder()
                        .passed(passed)
                        .isValidJson(true)
                        .message(passed ? "JSON schema validation passed" : "JSON schema validation failed")
                        .validationErrors(errorMessages)
                        .build();
            }
        } catch (Exception e) {
            log.error("Error in JSON schema validation: {}", e.getMessage(), e);
            evalResult = JsonSchemaEvalResult.builder()
                    .passed(false)
                    .message("JSON schema validation error: " + e.getMessage())
                    .build();
        }
        // Wrap into the standard output and apply negation if configured.
        EvaluationResult.EvaluationOutput output = EvaluationResult.EvaluationOutput.builder()
                .status(evalResult.isPassed() ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                .message(evalResult.getMessage())
                .details(evalResult)
                .build();
        return applyNegation(output);
    }

    /**
     * A response counts as valid JSON only when it parses AND its root is an
     * object or an array (bare scalars such as {@code 42} are rejected).
     */
    private boolean isValidJson(String json) {
        try {
            JsonNode jsonNode = OBJECT_MAPPER.readTree(json);
            return jsonNode.isArray() || jsonNode.isObject();
        } catch (Exception e) {
            return false;
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/LlmEvalConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import ai.driftkit.common.domain.ImageMessageTask;
import ai.driftkit.common.domain.Language;
import ai.driftkit.common.domain.MessageTask;
import ai.driftkit.common.domain.client.ModelImageResponse.ModelContentMessage;
import ai.driftkit.common.domain.client.ModelTextResponse;
import ai.driftkit.context.spring.testsuite.service.EvaluationService;
import ai.driftkit.workflows.spring.domain.ModelRequestTrace;
import ai.driftkit.workflows.spring.service.AIService;
import ai.driftkit.workflows.spring.service.ModelRequestContext;
import com.fasterxml.jackson.annotation.JsonInclude;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import lombok.extern.slf4j.Slf4j;
import java.util.*;
/**
 * Configuration for evaluations that use an LLM to evaluate responses.
 *
 * <p>Builds an evaluation prompt from the original task, the actual model
 * response and the expected result, sends it through {@link AIService}, and
 * interprets the reply as a PASS/FAIL verdict. Image test items are handled
 * separately via a multimodal image-to-text request.
 */
@Slf4j
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class LlmEvalConfig extends EvaluationConfig {
    /**
     * The evaluation prompt to use
     * Available placeholders:
     * {{task}} - The original task/prompt
     * {{actual}} - The actual model response
     * {{expected}} - The reference/expected result from TestSetItem
     */
    private String evaluationPrompt;
    /**
     * Model to use for evaluation (if null, uses the test item's model)
     */
    private String modelId;
    /**
     * If true, returns detailed feedback in addition to pass/fail
     */
    private boolean generateFeedback;
    /**
     * Temperature setting for the LLM evaluation
     */
    private Double temperature;

    /**
     * Result of LLM evaluation
     */
    @Data
    @Builder
    public static class LlmEvalResult {
        private boolean passed;
        private String message;
        private String feedback;
        private String prompt;
    }

    @Override
    public EvaluationResult.EvaluationOutput evaluate(EvaluationContext context) {
        try {
            String task = context.getTestSetItem().getMessage();
            String actual = context.getActualResult();
            String expected = context.getOriginalResult();
            AIService aiService = context.getAiService();
            // Check if AI service is available
            if (aiService == null) {
                log.error("AIService not provided in evaluation context");
                return EvaluationResult.EvaluationOutput.builder()
                        .status(EvaluationResult.EvaluationStatus.ERROR)
                        .message("AIService not available for LLM evaluation")
                        .build();
            }
            // Image test items are evaluated with a multimodal request instead.
            if (context.getTestSetItem().isImageTask() && context.getAdditionalContext() instanceof ImageMessageTask) {
                return evaluateImages(context, aiService);
            }
            // Fix: fail fast with a clear message when no prompt is configured
            // (previously this threw an NPE that surfaced as "error: null").
            String prompt = getEvaluationPrompt();
            if (prompt == null || prompt.isEmpty()) {
                return EvaluationResult.EvaluationOutput.builder()
                        .status(EvaluationResult.EvaluationStatus.ERROR)
                        .message("No evaluation prompt configured for LLM evaluation")
                        .build();
            }
            // String.valueOf used for all placeholders so a null task does not NPE.
            prompt = prompt.replace("{{task}}", String.valueOf(task))
                    .replace("{{actual}}", String.valueOf(actual))
                    .replace("{{expected}}", String.valueOf(expected));
            // Build the evaluation request, mirroring the original item's settings.
            MessageTask messageTask = new MessageTask();
            messageTask.setMessageId(UUID.randomUUID().toString());
            messageTask.setMessage(prompt);
            // Use original workflow from test set item
            String workflow = context.getTestSetItem().getWorkflow();
            if (workflow != null) {
                messageTask.setWorkflow(workflow);
            }
            // Copy system message from test set item
            String systemMessage = context.getTestSetItem().getSystemMessage();
            if (systemMessage != null) {
                messageTask.setSystemMessage(systemMessage);
            }
            // Copy variables from test set item
            messageTask.setVariables(context.getTestSetItem().getVariablesAsObjectMap());
            // Set language from test set item (GENERAL as default if not specified)
            messageTask.setLanguage(Optional.ofNullable(context.getTestSetItem().getLanguage()).orElse(Language.GENERAL));
            // Set JSON request/response flags
            messageTask.setJsonRequest(context.getTestSetItem().isJsonRequest());
            messageTask.setJsonResponse(context.getTestSetItem().isJsonResponse());
            // Mark the request as coming from the test pipeline
            messageTask.setPurpose(EvaluationService.QA_TEST_PIPELINE_PURPOSE);
            // Copy log probability settings if available
            if (context.getTestSetItem().getLogprobs() != null) {
                messageTask.setLogprobs(context.getTestSetItem().getLogprobs() > 0);
            }
            if (context.getTestSetItem().getTopLogprobs() != null) {
                messageTask.setTopLogprobs(context.getTestSetItem().getTopLogprobs());
            }
            // Evaluation-config overrides take precedence over the item's settings.
            messageTask.setModelId(getModelId() != null ? getModelId() : context.getTestSetItem().getModel());
            messageTask.setTemperature(getTemperature() != null ? getTemperature() : context.getTestSetItem().getTemperature());
            // Run the evaluation using AIService.chat to ensure workflow support
            MessageTask response = aiService.chat(messageTask);
            String responseText = response.getResult();
            boolean passed = isPassVerdict(responseText);
            LlmEvalResult evalResult = LlmEvalResult.builder()
                    .passed(passed)
                    .message(passed ? "LLM evaluation passed" : "LLM evaluation failed")
                    .feedback(responseText)
                    .prompt(prompt)
                    .build();
            // Create result output and apply negation if configured
            EvaluationResult.EvaluationOutput output = EvaluationResult.EvaluationOutput.builder()
                    .status(evalResult.isPassed() ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                    .message(evalResult.getMessage())
                    .details(evalResult)
                    .build();
            return applyNegation(output);
        } catch (Exception e) {
            log.error("Error in LLM evaluation: {}", e.getMessage(), e);
            return EvaluationResult.EvaluationOutput.builder()
                    .status(EvaluationResult.EvaluationStatus.ERROR)
                    .message("LLM evaluation error: " + e.getMessage())
                    .build();
        }
    }

    /**
     * Interprets the evaluator model's free-text verdict. The convention is
     * that the reply starts with PASS or FAIL; a reply passes only when it
     * mentions "pass" and never mentions "fail" (case-insensitive).
     * Fix: a null reply now yields a FAILED verdict instead of an NPE.
     */
    private static boolean isPassVerdict(String responseText) {
        if (responseText == null) {
            return false;
        }
        String normalized = responseText.toLowerCase(Locale.ROOT);
        return normalized.contains("pass") && !normalized.contains("fail");
    }

    /**
     * Evaluates image content using an LLM to compare the prompt with the
     * generated image. Creates a multimodal request that sends both the image
     * and the prompt to the LLM.
     */
    private EvaluationResult.EvaluationOutput evaluateImages(EvaluationContext context, AIService aiService) {
        try {
            // Get the image task from the additional context
            ImageMessageTask imageTask = (ImageMessageTask) context.getAdditionalContext();
            if (imageTask.getImages() == null || imageTask.getImages().isEmpty()) {
                return EvaluationResult.EvaluationOutput.builder()
                        .status(EvaluationResult.EvaluationStatus.FAILED)
                        .message("No images available for evaluation")
                        .build();
            }
            // Only the first generated image is evaluated.
            ImageMessageTask.GeneratedImage generatedImage = imageTask.getImages().get(0);
            if (generatedImage.getData() == null || generatedImage.getData().length == 0) {
                return EvaluationResult.EvaluationOutput.builder()
                        .status(EvaluationResult.EvaluationStatus.FAILED)
                        .message("Image data not available for evaluation")
                        .build();
            }
            // Get the original prompt that was used to generate the image
            String imagePrompt = context.getTestSetItem().getMessage();
            // Create a specialized prompt for LLM to evaluate image-text matching
            String evaluationPrompt = getEvaluationPrompt();
            if (evaluationPrompt == null || evaluationPrompt.isEmpty()) {
                // If no custom evaluation prompt was provided, use a default one
                evaluationPrompt =
                        "I'm going to show you an image that was generated based on the following prompt:\n\n" +
                        "\"{{task}}\"\n\n" +
                        "Please carefully analyze the image and evaluate if it matches what was requested in the prompt.\n" +
                        "Consider elements like subjects, style, composition, colors, and overall faithfulness to the prompt.\n\n" +
                        "Respond with either PASS if the image properly represents the prompt, or FAIL if it doesn't match.\n" +
                        "Then provide a detailed explanation of your assessment, discussing specific elements that match or don't match.";
            }
            // Replace placeholders in the evaluation prompt
            evaluationPrompt = evaluationPrompt.replace("{{task}}", imagePrompt);
            // Create message task for evaluation
            MessageTask messageTask = new MessageTask();
            messageTask.setMessageId(UUID.randomUUID().toString());
            // Configure model parameters (config overrides win over the item's)
            String workflow = context.getTestSetItem().getWorkflow();
            String modelId = getModelId() != null ? getModelId() : context.getTestSetItem().getModel();
            Double temperature = getTemperature() != null ? getTemperature() : context.getTestSetItem().getTemperature();
            // Create a request context for sending through ModelRequestService
            ModelRequestContext requestContext = ModelRequestContext.builder()
                    .contextId(messageTask.getMessageId())
                    .contextType(ModelRequestTrace.ContextType.MESSAGE_TASK)
                    .requestType(ModelRequestTrace.RequestType.IMAGE_TO_TEXT)
                    .promptText(evaluationPrompt)
                    .messageTask(messageTask)
                    .imageData(List.of(new ModelContentMessage.ModelContentElement.ImageData(
                            generatedImage.getData(),
                            generatedImage.getMimeType() != null ? generatedImage.getMimeType() : "image/png"
                    )))
                    .purpose("image_evaluation_test")
                    .model(modelId) // Ensure model ID is set here
                    .build();
            // Set system message requesting visual analysis capabilities
            messageTask.setSystemMessage(
                    "You are a specialist in analyzing and evaluating images. " +
                    "You have excellent visual perception and can accurately describe and assess images. " +
                    "Focus on whether the image faithfully represents what was requested in the prompt. " +
                    "Be specific in your assessment, referencing visual elements, subjects, composition, style, etc."
            );
            // Set parameters
            messageTask.setMessage(evaluationPrompt);
            messageTask.setWorkflow(workflow);
            messageTask.setModelId(modelId);
            messageTask.setTemperature(temperature);
            messageTask.setPurpose("image_evaluation_test");
            // Send the evaluation request via direct call to ModelRequestService
            ModelTextResponse modelResponse = aiService.getModelRequestService().imageToText(
                    aiService.getModelClient(),
                    requestContext
            );
            String responseText = modelResponse.getResponse();
            messageTask.setResult(responseText);
            messageTask.setResponseTime(System.currentTimeMillis());
            // Parse the response to determine pass/fail (null-safe helper)
            boolean passed = isPassVerdict(responseText);
            LlmEvalResult evalResult = LlmEvalResult.builder()
                    .passed(passed)
                    .message(passed ? "Image evaluation passed" : "Image evaluation failed")
                    .feedback(responseText)
                    .prompt(evaluationPrompt)
                    .build();
            // Create result output and apply negation if configured
            EvaluationResult.EvaluationOutput output = EvaluationResult.EvaluationOutput.builder()
                    .status(evalResult.isPassed() ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                    .message(evalResult.getMessage())
                    .details(evalResult)
                    .build();
            return applyNegation(output);
        } catch (Exception e) {
            log.error("Error in LLM image evaluation: {}", e.getMessage(), e);
            return EvaluationResult.EvaluationOutput.builder()
                    .status(EvaluationResult.EvaluationStatus.ERROR)
                    .message("LLM image evaluation error: " + e.getMessage())
                    .build();
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/ManualEvalConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonTypeName;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import lombok.extern.slf4j.Slf4j;
/**
 * Configuration for evaluations that require manual user inspection.
 *
 * <p>These evaluations start in PENDING status and stay there until a human
 * reviewer manually marks them as PASSED or FAILED.
 */
@Slf4j
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonTypeName("MANUAL_EVALUATION")
public class ManualEvalConfig extends EvaluationConfig {
    /**
     * Optional guidance shown to the human reviewer
     */
    private String reviewInstructions;

    @Override
    public EvaluationResult.EvaluationOutput evaluate(EvaluationContext context) {
        // A manual check cannot be decided automatically: emit PENDING and let
        // a human flip it later. Negation is deliberately not applied, since
        // PENDING has no meaningful inverse.
        ManualEvalDetails details = new ManualEvalDetails(getReviewInstructions());
        return EvaluationResult.EvaluationOutput.builder()
                .status(EvaluationResult.EvaluationStatus.PENDING)
                .message("Awaiting manual review")
                .details(details)
                .build();
    }

    /**
     * Payload describing what the reviewer should verify
     */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class ManualEvalDetails {
        private String reviewInstructions;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/RegexMatchEvalConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import lombok.extern.slf4j.Slf4j;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
/**
 * Configuration for evaluations that use regex pattern matching.
 *
 * <p>Counts occurrences of {@link #pattern} in the actual result and checks
 * them against {@link #shouldNotMatch} or the optional min/max bounds.
 */
@Slf4j
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class RegexMatchEvalConfig extends EvaluationConfig {
    /**
     * Regular expression pattern to match against
     */
    private String pattern;
    /**
     * If true, the regex should NOT match for the test to pass
     */
    private boolean shouldNotMatch;
    /**
     * Minimum number of matches required (null = no lower bound)
     */
    private Integer minMatches;
    /**
     * Maximum number of matches allowed (null = no upper bound)
     */
    private Integer maxMatches;

    /**
     * Result of regex match evaluation
     */
    @Data
    @Builder
    public static class RegexMatchEvalResult {
        private boolean passed;
        private String message;
        private String pattern;
        private int matchCount;
        private boolean shouldNotMatch;
        private Integer minMatches;
        private Integer maxMatches;
        private List<String> matches;
        private String error;
    }

    @Override
    public EvaluationResult.EvaluationOutput evaluate(EvaluationContext context) {
        String result = context.getActualResult();
        // Check if pattern is empty
        if (pattern == null || pattern.isEmpty()) {
            return createOutputFromResult(failureResult(
                    "No regex pattern specified for matching", "Missing pattern"));
        }
        // Fix: a null actual result previously caused an NPE inside Matcher;
        // report it explicitly instead.
        if (result == null) {
            return createOutputFromResult(failureResult(
                    "No actual result available to match against", "Missing input"));
        }
        try {
            // Compile the pattern and collect every match.
            Pattern regex = Pattern.compile(pattern);
            Matcher matcher = regex.matcher(result);
            int count = 0;
            List<String> matches = new ArrayList<>();
            while (matcher.find()) {
                count++;
                matches.add(matcher.group());
            }
            // If shouldNotMatch is true, we expect no matches at all.
            if (shouldNotMatch) {
                boolean passed = count == 0;
                String message = passed
                        ? "No regex matches found (as expected)"
                        : "Found " + count + " regex matches (expected none)";
                return createOutputFromResult(baseResult()
                        .passed(passed)
                        .message(message)
                        .matchCount(count)
                        .matches(matches)
                        .build());
            }
            // Otherwise check the count against the optional min/max bounds.
            boolean passedMin = minMatches == null || count >= minMatches;
            boolean passedMax = maxMatches == null || count <= maxMatches;
            return createOutputFromResult(baseResult()
                    .passed(passedMin && passedMax)
                    .message(buildBoundsMessage(count, passedMin, passedMax))
                    .matchCount(count)
                    .matches(matches)
                    .build());
        } catch (PatternSyntaxException e) {
            log.error("Invalid regex pattern: {}", e.getMessage(), e);
            return createOutputFromResult(failureResult(
                    "Invalid regex pattern: " + e.getMessage(),
                    "Invalid pattern: " + e.getMessage()));
        } catch (Exception e) {
            log.error("Error evaluating regex match: {}", e.getMessage(), e);
            return createOutputFromResult(failureResult(
                    "Error evaluating regex match: " + e.getMessage(), e.getMessage()));
        }
    }

    /** Pre-populates a result builder with the shared configuration fields. */
    private RegexMatchEvalResult.RegexMatchEvalResultBuilder baseResult() {
        return RegexMatchEvalResult.builder()
                .pattern(pattern)
                .shouldNotMatch(shouldNotMatch)
                .minMatches(minMatches)
                .maxMatches(maxMatches);
    }

    /** Builds a failed result with zero matches and the given message/error. */
    private RegexMatchEvalResult failureResult(String message, String error) {
        return baseResult()
                .passed(false)
                .message(message)
                .matchCount(0)
                .matches(new ArrayList<>())
                .error(error)
                .build();
    }

    /** Describes the match count and which min/max bound (if any) was violated. */
    private String buildBoundsMessage(int count, boolean passedMin, boolean passedMax) {
        StringBuilder message = new StringBuilder();
        message.append("Found ").append(count).append(" regex matches");
        if (!(passedMin && passedMax)) {
            message.append(" - ");
            if (!passedMin) {
                message.append("expected at least ").append(minMatches);
            }
            if (!passedMin && !passedMax) {
                message.append(" and ");
            }
            if (!passedMax) {
                message.append("expected at most ").append(maxMatches);
            }
        }
        return message.toString();
    }

    /** Wraps a result into the standard output and applies negation if configured. */
    private EvaluationResult.EvaluationOutput createOutputFromResult(RegexMatchEvalResult evalResult) {
        EvaluationResult.EvaluationOutput output = EvaluationResult.EvaluationOutput.builder()
                .status(evalResult.isPassed() ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                .message(evalResult.getMessage())
                .details(evalResult)
                .build();
        return applyNegation(output);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/TestSet.java
|
package ai.driftkit.context.spring.testsuite.domain;
import lombok.Data;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
/**
 * Mongo document describing a named collection of test items, optionally grouped
 * into a folder. Timestamps are epoch millis (see EvaluationService usage).
 */
@Data
@Document(collection = "testsets")
public class TestSet {
    // Mongo document id; assigned by the store when null on save
    @Id
    private String id;
    // Human-readable name of the test set
    private String name;
    // Optional free-form description
    private String description;
    // Id of the owning Folder, or null for test sets outside any folder
    private String folderId;
    // Creation time in epoch millis
    private Long createdAt;
    // Last-modification time in epoch millis
    private Long updatedAt;
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/TestSetItem.java
|
package ai.driftkit.context.spring.testsuite.domain;
import ai.driftkit.common.domain.Language;
import java.util.Map;
/**
 * Interface for test set items.
 *
 * <p>A test set item captures one recorded model interaction (message, settings,
 * and result) so it can be replayed and evaluated as part of a test run.
 */
public interface TestSetItem {
    /** Unique id of this item. */
    String getId();
    /** Id of the owning test set. */
    String getTestSetId();
    /** The user message / prompt that was sent. */
    String getMessage();
    /** Optional system message accompanying the prompt. */
    String getSystemMessage();
    /** Language of the interaction; callers default to Language.GENERAL when null. */
    Language getLanguage();
    /** Workflow type identifier (used as the workflow when replaying — see EvaluationService). */
    String getWorkflowType();
    /** Workflow identifier; NOTE(review): distinct from workflowType — confirm intended difference. */
    String getWorkflow();
    /** Whether the request payload was JSON. */
    boolean isJsonRequest();
    /** Whether a JSON response was requested. */
    boolean isJsonResponse();
    /** Logprobs setting; values > 0 enable logprobs on replay. */
    Integer getLogprobs();
    /** Top-logprobs count, if any. */
    Integer getTopLogprobs();
    /** Sampling temperature recorded for the item, if any. */
    Double getTemperature();
    /** Model id recorded for the item, if any. */
    String getModel();
    /** Recorded prompt variables as a String->Object map; may be null when none were stored. */
    Map<String, Object> getVariablesAsObjectMap();
    /** The recorded (expected/original) result of the interaction. */
    String getResult();
    /** True when this item represents an image-generation task. */
    boolean isImageTask();
    /** Id of the original image task, when this is an image item. */
    String getOriginalImageTaskId();
    /** Creation time in epoch millis. */
    Long getCreatedAt();
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/WordCountEvalConfig.java
|
package ai.driftkit.context.spring.testsuite.domain;
import com.fasterxml.jackson.annotation.JsonInclude;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import lombok.experimental.SuperBuilder;
import lombok.extern.slf4j.Slf4j;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Configuration for evaluations that count word occurrences.
 *
 * <p>Counts how many times {@link #word} appears in the actual model output and
 * passes when the count falls within the optional [minOccurrences, maxOccurrences]
 * bounds (a {@code null} bound is unbounded on that side).
 */
@Slf4j
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@EqualsAndHashCode(callSuper = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class WordCountEvalConfig extends EvaluationConfig {
    /**
     * The word or phrase to count
     */
    private String word;
    /**
     * Minimum occurrences required; {@code null} means no lower bound
     */
    private Integer minOccurrences;
    /**
     * Maximum occurrences allowed; {@code null} means no upper bound
     */
    private Integer maxOccurrences;
    /**
     * If true, matching is case-sensitive
     */
    private boolean caseSensitive;
    /**
     * If true, matches only whole words (not parts of words)
     */
    private boolean wholeWordsOnly;
    /**
     * Result of word count evaluation
     */
    @Data
    @Builder
    public static class WordCountEvalResult {
        private boolean passed;
        private String message;
        private int count;
        private String word;
        private Integer minOccurrences;
        private Integer maxOccurrences;
        private boolean caseSensitive;
        private boolean wholeWordsOnly;
    }

    /**
     * Counts occurrences of {@link #word} in {@code context.getActualResult()} and
     * checks the count against the configured min/max bounds.
     *
     * @param context evaluation context supplying the actual model output
     * @return PASSED/FAILED output (negation applied via {@code applyNegation})
     */
    @Override
    public EvaluationResult.EvaluationOutput evaluate(EvaluationContext context) {
        String result = context.getActualResult();
        // Check if word is empty
        if (word == null || word.isEmpty()) {
            WordCountEvalResult evalResult = WordCountEvalResult.builder()
                    .passed(false)
                    .message("No word specified for counting")
                    .count(0)
                    .word(word)
                    .minOccurrences(minOccurrences)
                    .maxOccurrences(maxOccurrences)
                    .caseSensitive(caseSensitive)
                    .wholeWordsOnly(wholeWordsOnly)
                    .build();
            return createOutputFromResult(evalResult);
        }
        // Guard against a missing result: treat it as empty text instead of
        // letting the matcher / indexOf calls below throw an NPE.
        String text = result == null ? "" : result;
        // Count occurrences
        int count = wholeWordsOnly ? countWholeWords(text) : countSubstrings(text);
        // Check against min/max (a null bound is unbounded on that side)
        boolean passedMin = minOccurrences == null || count >= minOccurrences;
        boolean passedMax = maxOccurrences == null || count <= maxOccurrences;
        boolean passed = passedMin && passedMax;
        // Build message
        StringBuilder message = new StringBuilder();
        message.append("Found ").append(count).append(" occurrences of '").append(word).append("'");
        if (!passed) {
            message.append(" - ");
            if (!passedMin) {
                message.append("expected at least ").append(minOccurrences);
            }
            if (!passedMin && !passedMax) {
                message.append(" and ");
            }
            if (!passedMax) {
                message.append("expected at most ").append(maxOccurrences);
            }
        }
        WordCountEvalResult evalResult = WordCountEvalResult.builder()
                .passed(passed)
                .message(message.toString())
                .count(count)
                .word(word)
                .minOccurrences(minOccurrences)
                .maxOccurrences(maxOccurrences)
                .caseSensitive(caseSensitive)
                .wholeWordsOnly(wholeWordsOnly)
                .build();
        return createOutputFromResult(evalResult);
    }

    /** Counts whole-word matches of {@link #word} using a \b-delimited, quoted regex. */
    private int countWholeWords(String text) {
        String regex = caseSensitive ? "\\b" + Pattern.quote(word) + "\\b" : "(?i)\\b" + Pattern.quote(word) + "\\b";
        Matcher matcher = Pattern.compile(regex).matcher(text);
        int count = 0;
        while (matcher.find()) {
            count++;
        }
        return count;
    }

    /** Counts non-overlapping substring occurrences of {@link #word}. */
    private int countSubstrings(String text) {
        String searchWord = word;
        String searchResult = text;
        // Apply case-sensitivity settings (default-locale lowercasing, as before)
        if (!caseSensitive) {
            searchWord = searchWord.toLowerCase();
            searchResult = searchResult.toLowerCase();
        }
        int count = 0;
        int index = 0;
        while ((index = searchResult.indexOf(searchWord, index)) != -1) {
            count++;
            // advance past this match so overlapping matches are not double-counted
            index += searchWord.length();
        }
        return count;
    }

    /** Maps the eval result onto the shared EvaluationOutput shape and applies negation. */
    private EvaluationResult.EvaluationOutput createOutputFromResult(WordCountEvalResult evalResult) {
        EvaluationResult.EvaluationOutput output = EvaluationResult.EvaluationOutput.builder()
                .status(evalResult.isPassed() ? EvaluationResult.EvaluationStatus.PASSED : EvaluationResult.EvaluationStatus.FAILED)
                .message(evalResult.getMessage())
                .details(evalResult)
                .build();
        return applyNegation(output);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/domain/archive/TestSetItemImpl.java
|
package ai.driftkit.context.spring.testsuite.domain.archive;
import ai.driftkit.context.spring.testsuite.domain.TestSetItem;
import ai.driftkit.common.domain.Language;
import lombok.Builder;
import lombok.Data;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
import java.util.HashMap;
import java.util.Map;
/**
 * Mongo-backed implementation of {@link TestSetItem}, storing one recorded
 * model interaction for replay during evaluation runs.
 */
@Data
@Builder
@Document(collection = "testsetitems")
public class TestSetItemImpl implements TestSetItem {
    // Mongo document id
    @Id
    private String id;
    // Owning test set id
    private String testSetId;
    // User message / prompt that was sent
    private String message;
    // Optional system message
    private String systemMessage;
    // Interaction language; consumers default to GENERAL when null
    private Language language;
    // Workflow type identifier
    private String workflowType;
    // Workflow identifier (distinct from workflowType)
    private String workflow;
    private boolean jsonRequest;
    private boolean jsonResponse;
    private Integer logprobs;
    private Integer topLogprobs;
    private Double temperature;
    // Model id recorded for the item, if any
    private String model;
    // Prompt variables recorded as strings; converted via getVariablesAsObjectMap()
    private Map<String, String> variables;
    // Recorded (expected/original) result
    private String result;
    // True when this item represents an image-generation task
    private boolean isImageTask;
    // Id of the original image task, when isImageTask is true
    private String originalImageTaskId;
    private String originalMessageTaskId;
    private String originalTraceId;
    private String promptId;
    // Epoch millis
    private Long createdAt;
    private Long updatedAt;

    /**
     * Widens the stored String->String variables into a String->Object map.
     * Returns null (not an empty map) when no variables were recorded —
     * callers in EvaluationService explicitly handle the null case.
     */
    @Override
    public Map<String, Object> getVariablesAsObjectMap() {
        return variables != null ? new HashMap<>(variables) : null;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/repository/EvaluationRepository.java
|
package ai.driftkit.context.spring.testsuite.repository;
import ai.driftkit.context.spring.testsuite.domain.Evaluation;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
import java.util.List;
@Repository
public interface EvaluationRepository extends MongoRepository<Evaluation, String> {
    /**
     * Find all evaluations for a test set
     *
     * @param testSetId id of the owning test set
     */
    List<Evaluation> findByTestSetId(String testSetId);
    /**
     * Find all global evaluations (where testSetId is null)
     */
    List<Evaluation> findByTestSetIdIsNull();
    /**
     * Delete all evaluations for a test set
     *
     * @param testSetId id of the owning test set
     */
    void deleteByTestSetId(String testSetId);
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/repository/EvaluationResultRepository.java
|
package ai.driftkit.context.spring.testsuite.repository;
import ai.driftkit.context.spring.testsuite.domain.EvaluationResult;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
import java.util.List;
@Repository
public interface EvaluationResultRepository extends MongoRepository<EvaluationResult, String> {
    /**
     * Find all results for a specific evaluation
     */
    List<EvaluationResult> findByEvaluationId(String evaluationId);
    /**
     * Find all results for a specific test set item
     */
    List<EvaluationResult> findByTestSetItemId(String testSetItemId);
    /**
     * Find all results for a specific run
     */
    List<EvaluationResult> findByRunId(String runId);
    /**
     * Find all results for a specific evaluation and run
     */
    List<EvaluationResult> findByEvaluationIdAndRunId(String evaluationId, String runId);
    /**
     * Delete all results for a specific evaluation
     */
    void deleteByEvaluationId(String evaluationId);
    /**
     * Delete all results for a specific run
     */
    void deleteByRunId(String runId);
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/repository/EvaluationRunRepository.java
|
package ai.driftkit.context.spring.testsuite.repository;
import ai.driftkit.context.spring.testsuite.domain.EvaluationRun;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
import java.util.List;
@Repository
public interface EvaluationRunRepository extends MongoRepository<EvaluationRun, String> {
    /**
     * Find all runs for a test set
     */
    List<EvaluationRun> findByTestSetId(String testSetId);
    /**
     * Find all runs with a specific status
     *
     * @param status run lifecycle state (QUEUED/RUNNING/COMPLETED/FAILED/PENDING)
     */
    List<EvaluationRun> findByStatus(EvaluationRun.RunStatus status);
    /**
     * Delete all runs for a test set
     */
    void deleteByTestSetId(String testSetId);
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/repository/FolderRepository.java
|
package ai.driftkit.context.spring.testsuite.repository;
import ai.driftkit.context.spring.testsuite.domain.Folder;
import ai.driftkit.context.spring.testsuite.domain.FolderType;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
import java.util.List;
@Repository
public interface FolderRepository extends MongoRepository<Folder, String> {
    /**
     * Find folders of the given type, newest first
     */
    List<Folder> findByTypeOrderByCreatedAtDesc(FolderType type);
    /**
     * Find all folders, newest first
     */
    List<Folder> findAllByOrderByCreatedAtDesc();
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/repository/TestSetItemRepository.java
|
package ai.driftkit.context.spring.testsuite.repository;
import ai.driftkit.context.spring.testsuite.domain.archive.TestSetItemImpl;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
import java.util.List;
@Repository
public interface TestSetItemRepository extends MongoRepository<TestSetItemImpl, String> {
    /**
     * Find all items belonging to a test set (unordered)
     */
    List<TestSetItemImpl> findByTestSetId(String testSetId);
    /**
     * Find all items belonging to a test set, newest first
     */
    List<TestSetItemImpl> findByTestSetIdOrderByCreatedAtDesc(String testSetId);
    /**
     * Delete all items belonging to a test set
     */
    void deleteByTestSetId(String testSetId);
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/repository/TestSetRepository.java
|
package ai.driftkit.context.spring.testsuite.repository;
import ai.driftkit.context.spring.testsuite.domain.TestSet;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
import java.util.List;
@Repository
public interface TestSetRepository extends MongoRepository<TestSet, String> {
    /**
     * Find all test sets, newest first
     */
    List<TestSet> findAllByOrderByCreatedAtDesc();
    /**
     * Find test sets in a folder, newest first
     */
    List<TestSet> findByFolderIdOrderByCreatedAtDesc(String folderId);
    /**
     * Find test sets that are not assigned to any folder, newest first
     */
    List<TestSet> findByFolderIdIsNullOrderByCreatedAtDesc();
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/service/EvaluationService.java
|
package ai.driftkit.context.spring.testsuite.service;
import ai.driftkit.common.domain.ImageMessageTask;
import ai.driftkit.common.domain.Language;
import ai.driftkit.common.domain.MessageTask;
import ai.driftkit.context.core.util.PromptUtils;
import ai.driftkit.context.spring.testsuite.domain.*;
import ai.driftkit.context.spring.testsuite.repository.*;
import ai.driftkit.workflows.spring.domain.ImageMessageTaskEntity;
import ai.driftkit.workflows.spring.repository.ImageTaskRepository;
import ai.driftkit.workflows.spring.service.ImageModelService;
import ai.driftkit.workflows.spring.service.AIService;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Service;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
/**
* Service for managing evaluations for test sets
*/
@Slf4j
@Service
@RequiredArgsConstructor
public class EvaluationService {
// NOTE(review): SimpleDateFormat is not thread-safe, and DATE_FORMAT is used from
// caller threads while runs execute concurrently — consider a DateTimeFormatter.
private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
// Purpose tag attached to all message tasks produced by evaluation runs
public static final String QA_TEST_PIPELINE_PURPOSE = "qa_test_pipeline";
// Pool used both to schedule whole runs and to execute them (see executeRun)
private ExecutorService testExecutor = Executors.newFixedThreadPool(
        Math.max(2, Runtime.getRuntime().availableProcessors())
);
private final EvaluationRepository evaluationRepository;
private final EvaluationResultRepository resultRepository;
private final EvaluationRunRepository runRepository;
private final TestSetItemRepository testSetItemRepository;
private final TestSetRepository testSetRepository;
private final AIService aiService;
private final ImageModelService imageModelService;
private final ImageTaskRepository imageTaskRepository;
/**
 * Create a new evaluation.
 *
 * <p>Stamps creation and update times from a single clock read so a freshly
 * created evaluation always has {@code createdAt == updatedAt} (the original
 * read the clock twice, which could yield differing timestamps).
 *
 * @param evaluation the evaluation to persist
 * @return the saved evaluation (with id assigned by the store)
 */
public Evaluation createEvaluation(Evaluation evaluation) {
    long now = System.currentTimeMillis();
    evaluation.setCreatedAt(now);
    evaluation.setUpdatedAt(now);
    return evaluationRepository.save(evaluation);
}
/**
 * Copy an evaluation to another test set.
 *
 * @param evaluationId    id of the evaluation to copy
 * @param targetTestSetId test set the copy is attached to
 * @return the saved copy, named "&lt;original&gt; (Copy)"
 * @throws IllegalArgumentException when the source evaluation does not exist
 */
public Evaluation copyEvaluation(String evaluationId, String targetTestSetId) {
    Evaluation existing = evaluationRepository.findById(evaluationId)
            .orElseThrow(() -> new IllegalArgumentException("Evaluation not found: " + evaluationId));
    Evaluation copy = new Evaluation();
    copy.setName(existing.getName() + " (Copy)");
    copy.setDescription(existing.getDescription());
    copy.setTestSetId(targetTestSetId);
    copy.setType(existing.getType());
    // NOTE: config is shared by reference, not deep-copied (as before)
    copy.setConfig(existing.getConfig());
    long now = System.currentTimeMillis();
    copy.setCreatedAt(now);
    copy.setUpdatedAt(now);
    return evaluationRepository.save(copy);
}
/**
 * Get all evaluations for a test set
 *
 * @param testSetId id of the owning test set
 * @return evaluations attached to the test set (possibly empty)
 */
public List<Evaluation> getEvaluationsForTestSet(String testSetId) {
    return evaluationRepository.findByTestSetId(testSetId);
}
/**
 * Get all global evaluations (not tied to a specific test set)
 *
 * @return evaluations whose testSetId is null
 */
public List<Evaluation> getGlobalEvaluations() {
    return evaluationRepository.findByTestSetIdIsNull();
}
/**
 * Get a specific evaluation
 *
 * @param id evaluation id
 * @return the evaluation, or empty when not found
 */
public Optional<Evaluation> getEvaluation(String id) {
    return evaluationRepository.findById(id);
}
/**
 * Update an evaluation, preserving its original creation timestamp.
 *
 * @param id         id of the evaluation to update
 * @param evaluation new state; its id/createdAt are overwritten from the store
 * @return the saved evaluation
 * @throws IllegalArgumentException when no evaluation with {@code id} exists
 */
public Evaluation updateEvaluation(String id, Evaluation evaluation) {
    Evaluation existing = evaluationRepository.findById(id)
            .orElseThrow(() -> new IllegalArgumentException("Evaluation not found: " + id));
    evaluation.setId(id);
    // keep the original creation time; only updatedAt moves forward
    evaluation.setCreatedAt(existing.getCreatedAt());
    evaluation.setUpdatedAt(System.currentTimeMillis());
    return evaluationRepository.save(evaluation);
}
/**
 * Delete an evaluation
 *
 * <p>Also removes all results produced by this evaluation so no orphaned
 * result documents remain.
 *
 * @param id evaluation id
 */
public void deleteEvaluation(String id) {
    resultRepository.deleteByEvaluationId(id);
    evaluationRepository.deleteById(id);
}
/**
 * Create a new evaluation run
 *
 * <p>The run is persisted in QUEUED state; execution is triggered separately
 * (see executeEvaluationRun).
 *
 * @param run run to persist
 * @return the saved run
 * @throws IllegalArgumentException when both modelId and workflow are set —
 *         the two execution methods are mutually exclusive
 */
public EvaluationRun createEvaluationRun(EvaluationRun run) {
    if (StringUtils.isNotBlank(run.getModelId()) && StringUtils.isNotBlank(run.getWorkflow())) {
        throw new IllegalArgumentException("Cannot specify both modelId and workflow. Choose one execution method.");
    }
    run.setStatus(EvaluationRun.RunStatus.QUEUED);
    run.setStartedAt(System.currentTimeMillis());
    return runRepository.save(run);
}
/**
 * Get all runs for a test set
 *
 * @param testSetId id of the owning test set
 */
public List<EvaluationRun> getRunsForTestSet(String testSetId) {
    return runRepository.findByTestSetId(testSetId);
}
/**
 * Get all runs across all test sets
 */
public List<EvaluationRun> getAllRuns() {
    return runRepository.findAll();
}
/**
 * Get a specific run
 *
 * @param id run id
 * @return the run, or empty when not found
 */
public Optional<EvaluationRun> getRun(String id) {
    return runRepository.findById(id);
}
/**
 * Delete a run and all its results
 *
 * @param id run id
 */
public void deleteRun(String id) {
    // remove dependent result documents first
    resultRepository.deleteByRunId(id);
    runRepository.deleteById(id);
}
/**
 * Get all results for a run
 *
 * @param runId run id
 */
public List<EvaluationResult> getResultsForRun(String runId) {
    return resultRepository.findByRunId(runId);
}
/**
 * Create a new evaluation run for a test set and immediately execute it
 *
 * <p>The run is persisted first (so the caller receives an id), then execution
 * is scheduled on the shared executor. The run is re-read from the repository
 * inside the task to pick up the persisted state.
 *
 * @param testSetId test set to run
 * @return the saved run (execution proceeds asynchronously)
 */
public EvaluationRun createAndExecuteRun(String testSetId) {
    EvaluationRun run = EvaluationRun.builder()
            .testSetId(testSetId)
            .name("Run " + DATE_FORMAT.format(new Date()))
            .description("Automatic run")
            .status(EvaluationRun.RunStatus.QUEUED)
            .startedAt(System.currentTimeMillis())
            .build();
    EvaluationRun savedRun = runRepository.save(run);
    final String runId = savedRun.getId();
    // NOTE: executeRun itself submits to the same pool, so this double-submits;
    // harmless, but the outer task only loads the run and delegates.
    testExecutor.submit(() -> {
        try {
            Optional<EvaluationRun> optionalRun = runRepository.findById(runId);
            if (optionalRun.isEmpty()) {
                log.error("Run not found: {}", runId);
                return;
            }
            EvaluationRun runToExecute = optionalRun.get();
            executeRun(runToExecute);
        } catch (Exception e) {
            log.error("Error in background execution of run {}: {}", runId, e.getMessage(), e);
        }
    });
    return savedRun;
}
/**
 * Create and execute runs for all test sets in a folder
 *
 * <p>Convenience overload that does not regenerate images.
 *
 * @param folderId folder whose test sets are run
 * @param modelId  optional model override (mutually exclusive with workflow)
 * @param workflow optional workflow override
 */
public List<EvaluationRun> createAndExecuteRunsForFolder(String folderId, String modelId, String workflow) {
    return createAndExecuteRunsForFolder(folderId, modelId, workflow, false);
}
/**
 * Create and execute runs for all test sets in a folder.
 *
 * <p>Each run is persisted <em>before</em> execution is triggered — mirroring
 * {@code createAndExecuteRun} — so the runs returned to the caller have ids
 * assigned and the error log in the execution loop can reference them.
 * (Previously the runs were only saved inside the async {@code executeRun}
 * task, so the returned runs had null ids.)
 *
 * @param folderId         folder whose test sets are run
 * @param modelId          optional model override (blank to skip)
 * @param workflow         optional workflow override (blank to skip)
 * @param regenerateImages whether image items should be regenerated
 * @return the persisted runs, one per test set (empty when the folder has none)
 */
public List<EvaluationRun> createAndExecuteRunsForFolder(String folderId, String modelId, String workflow, boolean regenerateImages) {
    log.info("Creating and executing runs for all test sets in folder: {}", folderId);
    log.info("Options: modelId={}, workflow={}, regenerateImages={}", modelId, workflow, regenerateImages);
    List<TestSet> testSets = testSetRepository.findByFolderIdOrderByCreatedAtDesc(folderId);
    if (testSets.isEmpty()) {
        log.warn("No test sets found in folder: {}", folderId);
        return Collections.emptyList();
    }
    List<EvaluationRun> runs = new ArrayList<>();
    for (TestSet testSet : testSets) {
        EvaluationRun run = EvaluationRun.builder()
                .testSetId(testSet.getId())
                .name("Folder Run " + DATE_FORMAT.format(new Date()))
                .description("Run as part of folder execution")
                .status(EvaluationRun.RunStatus.QUEUED)
                .startedAt(System.currentTimeMillis())
                .regenerateImages(regenerateImages)
                .build();
        if (StringUtils.isNotBlank(modelId)) {
            run.setModelId(modelId);
        }
        if (StringUtils.isNotBlank(workflow)) {
            run.setWorkflow(workflow);
        }
        // Persist before executing so the run has an id for logging and for the caller
        runs.add(runRepository.save(run));
    }
    for (EvaluationRun run : runs) {
        try {
            executeRun(run);
        } catch (Exception e) {
            log.error("Error in background execution of run {}: {}", run.getId(), e.getMessage(), e);
        }
    }
    return runs;
}
/**
 * Start the evaluation process for the run with the given id; logs an error
 * when no such run exists.
 *
 * @param runId id of a previously created run
 */
public void executeEvaluationRun(String runId) {
    runRepository.findById(runId).ifPresentOrElse(
            this::executeRun,
            () -> log.error("Run not found: {}", runId));
}
/**
 * Executes a run asynchronously on the shared pool: loads the test set's items
 * and evaluations, processes each item (image or text), evaluates every result,
 * tallies statuses, and finalizes the run as PENDING / FAILED / COMPLETED.
 *
 * @param run the run to execute; mutated (status, counts, timestamps) and saved
 * @return a Future resolving to the same run instance once execution finishes
 */
private Future<EvaluationRun> executeRun(EvaluationRun run) {
    return testExecutor.submit(() -> {
        run.setStatus(EvaluationRun.RunStatus.RUNNING);
        runRepository.save(run);
        try {
            List<TestSetItem> items = testSetItemRepository.findByTestSetId(run.getTestSetId()).stream()
                    .map(TestSetItem.class::cast)
                    .toList();
            // Nothing to do: an empty test set or one with no evaluations completes immediately
            if (items.isEmpty()) {
                log.warn("No items found for test set: {}", run.getTestSetId());
                completeRunWithStatus(run, EvaluationRun.RunStatus.COMPLETED);
                return run;
            }
            List<Evaluation> evaluations = evaluationRepository.findByTestSetId(run.getTestSetId());
            if (evaluations.isEmpty()) {
                log.warn("No evaluations found for test set: {}", run.getTestSetId());
                completeRunWithStatus(run, EvaluationRun.RunStatus.COMPLETED);
                return run;
            }
            // status name -> occurrence count across all (item, evaluation) pairs
            Map<String, Integer> statusCounts = new HashMap<>();
            for (TestSetItem item : items) {
                String actualResult;
                ProcessingResult processingResult = null;
                if (item.isImageTask()) {
                    processingResult = processImageTask(item, run);
                } else {
                    processingResult = processWithAlternativePrompt(item, run);
                }
                actualResult = processingResult.getModelResult();
                for (Evaluation evaluation : evaluations) {
                    EvaluationResult result;
                    // NOTE(review): processingResult is assigned on both branches above,
                    // so this null check (and the two-arg evaluateResult path) is dead code.
                    if (processingResult != null) {
                        result = evaluateResult(item, actualResult, evaluation, run.getId(), processingResult);
                    } else {
                        result = evaluateResult(item, actualResult, evaluation, run.getId());
                    }
                    String statusKey = result.getStatus().toString();
                    statusCounts.put(statusKey, statusCounts.getOrDefault(statusKey, 0) + 1);
                }
            }
            run.setStatusCounts(statusCounts);
            // Finalization precedence: any PENDING manual review > any FAILED/ERROR > COMPLETED
            int pendingCount = statusCounts.getOrDefault("PENDING", 0);
            if (pendingCount > 0) {
                completeRunWithStatus(run, EvaluationRun.RunStatus.PENDING);
                log.info("Run {} set to PENDING status due to {} pending manual evaluations", run.getId(), pendingCount);
            } else if (statusCounts.getOrDefault("FAILED", 0) > 0 || statusCounts.getOrDefault("ERROR", 0) > 0) {
                completeRunWithStatus(run, EvaluationRun.RunStatus.FAILED);
                log.info("Run {} completed with FAILED status due to failed tests", run.getId());
            } else {
                completeRunWithStatus(run, EvaluationRun.RunStatus.COMPLETED);
                log.info("Run {} completed with COMPLETED status, all tests passed", run.getId());
            }
        } catch (Exception e) {
            log.error("Error executing evaluation run: {}", e.getMessage(), e);
            completeRunWithStatus(run, EvaluationRun.RunStatus.FAILED);
        }
        return run;
    });
}
/**
 * Marks the run finished with the given terminal status, stamps completion
 * time, and persists it.
 */
private void completeRunWithStatus(EvaluationRun run, EvaluationRun.RunStatus status) {
    run.setStatus(status);
    run.setCompletedAt(System.currentTimeMillis());
    runRepository.save(run);
}
/**
 * Process a test set item with alternative prompt and track execution metrics
 *
 * <p>Builds a MessageTask from the item's recorded settings (optionally
 * overridden by the run's alternative prompt/temperature/model), sends it to
 * the AI service, and packages the outcome — including timing and error
 * details — into a {@link ProcessingResult}. Never throws: failures are
 * captured in the returned result.
 *
 * @param item the recorded interaction to replay
 * @param run  the run providing optional overrides
 * @return the processing outcome; {@code success=false} with error details on failure
 */
private ProcessingResult processWithAlternativePrompt(TestSetItem item, EvaluationRun run) {
    ProcessingResult result = new ProcessingResult();
    long startTime = System.currentTimeMillis();
    try {
        log.info("Processing test item with ID: {} for run: {}", item.getId(), run.getId());
        MessageTask messageTask = new MessageTask();
        messageTask.setMessageId(UUID.randomUUID().toString());
        log.debug("Created message task with ID: {}", messageTask.getMessageId());
        // Run-level alternative prompt template wins over the item's stored message
        String prompt;
        if (run.getAlternativePromptTemplate() != null) {
            prompt = run.getAlternativePromptTemplate();
            log.debug("Using alternative prompt template: {}", prompt);
        } else {
            prompt = item.getMessage();
            log.debug("Using original message from test item: {}", prompt);
        }
        messageTask.setMessage(prompt);
        result.setOriginalPrompt(prompt);
        if (run.getAlternativePromptId() != null) {
            log.debug("Setting alternative prompt ID: {}", run.getAlternativePromptId());
            if (messageTask.getPromptIds() == null) {
                messageTask.setPromptIds(new ArrayList<>());
            }
            messageTask.getPromptIds().add(run.getAlternativePromptId());
        }
        // Variables may be null when the item recorded none
        Map<String, Object> variables = item.getVariablesAsObjectMap();
        messageTask.setVariables(variables);
        result.setPromptVariables(variables);
        log.debug("Set {} variables from test item", variables != null ? variables.size() : 0);
        messageTask.setSystemMessage(item.getSystemMessage());
        Language language = Optional.ofNullable(item.getLanguage()).orElse(Language.GENERAL);
        messageTask.setLanguage(language);
        log.debug("Using language: {}", language);
        // NOTE(review): workflow is set from getWorkflowType(), but the debug line
        // below logs getWorkflow() — confirm which of the two fields is intended.
        messageTask.setWorkflow(item.getWorkflowType());
        log.debug("Set workflow: {}", item.getWorkflow());
        messageTask.setJsonRequest(item.isJsonRequest());
        messageTask.setJsonResponse(item.isJsonResponse());
        messageTask.setPurpose(QA_TEST_PIPELINE_PURPOSE);
        if (item.getLogprobs() != null) {
            // stored as a count; any positive value enables logprobs
            messageTask.setLogprobs(item.getLogprobs() > 0);
        }
        if (item.getTopLogprobs() != null) {
            messageTask.setTopLogprobs(item.getTopLogprobs());
        }
        // Temperature precedence: run override > item's recorded value
        if (run.getTemperature() != null) {
            messageTask.setTemperature(run.getTemperature());
            log.debug("Using temperature from run: {}", run.getTemperature());
        } else if (item.getTemperature() != null) {
            messageTask.setTemperature(item.getTemperature());
            log.debug("Using temperature from test item: {}", item.getTemperature());
        }
        // Model is only chosen when no workflow drives execution; run override wins
        if (messageTask.getWorkflow() == null && messageTask.getModelId() == null) {
            String modelId;
            if (run.getModelId() != null) {
                modelId = run.getModelId();
                log.debug("Using model ID from run: {}", modelId);
            } else {
                modelId = item.getModel();
                log.debug("Using model ID from test item: {}", modelId);
            }
            messageTask.setModelId(modelId);
        }
        log.info("Sending request to AI service with message task: {}", messageTask.getMessageId());
        MessageTask response = aiService.chat(messageTask);
        if (response == null) {
            throw new IllegalStateException("AI service returned null response");
        }
        String responseText = response.getResult();
        log.info("Received response from AI service: {}", responseText != null ? "success" : "null");
        // An empty response is reported as a failure, not an exception
        if (responseText == null || responseText.trim().isEmpty()) {
            log.warn("Empty response received from AI service for message task: {}", messageTask.getMessageId());
            result.setSuccess(false);
            result.setModelResult("ERROR: Empty response from model");
            result.setErrorDetails("The model returned an empty response. Check model configuration and permissions.");
            return result;
        }
        result.setModelResult(responseText);
        result.setSuccess(true);
        return result;
    } catch (Exception e) {
        log.error("Error processing with alternative prompt for test item {}: {}",
                item.getId(), e.getMessage(), e);
        result.setSuccess(false);
        result.setModelResult("ERROR: " + e.getMessage());
        result.setErrorDetails(e.getMessage() + "\n" + getStackTraceAsString(e));
        return result;
    } finally {
        // timing is recorded on every path, including failures
        long endTime = System.currentTimeMillis();
        long duration = endTime - startTime;
        result.setProcessingTimeMs(duration);
        log.info("Finished processing test item in {}ms", duration);
    }
}
/**
 * Process an image test set item
 *
 * <p>Loads the item's original image task; when the run requests regeneration,
 * rebuilds a MessageTask (honoring the run's alternative prompt template and
 * workflow override) and synchronously regenerates the images. Otherwise the
 * existing images are reused. The (new or original) ImageMessageTask is placed
 * in the result's additionalContext for downstream image evaluations.
 * Never throws: failures are captured in the returned result.
 *
 * @param item the image test item (must carry an originalImageTaskId)
 * @param run  the run providing regeneration and override settings
 * @return the processing outcome; {@code success=false} with error details on failure
 */
private ProcessingResult processImageTask(TestSetItem item, EvaluationRun run) {
    ProcessingResult result = new ProcessingResult();
    long startTime = System.currentTimeMillis();
    try {
        log.info("Processing image test item with ID: {} for run: {}", item.getId(), run.getId());
        if (StringUtils.isBlank(item.getOriginalImageTaskId())) {
            throw new IllegalArgumentException("No original image task ID found for test item: " + item.getId());
        }
        String imageTaskId = item.getOriginalImageTaskId();
        Optional<ImageMessageTaskEntity> originalImageTask = imageTaskRepository.findById(imageTaskId);
        if (originalImageTask.isEmpty()) {
            throw new IllegalArgumentException("Original image task not found: " + imageTaskId);
        }
        ImageMessageTask imageTask = originalImageTask.get();
        result.setOriginalPrompt(imageTask.getMessage());
        if (run.isRegenerateImages()) {
            log.info("Regenerating image for test item {} (run configuration has regenerateImages=true)", item.getId());
            MessageTask messageTask = new MessageTask();
            messageTask.setMessageId(UUID.randomUUID().toString());
            messageTask.setChatId(imageTask.getChatId());
            // Alternative template (with item variables applied) replaces the prompt;
            // otherwise reuse the original task's prompt ids
            String alternativePromptTemplate = run.getAlternativePromptTemplate();
            if (StringUtils.isNotBlank(alternativePromptTemplate)) {
                messageTask.setMessage(PromptUtils.applyVariables(alternativePromptTemplate, item.getVariablesAsObjectMap()));
                // keep the loaded task's message in sync with what is actually generated
                imageTask.setMessage(messageTask.getMessage());
            } else {
                messageTask.setPromptIds(imageTask.getPromptIds());
            }
            messageTask.setSystemMessage(imageTask.getSystemMessage());
            messageTask.setPurpose(QA_TEST_PIPELINE_PURPOSE);
            // Workflow precedence: run override > original task's workflow
            if (StringUtils.isNotBlank(run.getWorkflow())) {
                messageTask.setWorkflow(run.getWorkflow());
            } else {
                messageTask.setWorkflow(imageTask.getWorkflow());
            }
            // regenerate the same number of images as the original produced (min 1)
            int numImages = imageTask.getImages() != null ? imageTask.getImages().size() : 1;
            ImageMessageTask newImageTask = imageModelService.addTaskSync(
                    messageTask,
                    imageTask.getMessage(),
                    numImages
            );
            result.setSuccess(true);
            result.setModelResult(String.format("Regenerated %d image(s)",
                    newImageTask.getImages() != null ? newImageTask.getImages().size() : 0));
            result.setAdditionalContext(newImageTask);
        } else {
            log.info("Using existing image for test item {}", item.getId());
            result.setSuccess(true);
            result.setModelResult(String.format("Using existing image(s) (%d)",
                    imageTask.getImages() != null ? imageTask.getImages().size() : 0));
            result.setAdditionalContext(imageTask);
        }
        return result;
    } catch (Exception e) {
        log.error("Error processing image task for test item {}: {}",
                item.getId(), e.getMessage(), e);
        result.setSuccess(false);
        result.setModelResult("ERROR: " + e.getMessage());
        result.setErrorDetails(e.getMessage() + "\n" + getStackTraceAsString(e));
        return result;
    } finally {
        // timing is recorded on every path, including failures
        long endTime = System.currentTimeMillis();
        long duration = endTime - startTime;
        result.setProcessingTimeMs(duration);
        log.info("Finished processing image test item in {}ms", duration);
    }
}
/**
 * Helper class to store processing result and metadata
 */
@Data
private static class ProcessingResult {
    // whether the item was processed without error
    private boolean success;
    // model output text, or an "ERROR: ..." marker string on failure
    private String modelResult;
    // the prompt that was actually sent (after any run-level override)
    private String originalPrompt;
    // variables applied to the prompt; may be null
    private Object promptVariables;
    // wall-clock processing duration in millis
    private Long processingTimeMs;
    // message + stack trace on failure, null otherwise
    private String errorDetails;
    // extra payload for evaluators (e.g. ImageMessageTask for image items)
    private Object additionalContext;
}
/**
 * Renders an exception's full stack trace into a String for inclusion in
 * persisted error details.
 */
private String getStackTraceAsString(Exception e) {
    StringWriter buffer = new StringWriter();
    e.printStackTrace(new PrintWriter(buffer, true));
    return buffer.toString();
}
/**
 * Evaluate a result against an evaluation
 *
 * <p>Convenience overload without processing metadata; delegates to the
 * five-argument variant with a null ProcessingResult.
 */
private EvaluationResult evaluateResult(TestSetItem item, String actualResult, Evaluation evaluation, String runId) {
    return evaluateResult(item, actualResult, evaluation, runId, null);
}
/**
 * Evaluates the actual result of a test-set item against an evaluation and persists
 * the outcome.
 *
 * @param item             the test-set item being evaluated
 * @param actualResult     the output produced for this run
 * @param evaluation       the evaluation whose config performs the check
 * @param runId            the run this result belongs to
 * @param processingResult optional processing metadata (prompt, timings, image context); may be null
 * @return the persisted {@link EvaluationResult}; on evaluator failure an ERROR result is persisted instead
 */
private EvaluationResult evaluateResult(TestSetItem item, String actualResult, Evaluation evaluation, String runId, ProcessingResult processingResult) {
    try {
        // Build the evaluation context from the item and the actual output.
        EvaluationContext.EvaluationContextBuilder contextBuilder = EvaluationContext.builder()
                .testSetItem(item)
                .originalResult(item.getResult())
                .actualResult(actualResult)
                .aiService(aiService);
        if (processingResult != null && processingResult.getAdditionalContext() != null) {
            contextBuilder.additionalContext(processingResult.getAdditionalContext());
        }
        EvaluationContext context = contextBuilder.build();
        EvaluationResult.EvaluationOutput output = evaluation.getConfig().evaluate(context);
        EvaluationResult.EvaluationResultBuilder resultBuilder = EvaluationResult.builder()
                .evaluationId(evaluation.getId())
                .testSetItemId(item.getId())
                .runId(runId)
                .status(output.getStatus())
                .message(output.getMessage())
                .details(output.getDetails())
                .createdAt(System.currentTimeMillis());
        if (processingResult != null) {
            resultBuilder
                    .originalPrompt(processingResult.getOriginalPrompt())
                    .modelResult(processingResult.getModelResult())
                    .promptVariables(processingResult.getPromptVariables())
                    .processingTimeMs(processingResult.getProcessingTimeMs())
                    .errorDetails(processingResult.getErrorDetails());
            if (processingResult.getAdditionalContext() instanceof ImageMessageTask) {
                ImageMessageTask imageTask = (ImageMessageTask) processingResult.getAdditionalContext();
                // Merge the image-task reference with whatever details the evaluator produced.
                Map<String, Object> details = new HashMap<>();
                details.put("imageTaskId", imageTask.getMessageId());
                if (output.getDetails() instanceof Map) {
                    @SuppressWarnings("unchecked")
                    Map<String, Object> evaluatorDetails = (Map<String, Object>) output.getDetails();
                    details.putAll(evaluatorDetails);
                }
                resultBuilder.details(details);
            }
        } else if (output.getOriginalPrompt() != null) {
            // No processing metadata: fall back to whatever the evaluator itself captured.
            resultBuilder
                    .originalPrompt(output.getOriginalPrompt())
                    .modelResult(output.getModelResult())
                    .promptVariables(output.getPromptVariables())
                    .processingTimeMs(output.getProcessingTimeMs())
                    .errorDetails(output.getErrorDetails());
        }
        return resultRepository.save(resultBuilder.build());
    } catch (Exception e) {
        // Evaluator blew up: persist an ERROR result carrying the failure details.
        log.error("Error evaluating result: {}", e.getMessage(), e);
        EvaluationResult.EvaluationResultBuilder resultBuilder = EvaluationResult.builder()
                .evaluationId(evaluation.getId())
                .testSetItemId(item.getId())
                .runId(runId)
                .status(EvaluationResult.EvaluationStatus.ERROR)
                .message("Evaluation error: " + e.getMessage())
                .errorDetails(getStackTraceAsString(e))
                .createdAt(System.currentTimeMillis());
        if (processingResult != null) {
            resultBuilder
                    .originalPrompt(processingResult.getOriginalPrompt())
                    .modelResult(processingResult.getModelResult())
                    .promptVariables(processingResult.getPromptVariables())
                    .processingTimeMs(processingResult.getProcessingTimeMs());
        }
        return resultRepository.save(resultBuilder.build());
    }
}
/**
 * Records the outcome of a manual review for a pending evaluation result,
 * then re-checks whether the owning run can be completed.
 */
public EvaluationResult updateManualEvaluationStatus(String resultId, boolean passed, String feedback) {
    EvaluationResult result = resultRepository.findById(resultId)
            .orElseThrow(() -> new IllegalArgumentException("Evaluation result not found: " + resultId));
    // Only results still awaiting manual review may be updated.
    if (result.getStatus() != EvaluationResult.EvaluationStatus.PENDING) {
        throw new IllegalArgumentException("Cannot update status for non-manual evaluation or evaluation that has already been reviewed");
    }
    EvaluationResult.EvaluationStatus newStatus = passed
            ? EvaluationResult.EvaluationStatus.PASSED
            : EvaluationResult.EvaluationStatus.FAILED;
    result.setStatus(newStatus);
    result.setMessage(feedback);
    EvaluationResult savedResult = resultRepository.save(result);
    checkAndUpdateRunStatus(result.getRunId());
    return savedResult;
}
/**
 * After a manual review, checks whether every pending evaluation in the run has
 * been resolved; if so, completes the run and records per-status counts.
 */
private void checkAndUpdateRunStatus(String runId) {
    Optional<EvaluationRun> optionalRun = runRepository.findById(runId);
    if (optionalRun.isEmpty()) {
        log.warn("Couldn't find run with ID {} when checking manual evaluations", runId);
        return;
    }
    EvaluationRun run = optionalRun.get();
    // Only runs still awaiting manual review are eligible here.
    if (run.getStatus() != EvaluationRun.RunStatus.PENDING) {
        return;
    }
    List<EvaluationResult> allResults = resultRepository.findByRunId(runId);
    boolean stillPending = allResults.stream()
            .anyMatch(r -> r.getStatus() == EvaluationResult.EvaluationStatus.PENDING);
    if (stillPending) {
        return;
    }
    log.info("All manual evaluations completed for run {}, updating status", runId);
    boolean anyFailed = allResults.stream()
            .anyMatch(r -> r.getStatus() == EvaluationResult.EvaluationStatus.FAILED
                    || r.getStatus() == EvaluationResult.EvaluationStatus.ERROR);
    if (anyFailed) {
        completeRunWithStatus(run, EvaluationRun.RunStatus.FAILED);
        log.info("Run {} completed with FAILED status after manual review", runId);
    } else {
        completeRunWithStatus(run, EvaluationRun.RunStatus.COMPLETED);
        log.info("Run {} completed with COMPLETED status after manual review", runId);
    }
    // Aggregate per-status counts for run-level reporting.
    Map<String, Integer> statusCounts = new HashMap<>();
    for (EvaluationResult result : allResults) {
        statusCounts.merge(result.getStatus().toString(), 1, Integer::sum);
    }
    run.setStatusCounts(statusCounts);
    runRepository.save(run);
}
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/context/spring/testsuite/service/TestSetService.java
|
package ai.driftkit.context.spring.testsuite.service;
import ai.driftkit.common.domain.ImageMessageTask;
import ai.driftkit.common.domain.MessageTask;
import ai.driftkit.context.core.util.PromptUtils;
import ai.driftkit.context.spring.testsuite.domain.TestSet;
import ai.driftkit.context.spring.testsuite.domain.TestSetItem;
import ai.driftkit.context.spring.testsuite.domain.archive.TestSetItemImpl;
import ai.driftkit.context.spring.testsuite.repository.TestSetItemRepository;
import ai.driftkit.context.spring.testsuite.repository.TestSetRepository;
import ai.driftkit.workflows.spring.domain.ImageMessageTaskEntity;
import ai.driftkit.workflows.spring.domain.MessageTaskEntity;
import ai.driftkit.workflows.spring.domain.ModelRequestTrace;
import ai.driftkit.workflows.spring.repository.ImageTaskRepository;
import ai.driftkit.workflows.spring.repository.MessageTaskRepositoryV1;
import ai.driftkit.workflows.spring.repository.ModelRequestTraceRepository;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
@Service
@RequiredArgsConstructor
public class TestSetService {

    private final TestSetRepository testSetRepository;
    private final TestSetItemRepository testSetItemRepository;
    private final MessageTaskRepositoryV1 messageTaskRepository;
    private final ModelRequestTraceRepository modelRequestTraceRepository;
    private final ImageTaskRepository imageTaskRepository;

    /** Returns every test set, newest first. */
    public List<TestSet> getAllTestSets() {
        return testSetRepository.findAllByOrderByCreatedAtDesc();
    }

    /** Returns test sets in a folder, newest first; a null folderId selects un-foldered sets. */
    public List<TestSet> getTestSetsByFolder(String folderId) {
        if (folderId == null) {
            return testSetRepository.findByFolderIdIsNullOrderByCreatedAtDesc();
        }
        return testSetRepository.findByFolderIdOrderByCreatedAtDesc(folderId);
    }

    public Optional<TestSet> getTestSetById(String id) {
        return testSetRepository.findById(id);
    }

    /** Returns the items of a test set, newest first. */
    public List<TestSetItem> getTestSetItems(String testSetId) {
        return testSetItemRepository.findByTestSetIdOrderByCreatedAtDesc(testSetId).stream()
                .map(TestSetItem.class::cast)
                .toList();
    }

    /** Creates a new test set; any caller-supplied id is discarded and timestamps are set here. */
    public TestSet createTestSet(TestSet testSet) {
        testSet.setId(null);
        testSet.setCreatedAt(System.currentTimeMillis());
        testSet.setUpdatedAt(System.currentTimeMillis());
        return testSetRepository.save(testSet);
    }

    /**
     * Replaces an existing test set; returns empty when the id is unknown.
     * NOTE(review): the incoming entity is saved as-is, so its createdAt overwrites
     * the stored value — confirm callers carry it over.
     */
    public Optional<TestSet> updateTestSet(String id, TestSet testSet) {
        if (!testSetRepository.existsById(id)) {
            return Optional.empty();
        }
        testSet.setId(id);
        testSet.setUpdatedAt(System.currentTimeMillis());
        return Optional.of(testSetRepository.save(testSet));
    }

    /** Deletes a test set together with all of its items. */
    public boolean deleteTestSet(String id) {
        if (!testSetRepository.existsById(id)) {
            return false;
        }
        testSetItemRepository.deleteByTestSetId(id);
        testSetRepository.deleteById(id);
        return true;
    }

    /**
     * Adds items to a test set, sourced from message tasks, model-request traces
     * and/or image tasks. Each source list is optional.
     *
     * @return an {@link AddItemsResult} describing the outcome and the number of items added
     */
    public AddItemsResult addItemsToTestSet(
            List<String> messageTaskIds,
            List<TraceStep> traceSteps,
            List<String> imageTaskIds,
            String testSetId) {
        Optional<TestSet> testSetOpt = testSetRepository.findById(testSetId).map(TestSet.class::cast);
        if (testSetOpt.isEmpty()) {
            return new AddItemsResult(Status.TEST_SET_NOT_FOUND, null, 0);
        }
        TestSet testSet = testSetOpt.get();
        List<TestSetItemImpl> testSetItems = new ArrayList<>();
        // Process message tasks if provided
        if (messageTaskIds != null && !messageTaskIds.isEmpty()) {
            List<MessageTask> messageTasks = messageTaskRepository.findAllById(messageTaskIds)
                    .stream()
                    .map(MessageTaskEntity::toMessageTask)
                    .toList();
            for (MessageTask messageTask : messageTasks) {
                // A task counts as image-related via an "image:" message prefix or a linked image task id.
                boolean isImageMessage = messageTask.getMessage() != null &&
                        messageTask.getMessage().toLowerCase().startsWith("image:");
                boolean hasImageTaskId = messageTask.getImageTaskId() != null;
                TestSetItemImpl.TestSetItemImplBuilder builder = TestSetItemImpl.builder()
                        .testSetId(testSet.getId())
                        .originalMessageTaskId(messageTask.getMessageId())
                        .message(messageTask.getMessage())
                        .result(messageTask.getResult())
                        .variables(messageTask.getVariables() != null ?
                                PromptUtils.convertVariables(messageTask.getVariables()) : null
                        )
                        .model(messageTask.getModelId())
                        .temperature(messageTask.getTemperature())
                        .workflowType(messageTask.getWorkflow())
                        .promptId(messageTask.getPromptIds() != null && !messageTask.getPromptIds().isEmpty() ?
                                messageTask.getPromptIds().get(0) : null)
                        .isImageTask(isImageMessage || hasImageTaskId)
                        .createdAt(System.currentTimeMillis());
                if (hasImageTaskId) {
                    builder.originalImageTaskId(messageTask.getImageTaskId());
                }
                testSetItems.add(builder.build());
            }
        }
        // Process image tasks if provided
        if (imageTaskIds != null && !imageTaskIds.isEmpty()) {
            List<ImageMessageTaskEntity> imageTasks = imageTaskRepository.findAllById(imageTaskIds);
            for (ImageMessageTask imageTask : imageTasks) {
                TestSetItemImpl testSetItem = TestSetItemImpl.builder()
                        .testSetId(testSet.getId())
                        .originalImageTaskId(imageTask.getMessageId())
                        .message(imageTask.getMessage())
                        .result(String.format("Generated %d image(s)",
                                imageTask.getImages() != null ? imageTask.getImages().size() : 0))
                        .variables(imageTask.getVariables() != null ?
                                PromptUtils.convertVariables(imageTask.getVariables()) : null
                        )
                        .model(null)
                        .workflowType(imageTask.getWorkflow())
                        .isImageTask(true)
                        .createdAt(System.currentTimeMillis())
                        .build();
                testSetItems.add(testSetItem);
            }
        }
        // Process trace steps if provided
        if (traceSteps != null && !traceSteps.isEmpty()) {
            for (TraceStep step : traceSteps) {
                Optional<ModelRequestTrace> traceOpt = modelRequestTraceRepository.findById(step.getTraceId());
                if (traceOpt.isPresent()) {
                    ModelRequestTrace trace = traceOpt.get();
                    // A trace is image-related via its request type, an "image:" prompt prefix,
                    // or an image-task context type.
                    boolean isImageGeneration = trace.getRequestType() == ModelRequestTrace.RequestType.TEXT_TO_IMAGE;
                    boolean isImageMessage = trace.getPromptTemplate() != null &&
                            trace.getPromptTemplate().toLowerCase().startsWith("image:");
                    boolean isImageTaskContext = trace.getContextType() == ModelRequestTrace.ContextType.IMAGE_TASK;
                    boolean isImageTask = isImageGeneration || isImageMessage || isImageTaskContext;
                    TestSetItemImpl.TestSetItemImplBuilder builder = TestSetItemImpl.builder()
                            .testSetId(testSet.getId())
                            .originalTraceId(trace.getId())
                            .message(trace.getPromptTemplate())
                            .result(trace.getResponse())
                            .variables(trace.getVariables())
                            .model(trace.getModelId())
                            .promptId(trace.getPromptId())
                            .workflowType(trace.getWorkflowInfo() != null ? trace.getWorkflowInfo().getWorkflowType() : null)
                            .isImageTask(isImageTask)
                            .createdAt(System.currentTimeMillis());
                    if (isImageTask && trace.getContextId() != null) {
                        builder.originalImageTaskId(trace.getContextId());
                    }
                    testSetItems.add(builder.build());
                }
            }
        }
        if (testSetItems.isEmpty()) {
            return new AddItemsResult(Status.NO_ITEMS_FOUND, testSet, 0);
        }
        testSetItemRepository.saveAll(testSetItems);
        return new AddItemsResult(Status.SUCCESS, testSet, testSetItems.size());
    }

    /** Deletes a single item, verifying it belongs to the given test set. */
    public boolean deleteTestSetItem(String testSetId, String itemId) {
        Optional<TestSetItemImpl> itemOpt = testSetItemRepository.findById(itemId).map(TestSetItemImpl.class::cast);
        if (itemOpt.isEmpty() || !itemOpt.get().getTestSetId().equals(testSetId)) {
            return false;
        }
        testSetItemRepository.deleteById(itemId);
        return true;
    }

    /** Moves test sets into a folder (null folderId removes them from any folder). */
    public boolean moveTestSetsToFolder(List<String> testSetIds, String folderId) {
        if (testSetIds == null || testSetIds.isEmpty()) {
            return false;
        }
        List<TestSet> testSets = testSetRepository.findAllById(testSetIds).stream()
                .map(TestSet.class::cast)
                .toList();
        if (testSets.isEmpty()) {
            return false;
        }
        for (TestSet testSet : testSets) {
            testSet.setFolderId(folderId);
            testSet.setUpdatedAt(System.currentTimeMillis());
        }
        testSetRepository.saveAll(testSets);
        return true;
    }

    /** Request wrapper identifying a trace to import. */
    @Data
    public static class TraceStep {
        private String traceId;
    }

    /** Outcome codes for {@link #addItemsToTestSet}. */
    public enum Status {
        SUCCESS,
        TEST_SET_NOT_FOUND,
        NO_MESSAGE_TASKS_FOUND,
        NO_ITEMS_FOUND
    }

    /** Result of an add-items operation: status, target set, count and a human-readable message. */
    @Data
    public static class AddItemsResult {
        private final Status status;
        private final TestSet testSet;
        private final int itemsAdded;
        private final String message;

        public AddItemsResult(Status status, TestSet testSet, int itemsAdded) {
            this.status = status;
            this.testSet = testSet;
            this.itemsAdded = itemsAdded;
            this.message = switch (status) {
                case SUCCESS -> "Added " + itemsAdded + " items to TestSet";
                case TEST_SET_NOT_FOUND -> "TestSet not found";
                case NO_MESSAGE_TASKS_FOUND -> "No valid message tasks found";
                case NO_ITEMS_FOUND -> "No valid items found";
            };
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/contextengineering
|
java-sources/ai/driftkit/driftkit-context-engineering-spring-boot-starter/0.8.1/ai/driftkit/contextengineering/autoconfigure/WebMvcConfiguration.java
|
package ai.driftkit.contextengineering.autoconfigure;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
@Configuration
public class WebMvcConfiguration {

    /** Browser cache period (seconds) applied to the served frontend assets. */
    private static final int CACHE_PERIOD_SECONDS = 3600;

    /**
     * Registers static-resource handling so the bundled prompt-engineering
     * frontend is served straight from the classpath.
     */
    @Bean
    public WebMvcConfigurer webMvcConfigurer() {
        return new WebMvcConfigurer() {
            @Override
            public void addResourceHandlers(ResourceHandlerRegistry registry) {
                registry.addResourceHandler("/prompt-engineering/**")
                        .addResourceLocations("classpath:/static/prompt-engineering/")
                        .setCachePeriod(CACHE_PERIOD_SECONDS);
            }
        };
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/cohere/CohereApiClient.java
|
package ai.driftkit.embedding.core.cohere;// CohereApiClient.java
import feign.Headers;
import feign.RequestLine;
public interface CohereApiClient {
    // POST /embed relative to the Feign target's base URL; sends the request texts
    // as JSON and returns their embeddings.
    @RequestLine("POST /embed")
    @Headers("Content-Type: application/json")
    CohereEmbeddingResponse getEmbeddings(CohereEmbeddingRequest request);
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/cohere/CohereAuthInterceptor.java
|
package ai.driftkit.embedding.core.cohere;// CohereAuthInterceptor.java
import feign.RequestInterceptor;
import feign.RequestTemplate;
/**
 * Feign request interceptor that attaches the Cohere API key as a Bearer token
 * and forces a JSON content type on every outgoing request.
 */
public class CohereAuthInterceptor implements RequestInterceptor {
    private final String apiKey;

    /**
     * @param apiKey the Cohere API key; must not be null — a null key would
     *               otherwise be silently sent as the literal header "Bearer null"
     * @throws NullPointerException if {@code apiKey} is null
     */
    public CohereAuthInterceptor(String apiKey) {
        this.apiKey = java.util.Objects.requireNonNull(apiKey, "apiKey must not be null");
    }

    @Override
    public void apply(RequestTemplate template) {
        template.header("Authorization", "Bearer " + apiKey);
        template.header("Content-Type", "application/json");
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/cohere/CohereEmbeddingModel.java
|
package ai.driftkit.embedding.core.cohere;
import ai.driftkit.config.EtlConfig.EmbeddingServiceConfig;
import ai.driftkit.embedding.core.domain.Embedding;
import ai.driftkit.embedding.core.domain.TextSegment;
import ai.driftkit.embedding.core.local.AIOnnxBertBiEncoder;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.embedding.core.domain.Response;
import ai.driftkit.embedding.core.domain.TokenUsage;
import java.util.ArrayList;
import java.util.List;
/**
 * {@link EmbeddingModel} backed by the Cohere /embed HTTP API via a Feign client.
 */
public class CohereEmbeddingModel implements EmbeddingModel {

    private final CohereApiClient apiClient;

    public CohereEmbeddingModel(CohereApiClient apiClient) {
        this.apiClient = apiClient;
    }

    @Override
    public boolean supportsName(String name) {
        return "cohere".equals(name);
    }

    @Override
    public AIOnnxBertBiEncoder model() {
        throw new UnsupportedOperationException("Not supported in CohereEmbeddingModel");
    }

    @Override
    public void configure(EmbeddingServiceConfig config) {
        // No configuration needed for the remote Cohere model.
    }

    /**
     * Embeds all segments in one Cohere API call and converts the returned
     * double-valued vectors into {@link Embedding} instances.
     */
    @Override
    public Response<List<Embedding>> embedAll(List<TextSegment> segments) {
        List<String> texts = new ArrayList<>(segments.size());
        for (TextSegment segment : segments) {
            texts.add(segment.text());
        }
        CohereEmbeddingRequest request = new CohereEmbeddingRequest();
        request.setTexts(texts);

        CohereEmbeddingResponse response = apiClient.getEmbeddings(request);

        List<Embedding> embeddings = new ArrayList<>();
        for (List<Double> values : response.getEmbeddings()) {
            double[] raw = new double[values.size()];
            for (int i = 0; i < raw.length; i++) {
                raw[i] = values.get(i);
            }
            embeddings.add(Embedding.from(raw));
        }
        // Cohere API may not provide token usage; report zero.
        return Response.from(embeddings, new TokenUsage(0));
    }

    @Override
    public int estimateTokenCount(String text) {
        // Rough heuristic: roughly five characters per token.
        return text.length() / 5;
    }

    /** Embeds a single segment by delegating to {@link #embedAll(List)}. */
    public Response<Embedding> embed(TextSegment segment) {
        List<TextSegment> batch = new ArrayList<>();
        batch.add(segment);
        Response<List<Embedding>> batchResponse = embedAll(batch);
        return Response.from(batchResponse.content().get(0), batchResponse.tokenUsage());
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/cohere/CohereEmbeddingModelUsage.java
|
package ai.driftkit.embedding.core.cohere;// CohereEmbeddingModelUsage.java
import ai.driftkit.embedding.core.domain.Embedding;
import ai.driftkit.embedding.core.domain.TextSegment;
import ai.driftkit.embedding.core.domain.Response;
import feign.Feign;
import feign.jackson.JacksonDecoder;
import feign.jackson.JacksonEncoder;
/**
 * Example wiring of {@link CohereEmbeddingModel}: builds a Feign client for the
 * Cohere API and embeds a single text segment.
 */
public class CohereEmbeddingModelUsage {
    public static void main(String[] args) {
        String apiKey = "YOUR_COHERE_API_KEY";

        // Jackson-based Feign client targeting the Cohere API with bearer auth.
        CohereApiClient client = Feign.builder()
                .requestInterceptor(new CohereAuthInterceptor(apiKey))
                .encoder(new JacksonEncoder())
                .decoder(new JacksonDecoder())
                .target(CohereApiClient.class, "https://api.cohere.ai");

        CohereEmbeddingModel model = new CohereEmbeddingModel(client);
        Response<Embedding> response = model.embed(TextSegment.from("Your text here"));
        System.out.println("Embedding Vector: " + response.content());
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/cohere/CohereEmbeddingRequest.java
|
package ai.driftkit.embedding.core.cohere;// CohereEmbeddingRequest.java
import lombok.Data;
import java.util.List;
@Data
public class CohereEmbeddingRequest {
    // Texts to embed; serialized as the "texts" field of the /embed request body.
    private List<String> texts;
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/cohere/CohereEmbeddingResponse.java
|
package ai.driftkit.embedding.core.cohere;// CohereEmbeddingResponse.java
import lombok.Data;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
@Data
public class CohereEmbeddingResponse {
    // One embedding vector per input text, in request order — TODO confirm ordering against the Cohere API.
    private List<List<Double>> embeddings;
    // Model identifier reported by the API.
    private String model;
    // API version reported by the API.
    private String apiVersion;
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/domain/Embedding.java
|
package ai.driftkit.embedding.core.domain;
import lombok.AllArgsConstructor;
import lombok.Data;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@Data
@AllArgsConstructor
public class Embedding {
    // Backing vector; exposed directly (not copied), so callers share and may mutate the array.
    private final float[] vector;

    /** Builds an embedding from a double-precision array, narrowing each element to float. */
    public static Embedding from(double[] embeddingArray) {
        return new Embedding(toFloatArray(embeddingArray));
    }

    public float[] vector() {
        return vector;
    }

    /** Returns a boxed copy of the vector. */
    public List<Float> vectorAsList() {
        List<Float> list = new ArrayList<>(vector.length);
        for (float f : vector) {
            list.add(f);
        }
        return list;
    }

    /**
     * Normalize vector in place to unit length (L2 norm).
     * A zero vector is left unchanged: dividing by a zero norm would fill it with NaNs.
     */
    public void normalize() {
        double norm = 0.0;
        for (float f : vector) {
            norm += f * f;
        }
        norm = Math.sqrt(norm);
        if (norm == 0.0) {
            return;
        }
        for (int i = 0; i < vector.length; i++) {
            vector[i] /= norm;
        }
    }

    /** Number of dimensions in the vector. */
    public int dimension() {
        return vector.length;
    }

    public static Embedding from(float[] vector) {
        return new Embedding(vector);
    }

    /** Builds an embedding from a boxed list; a null element throws NullPointerException on unboxing. */
    public static Embedding from(List<Float> vector) {
        float[] array = new float[vector.size()];
        for (int i = 0; i < vector.size(); i++) {
            array[i] = vector.get(i);
        }
        return new Embedding(array);
    }

    /** Narrows a double[] to a float[] element-wise. */
    public static float[] toFloatArray(double[] src) {
        float[] dst = new float[src.length];
        for (int i = 0; i < src.length; i++) {
            dst[i] = (float) src[i];
        }
        return dst;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/domain/EmbeddingData.java
|
package ai.driftkit.embedding.core.domain;// EmbeddingData.java
import lombok.Data;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
@Data
public class EmbeddingData {
    // API object type marker (e.g. as returned in an OpenAI-style response) — TODO confirm exact values.
    private String object;
    // Position of this embedding within the request's input list.
    private int index;
    // The embedding vector itself.
    private float[] embedding;
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/domain/EmbeddingRequest.java
|
package ai.driftkit.embedding.core.domain;// EmbeddingRequest.java
import lombok.Data;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
@Data
public class EmbeddingRequest {
    // Identifier of the embedding model to invoke.
    private String model;
    // Texts to embed, one entry per input.
    private List<String> input;
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/domain/EmbeddingResponse.java
|
package ai.driftkit.embedding.core.domain;// EmbeddingResponse.java
import lombok.Data;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
@Data
public class EmbeddingResponse {
    // API object type marker for the response envelope.
    private String object;
    // One entry per embedded input.
    private List<EmbeddingData> data;
    // Token accounting reported by the API.
    private Usage usage;
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/domain/Metadata.java
|
package ai.driftkit.embedding.core.domain;
import java.util.*;
import static ai.driftkit.embedding.core.util.DriftKitExceptions.illegalArgument;
import static ai.driftkit.embedding.core.util.DriftKitExceptions.runtime;
import static ai.driftkit.embedding.core.util.ValidationUtils.ensureNotBlank;
import static ai.driftkit.embedding.core.util.ValidationUtils.ensureNotNull;
public class Metadata {
// Value types Metadata accepts; LinkedHashSet keeps a stable iteration order so
// the error message listing supported types is deterministic.
private static final Set<Class<?>> SUPPORTED_VALUE_TYPES = new LinkedHashSet<>();
static {
    SUPPORTED_VALUE_TYPES.add(String.class);
    SUPPORTED_VALUE_TYPES.add(UUID.class);
    // Primitive class literals are listed alongside wrappers; values pulled from a
    // Map are always boxed, so the primitive entries can never match value.getClass().
    SUPPORTED_VALUE_TYPES.add(int.class);
    SUPPORTED_VALUE_TYPES.add(Integer.class);
    SUPPORTED_VALUE_TYPES.add(long.class);
    SUPPORTED_VALUE_TYPES.add(Long.class);
    SUPPORTED_VALUE_TYPES.add(float.class);
    SUPPORTED_VALUE_TYPES.add(Float.class);
    SUPPORTED_VALUE_TYPES.add(double.class);
    SUPPORTED_VALUE_TYPES.add(Double.class);
}
// Backing store; values are validated in the map constructor and the put/add methods.
private final Map<String, Object> metadata;
/**
 * Construct a Metadata object with an empty map of key-value pairs.
 */
public Metadata() {
    // Mutable; entries can be added later via put(...)/add(...).
    this.metadata = new HashMap<>();
}
/**
* Constructs a Metadata object from a map of key-value pairs.
*
* @param metadata the map of key-value pairs; must not be {@code null}. {@code null} values are not permitted.
     * Supported value types: {@link String}, {@link UUID}, {@link Integer}, {@link Long}, {@link Float}, {@link Double}
*/
public Metadata(Map<String, ?> metadata) {
    // Validate every entry (non-blank key, non-null value of a supported type)
    // before copying the map into the backing store.
    ensureNotNull(metadata, "metadata").forEach((key, value) -> {
        validate(key, value);
        if (SUPPORTED_VALUE_TYPES.contains(value.getClass())) {
            return;
        }
        throw illegalArgument("The metadata key '%s' has the value '%s', which is of the unsupported type '%s'. " +
                "Currently, the supported types are: %s",
                key, value, value.getClass().getName(), SUPPORTED_VALUE_TYPES
        );
    });
    this.metadata = new HashMap<>(metadata);
}
// Shared precondition check used by the constructor and all put/add methods:
// keys must be non-blank, values non-null.
private static void validate(String key, Object value) {
    ensureNotBlank(key, "The metadata key with the value '" + value + "'");
    ensureNotNull(value, "The metadata value for the key '" + key + "'");
}
/**
* Returns the value associated with the given key.
*
* @param key the key
* @return the value associated with the given key, or {@code null} if the key is not present.
* @deprecated as of 0.31.0, use {@link #getString(String)}, {@link #getInteger(String)}, {@link #getLong(String)},
* {@link #getFloat(String)}, {@link #getDouble(String)} instead.
*/
@Deprecated
public String get(String key) {
    // Legacy accessor: stringifies whatever is stored, regardless of its type.
    Object stored = metadata.get(key);
    return stored == null ? null : stored.toString();
}
/**
* Returns the {@code String} value associated with the given key.
*
* @param key the key
* @return the {@code String} value associated with the given key, or {@code null} if the key is not present.
* @throws RuntimeException if the value is not of type String
*/
public String getString(String key) {
    if (!containsKey(key)) {
        return null;
    }
    Object stored = metadata.get(key);
    // Strict: unlike the deprecated get(), a non-String value is an error here.
    if (!(stored instanceof String)) {
        throw runtime("Metadata entry with the key '%s' has a value of '%s' and type '%s'. " +
                "It cannot be returned as a String.", key, stored, stored.getClass().getName());
    }
    return (String) stored;
}
/**
* Returns the {@code UUID} value associated with the given key.
*
* @param key the key
* @return the {@code UUID} value associated with the given key, or {@code null} if the key is not present.
     * @throws RuntimeException if the value is neither a {@link UUID} nor a {@link String}
*/
public UUID getUUID(String key) {
    if (!containsKey(key)) {
        return null;
    }
    Object stored = metadata.get(key);
    if (stored instanceof UUID) {
        return (UUID) stored;
    }
    // Stores that keep values as Strings are handled by parsing.
    if (stored instanceof String) {
        return UUID.fromString((String) stored);
    }
    throw runtime("Metadata entry with the key '%s' has a value of '%s' and type '%s'. " +
            "It cannot be returned as a UUID.", key, stored, stored.getClass().getName());
}
/**
* Returns the {@code Integer} value associated with the given key.
* <br>
* Some embedding store implementations (still) store {@code Metadata} values as {@code String}s.
* In this case, the {@code String} value will be parsed into an {@code Integer} when this method is called.
* <br>
* Some embedding store implementations store {@code Metadata} key-value pairs as JSON.
* In this case, type information is lost when serializing to JSON and then deserializing back from JSON.
* JSON libraries can, for example, serialize an {@code Integer} and then deserialize it as a {@code Long}.
* Or serialize a {@code Float} and then deserialize it as a {@code Double}, and so on.
* In such cases, the actual value will be cast to an {@code Integer} when this method is called.
*
* @param key the key
* @return the {@link Integer} value associated with the given key, or {@code null} if the key is not present.
* @throws RuntimeException if the value is not {@link Number}
*/
public Integer getInteger(String key) {
    if (!containsKey(key)) {
        return null;
    }
    Object stored = metadata.get(key);
    // A value is exactly one of Number or String, so check order does not matter.
    if (stored instanceof Number) {
        return ((Number) stored).intValue();
    }
    if (stored instanceof String) {
        return Integer.parseInt((String) stored);
    }
    throw runtime("Metadata entry with the key '%s' has a value of '%s' and type '%s'. " +
            "It cannot be returned as an Integer.", key, stored, stored.getClass().getName());
}
/**
* Returns the {@code Long} value associated with the given key.
* <br>
* Some embedding store implementations (still) store {@code Metadata} values as {@code String}s.
* In this case, the {@code String} value will be parsed into an {@code Long} when this method is called.
* <br>
* Some embedding store implementations store {@code Metadata} key-value pairs as JSON.
* In this case, type information is lost when serializing to JSON and then deserializing back from JSON.
* JSON libraries can, for example, serialize an {@code Integer} and then deserialize it as a {@code Long}.
* Or serialize a {@code Float} and then deserialize it as a {@code Double}, and so on.
* In such cases, the actual value will be cast to a {@code Long} when this method is called.
*
* @param key the key
* @return the {@code Long} value associated with the given key, or {@code null} if the key is not present.
* @throws RuntimeException if the value is not {@link Number}
*/
public Long getLong(String key) {
    if (!containsKey(key)) {
        return null;
    }
    Object stored = metadata.get(key);
    if (stored instanceof Number) {
        return ((Number) stored).longValue();
    }
    if (stored instanceof String) {
        return Long.parseLong((String) stored);
    }
    throw runtime("Metadata entry with the key '%s' has a value of '%s' and type '%s'. " +
            "It cannot be returned as a Long.", key, stored, stored.getClass().getName());
}
/**
* Returns the {@code Float} value associated with the given key.
* <br>
* Some embedding store implementations (still) store {@code Metadata} values as {@code String}s.
* In this case, the {@code String} value will be parsed into a {@code Float} when this method is called.
* <br>
* Some embedding store implementations store {@code Metadata} key-value pairs as JSON.
* In this case, type information is lost when serializing to JSON and then deserializing back from JSON.
* JSON libraries can, for example, serialize an {@code Integer} and then deserialize it as a {@code Long}.
* Or serialize a {@code Float} and then deserialize it as a {@code Double}, and so on.
* In such cases, the actual value will be cast to a {@code Float} when this method is called.
*
* @param key the key
* @return the {@code Float} value associated with the given key, or {@code null} if the key is not present.
* @throws RuntimeException if the value is not {@link Number}
*/
public Float getFloat(String key) {
    if (!containsKey(key)) {
        return null;
    }
    Object stored = metadata.get(key);
    if (stored instanceof Number) {
        return ((Number) stored).floatValue();
    }
    if (stored instanceof String) {
        return Float.parseFloat((String) stored);
    }
    throw runtime("Metadata entry with the key '%s' has a value of '%s' and type '%s'. " +
            "It cannot be returned as a Float.", key, stored, stored.getClass().getName());
}
/**
 * Returns the {@code Double} value stored under the given key.
 * <br>
 * Stores that persist metadata values as {@code String}s get their value parsed
 * back into a {@code Double}; numeric values whose exact type was lost during a
 * JSON round trip are converted via {@link Number#doubleValue()}.
 *
 * @param key the key
 * @return the {@code Double} value associated with the given key, or {@code null} if the key is not present.
 * @throws RuntimeException if the value is neither a {@link Number} nor a parseable {@code String}
 */
public Double getDouble(String key) {
    if (!containsKey(key)) {
        return null;
    }
    Object value = metadata.get(key);
    if (value instanceof Number) {
        return ((Number) value).doubleValue();
    }
    if (value instanceof String) {
        return Double.parseDouble((String) value);
    }
    throw runtime("Metadata entry with the key '%s' has a value of '%s' and type '%s'. " +
            "It cannot be returned as a Double.", key, value, value.getClass().getName());
}
/**
 * Tells whether this {@code Metadata} holds a mapping for the given key.
 *
 * @param key the key to look up
 * @return {@code true} if a mapping for the key exists; {@code false} otherwise.
 */
public boolean containsKey(String key) {
    return metadata.containsKey(key);
}
/**
 * Adds a key-value pair, storing the value's {@code String} representation.
 *
 * @param key the key
 * @param value the value; stored as {@code value.toString()}
 * @return {@code this}
 * @deprecated as of 0.31.0, use the typed overloads {@link #put(String, String)},
 * {@link #put(String, int)}, {@link #put(String, long)}, {@link #put(String, float)},
 * {@link #put(String, double)} instead.
 */
@Deprecated
public Metadata add(String key, Object value) {
    return put(key, value.toString());
}
/**
 * Adds a {@code String} key-value pair after validating it.
 *
 * @param key the key
 * @param value the value
 * @return {@code this}
 * @deprecated as of 0.31.0, use {@link #put(String, String)} and the other typed
 * {@code put} overloads instead.
 */
@Deprecated
public Metadata add(String key, String value) {
    validate(key, value);
    metadata.put(key, value);
    return this;
}
/**
 * Stores a {@code String} value under the given key, validating the pair first.
 *
 * @param key the key
 * @param value the value
 * @return {@code this}
 */
public Metadata put(String key, String value) {
    validate(key, value);
    metadata.put(key, value);
    return this;
}
/**
 * Stores a {@link UUID} value under the given key, validating the pair first.
 *
 * @param key the key
 * @param value the value
 * @return {@code this}
 */
public Metadata put(String key, UUID value) {
    validate(key, value);
    metadata.put(key, value);
    return this;
}
/**
 * Stores an {@code int} value under the given key, validating the pair first.
 *
 * @param key the key
 * @param value the value
 * @return {@code this}
 */
public Metadata put(String key, int value) {
    validate(key, value);
    metadata.put(key, value);
    return this;
}
/**
 * Stores a {@code long} value under the given key, validating the pair first.
 *
 * @param key the key
 * @param value the value
 * @return {@code this}
 */
public Metadata put(String key, long value) {
    validate(key, value);
    metadata.put(key, value);
    return this;
}
/**
 * Stores a {@code float} value under the given key, validating the pair first.
 *
 * @param key the key
 * @param value the value
 * @return {@code this}
 */
public Metadata put(String key, float value) {
    validate(key, value);
    metadata.put(key, value);
    return this;
}
/**
 * Stores a {@code double} value under the given key, validating the pair first.
 *
 * @param key the key
 * @param value the value
 * @return {@code this}
 */
public Metadata put(String key, double value) {
    validate(key, value);
    metadata.put(key, value);
    return this;
}
/**
 * Removes the mapping for the given key, if one exists.
 *
 * @param key the key to remove
 * @return {@code this}
 */
public Metadata remove(String key) {
    metadata.remove(key);
    return this;
}
/**
 * Creates an independent copy of this {@code Metadata}.
 *
 * @return a new {@code Metadata} holding the same key-value pairs.
 */
public Metadata copy() {
    return new Metadata(metadata);
}
/**
 * Returns the metadata as a map with every value rendered as a {@code String}.
 *
 * @return a new map of key-value pairs with stringified values.
 * @deprecated as of 0.31.0, use {@link #toMap()} instead.
 */
@Deprecated
public Map<String, String> asMap() {
    Map<String, String> map = new HashMap<>();
    metadata.forEach((key, value) -> map.put(key, String.valueOf(value)));
    return map;
}
/**
 * Returns a defensive copy of the metadata as a map of key-value pairs.
 *
 * @return a new map containing this metadata's entries.
 */
public Map<String, Object> toMap() {
    return new HashMap<>(metadata);
}
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    Metadata other = (Metadata) o;
    return Objects.equals(metadata, other.metadata);
}
@Override
public int hashCode() {
    // Consistent with equals(): both delegate entirely to the underlying map.
    return Objects.hash(metadata);
}
@Override
public String toString() {
    // Renders exactly as before: "Metadata { metadata = <map> }"
    return "Metadata { metadata = " + metadata + " }";
}
/**
 * Creates a {@code Metadata} holding a single key-value pair.
 *
 * @param key the key
 * @param value the value
 * @return a new {@code Metadata} instance
 */
public static Metadata from(String key, String value) {
    Metadata result = new Metadata();
    return result.put(key, value);
}
/**
 * Creates a {@code Metadata} holding a single key-value pair, storing the
 * value's {@code String} representation.
 *
 * @param key the key
 * @param value the value
 * @return a new {@code Metadata} instance
 * @deprecated Use {@link #from(String, String)} instead
 */
@Deprecated
public static Metadata from(String key, Object value) {
    return new Metadata().add(key, value);
}
/**
 * Creates a {@code Metadata} populated with the given map of key-value pairs.
 *
 * @param metadata the key-value pairs to copy
 * @return a new {@code Metadata} instance
 */
public static Metadata from(Map<String, ?> metadata) {
    return new Metadata(metadata);
}
/**
 * Convenience alias for {@link #from(String, String)}.
 *
 * @param key the key
 * @param value the value
 * @return a new {@code Metadata} instance
 */
public static Metadata metadata(String key, String value) {
    return from(key, value);
}
/**
 * Convenience alias for {@link #from(String, Object)}.
 *
 * @param key the key
 * @param value the value
 * @return a new {@code Metadata} instance
 * @deprecated Use {@link #metadata(String, String)} instead
 */
@Deprecated
public static Metadata metadata(String key, Object value) {
    return from(key, value);
}
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/domain/PoolingMode.java
|
package ai.driftkit.embedding.core.domain;
/**
 * Strategy for collapsing the per-token embeddings produced by a BERT model
 * into a single sentence-level vector.
 */
public enum PoolingMode {

    /**
     * Use the embedding of the leading CLS token, which is trained to
     * summarize the entire input sequence.
     */
    CLS,

    /**
     * Arithmetic mean of all token embeddings; often outperforms CLS on
     * similarity tasks.
     */
    MEAN,

    /**
     * Per-dimension maximum across all token embeddings.
     */
    MAX
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/domain/Response.java
|
package ai.driftkit.embedding.core.domain;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.experimental.Accessors;
/**
 * Wrapper for the result of an embedding operation, optionally carrying
 * token-usage accounting and a finish reason.
 *
 * @param <T> the type of content in the response
 */
@Getter
@Accessors(fluent = true)
@NoArgsConstructor
@AllArgsConstructor
public class Response<T> {

    private T content;
    private TokenUsage tokenUsage;
    private String finishReason;

    /**
     * Creates a response carrying content only.
     *
     * @param content the content
     * @param <T> the type of content
     * @return the response
     */
    public static <T> Response<T> from(T content) {
        return from(content, null, null);
    }

    /**
     * Creates a response carrying content and token usage.
     *
     * @param content the content
     * @param tokenUsage the token usage information
     * @param <T> the type of content
     * @return the response
     */
    public static <T> Response<T> from(T content, TokenUsage tokenUsage) {
        return from(content, tokenUsage, null);
    }

    /**
     * Creates a fully-populated response.
     *
     * @param content the content
     * @param tokenUsage the token usage information
     * @param finishReason the reason why the operation finished
     * @param <T> the type of content
     * @return the response
     */
    public static <T> Response<T> from(T content, TokenUsage tokenUsage, String finishReason) {
        return new Response<>(content, tokenUsage, finishReason);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/domain/TextSegment.java
|
package ai.driftkit.embedding.core.domain;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
 * A piece of text paired with its {@link Metadata}.
 */
@Data
@AllArgsConstructor
public class TextSegment {

    private final String text;
    private final Metadata metadata;

    /**
     * Creates a segment with empty metadata.
     */
    public static TextSegment from(String text) {
        return from(text, new Metadata());
    }

    /**
     * Creates a segment with the given metadata.
     */
    public static TextSegment from(String text, Metadata metadata) {
        return new TextSegment(text, metadata);
    }

    /**
     * Fluent accessor for the text content.
     */
    public String text() {
        return text;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/domain/TokenUsage.java
|
package ai.driftkit.embedding.core.domain;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.experimental.Accessors;
/**
 * Token accounting for API calls: input, output, and total token counts.
 */
@Getter
@Accessors(fluent = true)
@NoArgsConstructor
@AllArgsConstructor
public class TokenUsage {

    private Integer inputTokenCount;
    private Integer outputTokenCount;
    private Integer totalTokenCount;

    /**
     * Creates token usage with an input count only; the total mirrors the
     * input count (and stays {@code null} when the input count is {@code null}).
     *
     * @param inputTokenCount the input token count
     */
    public TokenUsage(Integer inputTokenCount) {
        this.inputTokenCount = inputTokenCount;
        this.outputTokenCount = null;
        this.totalTokenCount = inputTokenCount;
    }

    /**
     * Creates token usage with input and output counts; the total is their sum,
     * treating a {@code null} count as zero.
     *
     * @param inputTokenCount the input token count
     * @param outputTokenCount the output token count
     */
    public TokenUsage(Integer inputTokenCount, Integer outputTokenCount) {
        this.inputTokenCount = inputTokenCount;
        this.outputTokenCount = outputTokenCount;
        int in = inputTokenCount == null ? 0 : inputTokenCount;
        int out = outputTokenCount == null ? 0 : outputTokenCount;
        this.totalTokenCount = in + out;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/domain/Usage.java
|
package ai.driftkit.embedding.core.domain;// Usage.java
import lombok.Data;
import com.fasterxml.jackson.annotation.JsonProperty;
@Data
public class Usage {
    // Tokens consumed by the prompt, as reported by the embeddings API.
    @JsonProperty("prompt_tokens")
    private int promptTokens;
    // Total tokens billed for the request.
    @JsonProperty("total_tokens")
    private int totalTokens;
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/local/AIOnnxBertBiEncoder.java
|
package ai.driftkit.embedding.core.local;
import ai.djl.huggingface.tokenizers.Encoding;
import ai.djl.huggingface.tokenizers.HuggingFaceTokenizer;
import ai.onnxruntime.OnnxTensor;
import ai.onnxruntime.OrtEnvironment;
import ai.onnxruntime.OrtException;
import ai.onnxruntime.OrtSession;
import ai.onnxruntime.OrtSession.Result;
import ai.driftkit.embedding.core.domain.PoolingMode;
import ai.driftkit.embedding.core.util.DriftKitExceptions;
import ai.driftkit.embedding.core.util.ValidationUtils;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.RequiredArgsConstructor;
import lombok.experimental.Accessors;
import java.nio.LongBuffer;
import java.nio.file.Path;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Bi-encoder that produces sentence embeddings from an ONNX BERT model, using a
 * HuggingFace tokenizer. Long inputs are split into partitions, embedded
 * separately, and combined by a token-count-weighted average, then L2-normalized.
 */
public class AIOnnxBertBiEncoder {

    // 512 maximum positions minus the two special tokens ([CLS] and [SEP]) that wrap each partition.
    private static final int MAX_SEQUENCE_LENGTH = 510;

    public static final String CLS = "[CLS]";
    public static final String SEP = "[SEP]";

    private final OrtEnvironment environment;
    private final OrtSession session;
    private final Set<String> expectedInputs;
    private final HuggingFaceTokenizer tokenizer;
    private final PoolingMode poolingMode;
    private final boolean addCls;

    /**
     * Creates a bi-encoder backed by an ONNX BERT model.
     *
     * @param modelPath     path to the ONNX model file
     * @param tokenizerPath path to the HuggingFace tokenizer file
     * @param poolingMode   how per-token embeddings are pooled into one vector
     * @param addCls        whether the tokenizer adds special tokens ([CLS]/[SEP])
     * @throws RuntimeException if the model or tokenizer fails to load
     */
    public AIOnnxBertBiEncoder(String modelPath, String tokenizerPath, PoolingMode poolingMode, boolean addCls) {
        this.addCls = addCls;
        try {
            this.environment = OrtEnvironment.getEnvironment();
            this.session = environment.createSession(modelPath);
            this.expectedInputs = session.getInputNames();
            this.tokenizer = HuggingFaceTokenizer.builder().optTokenizerPath(Path.of(tokenizerPath))
                    .optPadding(false)
                    .optAddSpecialTokens(addCls)
                    //.optWithOverflowingTokens(true)
                    .optTruncation(true)
                    .build();
            this.poolingMode = ValidationUtils.ensureNotNull(poolingMode, "poolingMode");
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Embeds the given text. Inputs longer than the model's maximum sequence
     * length are partitioned; each partition is embedded and the partial
     * embeddings are averaged (weighted by partition token count) and normalized.
     *
     * @param text the text to embed
     * @return the embedding and the total token count of the input
     */
    public EmbeddingAndTokenCount embed(String text) {
        List<String> tokens = tokenizer.tokenize(text);
        List<List<String>> partitions = partition(tokens, MAX_SEQUENCE_LENGTH);
        List<float[]> embeddings = new ArrayList<>();
        for (List<String> partition : partitions) {
            try (Result result = encode(partition)) {
                float[] embedding = toEmbedding(result);
                embeddings.add(embedding);
            } catch (OrtException e) {
                throw new RuntimeException(e);
            }
        }
        List<Integer> weights = partitions.stream()
                .map(List::size)
                .collect(Collectors.toList());
        float[] embedding = normalize(weightedAverage(embeddings, weights));
        return new EmbeddingAndTokenCount(embedding, tokens.size());
    }

    /**
     * Splits the token list into chunks of at most {@code partitionSize} tokens.
     * Leading [CLS] / trailing [SEP] on the full list are stripped first; when
     * {@code addCls} is set, each partition is re-wrapped with [CLS] and [SEP].
     */
    private List<List<String>> partition(List<String> tokens, int partitionSize) {
        List<List<String>> partitions = new ArrayList<>();
        int startIdx = 0;
        int lastIdx = 0;
        if (tokens.size() > 2 && tokens.get(0).equals(CLS) && tokens.get(tokens.size() - 1).equals(SEP)) {
            startIdx = 1;
            lastIdx = -1;
        }
        // Iterate over the inner tokens only: [startIdx, tokens.size() + lastIdx).
        // (The previous bound "tokens.size() - 1" dropped the last token whenever
        // no special tokens were present.)
        for (int from = startIdx; from < tokens.size() + lastIdx; from += partitionSize) {
            int to = Math.min(tokens.size() + lastIdx, from + partitionSize);
            List<String> partition = new ArrayList<>(tokens.subList(from, to));
            // Fixes: compare against the real special tokens "[CLS]"/"[SEP]" (the old
            // literals "CLS"/"SEP" never matched real tokens), index the partition's own
            // last element (the old code indexed with tokens.size() - 1, which can be out
            // of bounds), and parenthesize so the whole check is gated on addCls.
            if (addCls && (partition.size() < 2
                    || !partition.get(0).equals(CLS)
                    || !partition.get(partition.size() - 1).equals(SEP))) {
                partition.add(0, CLS);
                partition.add(SEP);
            }
            partitions.add(partition);
        }
        return partitions;
    }

    /**
     * Runs one partition through the ONNX session, feeding only the inputs the
     * model declares (token_type_ids is optional for some exported models).
     */
    private Result encode(List<String> tokens) throws OrtException {
        Encoding encoding = tokenizer.encode(toText(tokens), true, false);
        long[] inputIds = encoding.getIds();
        long[] attentionMask = encoding.getAttentionMask();
        long[] tokenTypeIds = encoding.getTypeIds();
        long[] shape = {1, inputIds.length};
        try (
                OnnxTensor inputIdsTensor = OnnxTensor.createTensor(environment, LongBuffer.wrap(inputIds), shape);
                OnnxTensor attentionMaskTensor = OnnxTensor.createTensor(environment, LongBuffer.wrap(attentionMask), shape);
                OnnxTensor tokenTypeIdsTensor = OnnxTensor.createTensor(environment, LongBuffer.wrap(tokenTypeIds), shape)
        ) {
            Map<String, OnnxTensor> inputs = new HashMap<>();
            inputs.put("input_ids", inputIdsTensor);
            inputs.put("attention_mask", attentionMaskTensor);
            if (expectedInputs.contains("token_type_ids")) {
                inputs.put("token_type_ids", tokenTypeIdsTensor);
            }
            return session.run(inputs);
        }
    }

    /**
     * Reconstructs text from tokens so that re-tokenizing it yields the same tokens.
     * Falls back to raw concatenation when the round trip does not match
     * (e.g. for sub-word pieces that the sentence builder would alter).
     */
    private String toText(List<String> tokens) {
        String text = tokenizer.buildSentence(tokens);
        List<String> tokenized = tokenizer.tokenize(text);
        List<String> tokenizedWithoutSpecialTokens = new LinkedList<>(tokenized);
        if (addCls) {
            tokenizedWithoutSpecialTokens.remove(0);
            tokenizedWithoutSpecialTokens.remove(tokenizedWithoutSpecialTokens.size() - 1);
        }
        if (tokenizedWithoutSpecialTokens.equals(tokens)) {
            return text;
        } else {
            return String.join("", tokens);
        }
    }

    // Extracts the per-token vectors of the first (only) batch element and pools them.
    private float[] toEmbedding(Result result) throws OrtException {
        float[][] vectors = ((float[][][]) result.get(0).getValue())[0];
        return pool(vectors);
    }

    // Dispatches on the configured pooling mode; covers every PoolingMode constant.
    private float[] pool(float[][] vectors) {
        switch (poolingMode) {
            case CLS:
                return clsPool(vectors);
            case MEAN:
                return meanPool(vectors);
            case MAX:
                // Fix: MAX is a declared PoolingMode but previously fell through to the
                // "unknown pooling mode" error.
                return maxPool(vectors);
            default:
                throw DriftKitExceptions.illegalArgument("Unknown pooling mode: " + poolingMode);
        }
    }

    // CLS pooling: the first token's vector represents the whole sequence.
    private static float[] clsPool(float[][] vectors) {
        return vectors[0];
    }

    // MEAN pooling: per-dimension arithmetic mean over all token vectors.
    private static float[] meanPool(float[][] vectors) {
        int numVectors = vectors.length;
        int vectorLength = vectors[0].length;
        float[] averagedVector = new float[vectorLength];
        for (float[] vector : vectors) {
            for (int j = 0; j < vectorLength; j++) {
                averagedVector[j] += vector[j];
            }
        }
        for (int j = 0; j < vectorLength; j++) {
            averagedVector[j] /= numVectors;
        }
        return averagedVector;
    }

    // MAX pooling: per-dimension maximum over all token vectors.
    private static float[] maxPool(float[][] vectors) {
        int vectorLength = vectors[0].length;
        float[] maxVector = Arrays.copyOf(vectors[0], vectorLength);
        for (int i = 1; i < vectors.length; i++) {
            for (int j = 0; j < vectorLength; j++) {
                if (vectors[i][j] > maxVector[j]) {
                    maxVector[j] = vectors[i][j];
                }
            }
        }
        return maxVector;
    }

    // Combines partition embeddings, weighting each by its token count.
    private float[] weightedAverage(List<float[]> embeddings, List<Integer> weights) {
        if (embeddings.size() == 1) {
            return embeddings.get(0);
        }
        int dimensions = embeddings.get(0).length;
        float[] averagedEmbedding = new float[dimensions];
        int totalWeight = 0;
        for (int i = 0; i < embeddings.size(); i++) {
            int weight = weights.get(i);
            totalWeight += weight;
            for (int j = 0; j < dimensions; j++) {
                averagedEmbedding[j] += embeddings.get(i)[j] * weight;
            }
        }
        for (int j = 0; j < dimensions; j++) {
            averagedEmbedding[j] /= totalWeight;
        }
        return averagedEmbedding;
    }

    // L2-normalizes the vector so downstream cosine similarity reduces to a dot product.
    private static float[] normalize(float[] vector) {
        float sumSquare = 0;
        for (float v : vector) {
            sumSquare += v * v;
        }
        float norm = (float) Math.sqrt(sumSquare);
        float[] normalizedVector = new float[vector.length];
        for (int i = 0; i < vector.length; i++) {
            normalizedVector[i] = vector[i] / norm;
        }
        return normalizedVector;
    }

    /**
     * Counts the tokens the tokenizer produces for the given text.
     */
    public int countTokens(String text) {
        return tokenizer.tokenize(text).size();
    }

    /**
     * An embedding paired with the token count of the text it was produced from.
     */
    @Getter
    @Accessors(fluent = true)
    @NoArgsConstructor
    @AllArgsConstructor
    public static class EmbeddingAndTokenCount {
        float[] embedding;
        int tokenCount;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/local/BertGenericEmbeddingModel.java
|
package ai.driftkit.embedding.core.local;
import ai.driftkit.config.EtlConfig;
import ai.driftkit.config.EtlConfig.EmbeddingServiceConfig;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.embedding.core.domain.Embedding;
import ai.driftkit.embedding.core.domain.TextSegment;
import ai.driftkit.embedding.core.domain.PoolingMode;
import ai.driftkit.embedding.core.domain.Response;
import lombok.NoArgsConstructor;
import lombok.Setter;
import java.util.Arrays;
import java.util.List;
/**
 * {@link EmbeddingModel} backed by a local ONNX BERT encoder, registered under
 * the service name "local". The encoder is built from the configured model and
 * tokenizer paths using MEAN pooling without special tokens.
 */
@NoArgsConstructor
public class BertGenericEmbeddingModel implements EmbeddingModel {

    @Setter
    private AIOnnxBertBiEncoder encoder;

    @Override
    public boolean supportsName(String name) {
        return "local".equals(name);
    }

    @Override
    public AIOnnxBertBiEncoder model() {
        return encoder;
    }

    @Override
    public void configure(EmbeddingServiceConfig config) {
        String modelPath = config.get(EtlConfig.MODEL_PATH);
        String tokenizerPath = config.get(EtlConfig.TOKENIZER_PATH);
        this.encoder = new AIOnnxBertBiEncoder(modelPath, tokenizerPath, PoolingMode.MEAN, false);
    }

    /**
     * Embeds a single segment by delegating to the batch path.
     */
    public Response<Embedding> embed(TextSegment segment) {
        Response<List<Embedding>> batch = embedAll(Arrays.asList(segment));
        return Response.from(batch.content().get(0));
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/openai/EmbeddingOpenAIApiClient.java
|
package ai.driftkit.embedding.core.openai;// OpenAIApiClient.java
import ai.driftkit.embedding.core.domain.EmbeddingRequest;
import ai.driftkit.embedding.core.domain.EmbeddingResponse;
import feign.Headers;
import feign.RequestLine;
/**
 * Feign client for the OpenAI embeddings endpoint.
 */
public interface EmbeddingOpenAIApiClient {

    /**
     * Requests embeddings for the inputs described by the given request.
     *
     * @param request the embedding request (model name and input texts)
     * @return the embeddings response
     */
    @RequestLine("POST /v1/embeddings")
    @Headers("Content-Type: application/json")
    EmbeddingResponse getEmbeddings(EmbeddingRequest request);
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/openai/OpenAIAuthInterceptor.java
|
package ai.driftkit.embedding.core.openai;// OpenAIAuthInterceptor.java
import feign.RequestInterceptor;
import feign.RequestTemplate;
/**
 * Feign interceptor that attaches the OpenAI bearer token and JSON content
 * type to every outgoing request.
 */
public class OpenAIAuthInterceptor implements RequestInterceptor {

    private final String apiKey;

    public OpenAIAuthInterceptor(String apiKey) {
        this.apiKey = apiKey;
    }

    @Override
    public void apply(RequestTemplate template) {
        // All requests are authenticated with the same API key and exchange JSON.
        template.header("Authorization", "Bearer " + apiKey);
        template.header("Content-Type", "application/json");
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/openai/OpenAIEmbeddingModel.java
|
package ai.driftkit.embedding.core.openai;
import ai.driftkit.config.EtlConfig;
import ai.driftkit.config.EtlConfig.EmbeddingServiceConfig;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.embedding.core.domain.*;
import ai.driftkit.embedding.core.local.AIOnnxBertBiEncoder;
import ai.driftkit.embedding.core.domain.Response;
import ai.driftkit.embedding.core.domain.TokenUsage;
import feign.Feign;
import feign.jackson.JacksonDecoder;
import feign.jackson.JacksonEncoder;
import lombok.NoArgsConstructor;
import java.util.ArrayList;
import java.util.List;
/**
 * {@link EmbeddingModel} that delegates to the OpenAI embeddings API through a
 * Feign client, registered under the service name "openai".
 */
@NoArgsConstructor
public class OpenAIEmbeddingModel implements EmbeddingModel {

    private EmbeddingOpenAIApiClient apiClient;
    private String modelName;

    public OpenAIEmbeddingModel(EmbeddingOpenAIApiClient apiClient, String modelName) {
        this.apiClient = apiClient;
        this.modelName = modelName;
    }

    @Override
    public boolean supportsName(String name) {
        return "openai".equals(name);
    }

    @Override
    public AIOnnxBertBiEncoder model() {
        throw new UnsupportedOperationException("Not supported in OpenAIEmbeddingModel");
    }

    @Override
    public void configure(EmbeddingServiceConfig config) {
        // NOTE(review): model name is read via getConfig().get(...) while other
        // settings use config.get(...); confirm both resolve identically.
        this.modelName = config.getConfig().get(EtlConfig.MODEL_NAME);
        this.apiClient = Feign.builder()
                .encoder(new JacksonEncoder())
                .decoder(new JacksonDecoder())
                .requestInterceptor(new OpenAIAuthInterceptor(config.get(EtlConfig.API_KEY)))
                .target(EmbeddingOpenAIApiClient.class, config.get(EtlConfig.HOST, "https://api.openai.com"));
    }

    @Override
    public Response<List<Embedding>> embedAll(List<TextSegment> segments) {
        List<String> texts = new ArrayList<>();
        for (TextSegment segment : segments) {
            texts.add(segment.text());
        }
        EmbeddingRequest request = new EmbeddingRequest();
        request.setModel(modelName);
        request.setInput(texts);
        EmbeddingResponse apiResponse = apiClient.getEmbeddings(request);
        List<Embedding> embeddings = new ArrayList<>();
        for (EmbeddingData data : apiResponse.getData()) {
            embeddings.add(Embedding.from(data.getEmbedding()));
        }
        // The API reports token usage for the whole batch.
        int tokensUsed = apiResponse.getUsage().getTotalTokens();
        return Response.from(embeddings, new TokenUsage(tokensUsed));
    }

    @Override
    public int estimateTokenCount(String text) {
        // Rough heuristic (~4 characters per token); integrate a real tokenizer
        // for accurate counts.
        return text.length() / 4;
    }

    /**
     * Embeds a single segment by delegating to the batch path, preserving token usage.
     */
    public Response<Embedding> embed(TextSegment segment) {
        List<TextSegment> batch = new ArrayList<>();
        batch.add(segment);
        Response<List<Embedding>> batchResponse = embedAll(batch);
        return Response.from(batchResponse.content().get(0), batchResponse.tokenUsage());
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/service/EmbeddingFactory.java
|
package ai.driftkit.embedding.core.service;
import ai.driftkit.config.EtlConfig.EmbeddingServiceConfig;
import org.apache.commons.lang3.StringUtils;
import java.util.Map;
import java.util.ServiceLoader;
/**
 * Factory that resolves {@link EmbeddingModel} implementations by service name
 * via {@link ServiceLoader}.
 */
public class EmbeddingFactory {

    /**
     * Looks up an {@link EmbeddingModel} implementation that supports the given
     * name and configures it with the supplied settings.
     *
     * @param name   the embedding service name (e.g. "local", "openai"); must not be blank
     * @param config provider-specific configuration key-value pairs
     * @return the configured embedding model
     * @throws IllegalArgumentException if the name is blank or no provider supports it
     * @throws Exception if the selected provider fails during configuration
     */
    public static EmbeddingModel fromName(String name, Map<String, String> config) throws Exception {
        if (StringUtils.isBlank(name)) {
            // Fix: the old message ("must not be null") did not cover empty/blank input,
            // which this branch also rejects.
            throw new IllegalArgumentException("Name must not be null or blank");
        }
        ServiceLoader<EmbeddingModel> loader = ServiceLoader.load(EmbeddingModel.class);
        for (EmbeddingModel model : loader) {
            if (model.supportsName(name)) {
                model.configure(new EmbeddingServiceConfig(name, config));
                return model;
            }
        }
        // Fix: this factory resolves embedding services, not prompt services
        // (copy-paste from a sibling factory).
        throw new IllegalArgumentException("Unknown or unavailable embedding service: " + name);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/service/EmbeddingModel.java
|
package ai.driftkit.embedding.core.service;
import ai.driftkit.config.EtlConfig.EmbeddingServiceConfig;
import ai.driftkit.embedding.core.domain.Embedding;
import ai.driftkit.embedding.core.domain.TextSegment;
import ai.driftkit.embedding.core.local.AIOnnxBertBiEncoder;
import ai.driftkit.embedding.core.local.AIOnnxBertBiEncoder.EmbeddingAndTokenCount;
import ai.driftkit.embedding.core.domain.Response;
import ai.driftkit.embedding.core.domain.TokenUsage;
import org.apache.commons.collections4.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
/**
 * Abstraction over embedding backends (local ONNX encoders or remote APIs).
 * Default methods implement single/batch embedding on top of {@link #model()}.
 */
public interface EmbeddingModel {

    /** Returns whether this implementation handles the given service name. */
    boolean supportsName(String name);

    /** Returns the local ONNX encoder backing this model, when one exists. */
    AIOnnxBertBiEncoder model();

    /** Applies provider-specific configuration. */
    void configure(EmbeddingServiceConfig config);

    /**
     * Embeds a single segment via the batch path.
     *
     * @return the embedding with token usage, or {@code null} if the backend
     *         produced no embedding
     */
    default Response<Embedding> embed(TextSegment segment) {
        Response<List<Embedding>> batch = embedAll(List.of(segment));
        List<Embedding> embeddings = batch.content();
        if (CollectionUtils.isEmpty(embeddings)) {
            return null;
        }
        return Response.from(embeddings.get(0), batch.tokenUsage(), batch.finishReason());
    }

    /**
     * Embeds every segment with the local encoder, accumulating token counts.
     */
    default Response<List<Embedding>> embedAll(List<TextSegment> segments) {
        List<Embedding> embeddings = new ArrayList<>();
        int tokens = 0;
        for (TextSegment segment : segments) {
            EmbeddingAndTokenCount result = model().embed(segment.text());
            embeddings.add(Embedding.from(result.embedding()));
            tokens += result.tokenCount();
        }
        return Response.from(embeddings, new TokenUsage(tokens));
    }

    /** Estimates the token count of the text using the local tokenizer. */
    default int estimateTokenCount(String text) {
        return model().countTokens(text);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/util/DriftKitExceptions.java
|
package ai.driftkit.embedding.core.util;
/**
 * Static factories for the exception types thrown throughout the embedding module.
 */
public final class DriftKitExceptions {

    private DriftKitExceptions() {
        // static factory holder; never instantiated
    }

    /**
     * Builds an {@link IllegalArgumentException} with the given message.
     *
     * @param message the exception message
     * @return the exception
     */
    public static IllegalArgumentException illegalArgument(String message) {
        return new IllegalArgumentException(message);
    }

    /**
     * Builds an {@link IllegalArgumentException} with a {@link String#format}-style message.
     *
     * @param format the format string
     * @param args the format arguments
     * @return the exception
     */
    public static IllegalArgumentException illegalArgument(String format, Object... args) {
        String message = String.format(format, args);
        return new IllegalArgumentException(message);
    }

    /**
     * Builds an {@link IllegalArgumentException} with the given message and cause.
     *
     * @param message the exception message
     * @param cause the underlying cause
     * @return the exception
     */
    public static IllegalArgumentException illegalArgument(String message, Throwable cause) {
        return new IllegalArgumentException(message, cause);
    }

    /**
     * Builds an {@link IllegalStateException} with the given message.
     *
     * @param message the exception message
     * @return the exception
     */
    public static IllegalStateException illegalState(String message) {
        return new IllegalStateException(message);
    }

    /**
     * Builds a {@link RuntimeException} with the given message.
     *
     * @param message the exception message
     * @return the exception
     */
    public static RuntimeException runtime(String message) {
        return new RuntimeException(message);
    }

    /**
     * Builds a {@link RuntimeException} with a {@link String#format}-style message.
     *
     * @param format the format string
     * @param args the format arguments
     * @return the exception
     */
    public static RuntimeException runtime(String format, Object... args) {
        String message = String.format(format, args);
        return new RuntimeException(message);
    }

    /**
     * Builds a {@link RuntimeException} with the given message and cause.
     *
     * @param message the exception message
     * @param cause the underlying cause
     * @return the exception
     */
    public static RuntimeException runtime(String message, Throwable cause) {
        return new RuntimeException(message, cause);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core
|
java-sources/ai/driftkit/driftkit-embedding-core/0.8.1/ai/driftkit/embedding/core/util/ValidationUtils.java
|
package ai.driftkit.embedding.core.util;
/**
 * Argument-validation helpers used across the embedding module.
 */
public final class ValidationUtils {

    private ValidationUtils() {
        // static helper holder; never instantiated
    }

    /**
     * Rejects a null argument.
     *
     * @param object the object to check
     * @param parameterName the parameter name used in the error message
     * @param <T> the type of the object
     * @return the object, when non-null
     * @throws IllegalArgumentException if the object is null
     */
    public static <T> T ensureNotNull(T object, String parameterName) {
        if (object != null) {
            return object;
        }
        throw new IllegalArgumentException(parameterName + " cannot be null");
    }

    /**
     * Rejects a null, empty, or whitespace-only string.
     *
     * @param string the string to check
     * @param parameterName the parameter name used in the error message
     * @return the string, when it has content
     * @throws IllegalArgumentException if the string is null or blank
     */
    public static String ensureNotBlank(String string, String parameterName) {
        if (string != null && !string.trim().isEmpty()) {
            return string;
        }
        throw new IllegalArgumentException(parameterName + " cannot be null or empty");
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-spring-ai/0.8.1/ai/driftkit/embedding
|
java-sources/ai/driftkit/driftkit-embedding-spring-ai/0.8.1/ai/driftkit/embedding/springai/SpringAiEmbeddingAdapter.java
|
package ai.driftkit.embedding.springai;
import ai.djl.modality.nlp.preprocess.SimpleTokenizer;
import ai.driftkit.common.service.TextTokenizer;
import ai.driftkit.config.EtlConfig.EmbeddingServiceConfig;
import ai.driftkit.embedding.core.domain.Embedding;
import ai.driftkit.embedding.core.domain.TextSegment;
import ai.driftkit.embedding.core.local.AIOnnxBertBiEncoder;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.embedding.core.domain.Response;
import ai.driftkit.embedding.core.domain.TokenUsage;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.ai.embedding.EmbeddingRequest;
import org.springframework.ai.embedding.EmbeddingResponse;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Adapter that bridges Spring AI EmbeddingModel with DriftKit EmbeddingModel interface.
* This allows using any Spring AI embedding model implementation within the DriftKit framework.
*/
@Slf4j
public class SpringAiEmbeddingAdapter implements EmbeddingModel {
private final org.springframework.ai.embedding.EmbeddingModel springAiEmbeddingModel;
private final String modelName;
private final SimpleTokenizer tokenizer = new SimpleTokenizer();
/**
 * Creates an adapter over the given Spring AI embedding model.
 *
 * @param springAiEmbeddingModel the delegate Spring AI model; must not be null
 * @param modelName the name this adapter answers to in {@link #supportsName(String)}; must not be blank
 */
public SpringAiEmbeddingAdapter(org.springframework.ai.embedding.EmbeddingModel springAiEmbeddingModel, String modelName) {
    // Validate eagerly so misconfiguration surfaces at construction time.
    if (springAiEmbeddingModel == null) {
        throw new IllegalArgumentException("Spring AI EmbeddingModel cannot be null");
    }
    if (StringUtils.isBlank(modelName)) {
        throw new IllegalArgumentException("Model name cannot be blank");
    }
    this.springAiEmbeddingModel = springAiEmbeddingModel;
    this.modelName = modelName;
}
@Override
public boolean supportsName(String name) {
    // Case-insensitive match against the configured model name.
    return modelName.equalsIgnoreCase(name);
}
@Override
public AIOnnxBertBiEncoder model() {
    // Remote Spring AI providers expose no local ONNX encoder.
    throw new UnsupportedOperationException(
            "SpringAiEmbeddingAdapter does not support local ONNX models. " +
            "It uses remote embedding services through Spring AI providers."
    );
}
@Override
public void configure(EmbeddingServiceConfig config) {
    // No-op: the underlying Spring AI model is configured by Spring itself.
    log.info("Configured Spring AI Embedding adapter with model name: {}", modelName);
}
/**
 * Embeds every segment by delegating the batch to the Spring AI model.
 * Null or empty segments are skipped with a warning; note that skipping means
 * the returned embeddings may not align 1:1 with the input list — TODO confirm
 * callers tolerate this.
 *
 * @param segments the segments to embed; an empty/null list yields an empty response
 * @return the embeddings together with token usage
 * @throws RuntimeException if the Spring AI call fails or returns a null response
 */
@Override
public Response<List<Embedding>> embedAll(List<TextSegment> segments) {
    if (CollectionUtils.isEmpty(segments)) {
        log.debug("Empty segments provided, returning empty embeddings");
        return Response.from(Collections.emptyList());
    }
    // Extract texts and estimate tokens
    List<String> texts = new ArrayList<>();
    int totalEstimatedTokens = 0;
    for (TextSegment segment : segments) {
        if (segment == null || StringUtils.isEmpty(segment.text())) {
            log.warn("Skipping null or empty segment");
            continue;
        }
        texts.add(segment.text());
        totalEstimatedTokens += estimateTokenCount(segment.text());
    }
    if (texts.isEmpty()) {
        log.debug("No valid texts to embed, returning empty embeddings");
        return Response.from(Collections.emptyList());
    }
    try {
        log.debug("Embedding {} text segments", texts.size());
        // Fix: removed a dead, never-used EmbeddingRequest local; the call below
        // passes the texts directly.
        EmbeddingResponse springAiResponse = springAiEmbeddingModel.embedForResponse(texts);
        if (springAiResponse == null || springAiResponse.getResults() == null) {
            throw new RuntimeException("Received null response from Spring AI embedding model");
        }
        // Convert Spring AI embeddings to DriftKit embeddings
        List<Embedding> embeddings = new ArrayList<>();
        for (org.springframework.ai.embedding.Embedding springAiEmbedding : springAiResponse.getResults()) {
            if (springAiEmbedding == null || springAiEmbedding.getOutput() == null) {
                log.warn("Received null embedding from Spring AI, skipping");
                continue;
            }
            float[] floatArray = springAiEmbedding.getOutput();
            embeddings.add(Embedding.from(floatArray));
        }
        // Extract token usage if available; falls back to the local estimate.
        TokenUsage tokenUsage = extractTokenUsage(springAiResponse, totalEstimatedTokens);
        log.debug("Successfully embedded {} segments", embeddings.size());
        return Response.from(embeddings, tokenUsage);
    } catch (Exception e) {
        log.error("Failed to generate embeddings", e);
        throw new RuntimeException("Failed to generate embeddings: " + e.getMessage(), e);
    }
}
@Override
public Response<Embedding> embed(TextSegment segment) {
if (segment == null) {
throw new IllegalArgumentException("TextSegment cannot be null");
}
Response<List<Embedding>> response = embedAll(List.of(segment));
List<Embedding> embeddings = response.content();
if (CollectionUtils.isEmpty(embeddings)) {
log.warn("No embeddings generated for segment");
return null;
}
return Response.from(embeddings.get(0), response.tokenUsage());
}
@Override
public int estimateTokenCount(String text) {
if (StringUtils.isEmpty(text)) {
return 0;
}
// Use SimpleTokenizer's logic for consistent token estimation
return (int) (text.length() * TextTokenizer.DEFAULT_TOKEN_COST);
}
/**
* Extracts token usage from Spring AI response.
*
* @param response the Spring AI embedding response
* @param estimatedTokens the estimated token count
* @return TokenUsage object
*/
private TokenUsage extractTokenUsage(EmbeddingResponse response, int estimatedTokens) {
try {
if (response.getMetadata() != null &&
response.getMetadata().getUsage() != null) {
Integer totalTokens = response.getMetadata().getUsage().getTotalTokens();
if (totalTokens != null && totalTokens > 0) {
return new TokenUsage(totalTokens);
}
}
} catch (Exception e) {
log.debug("Could not extract token usage from response, using estimated count", e);
}
// Fall back to estimated tokens
return new TokenUsage(estimatedTokens);
}
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-spring-ai-starter/0.8.1/ai/driftkit/embedding/springai
|
java-sources/ai/driftkit/driftkit-embedding-spring-ai-starter/0.8.1/ai/driftkit/embedding/springai/autoconfigure/SpringAiEmbeddingAutoConfiguration.java
|
package ai.driftkit.embedding.springai.autoconfigure;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.embedding.springai.SpringAiEmbeddingAdapter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Spring Boot auto-configuration for Spring AI embedding model integration.
 * This configuration automatically creates DriftKit EmbeddingModel adapters
 * for any Spring AI EmbeddingModel beans found in the application context.
 */
@Slf4j
@AutoConfiguration
@ConditionalOnClass({org.springframework.ai.embedding.EmbeddingModel.class, EmbeddingModel.class})
@EnableConfigurationProperties(SpringAiEmbeddingProperties.class)
public class SpringAiEmbeddingAutoConfiguration {

    /**
     * Creates a DriftKit EmbeddingModel adapter for Spring AI EmbeddingModel bean.
     * The adapter allows using Spring AI embedding models through the DriftKit interface.
     */
    @Configuration
    @ConditionalOnProperty(prefix = "driftkit.embedding.spring-ai", name = "enabled", havingValue = "true", matchIfMissing = true)
    public static class SpringAiEmbeddingAdapterConfiguration {

        /**
         * Creates a default DriftKit EmbeddingModel bean if configured.
         * This allows @Autowired EmbeddingModel to work when only Spring AI is configured.
         *
         * @param springAiModel the Spring AI embedding model detected in the context
         * @param properties    integration properties (model name defaults to "spring-ai-default")
         * @return adapter exposing the Spring AI model through the DriftKit interface
         * @throws IllegalArgumentException if the configured model name is null or blank
         */
        @Bean
        @ConditionalOnBean(org.springframework.ai.embedding.EmbeddingModel.class)
        @ConditionalOnMissingBean(EmbeddingModel.class)
        @ConditionalOnProperty(prefix = "driftkit.embedding.spring-ai", name = "auto-create-adapter", havingValue = "true", matchIfMissing = true)
        public EmbeddingModel springAiEmbeddingAdapter(
                org.springframework.ai.embedding.EmbeddingModel springAiModel,
                SpringAiEmbeddingProperties properties) {
            String modelName = properties.getModelName();
            // Fail fast with the offending property name so misconfiguration is obvious
            // at startup. Rejects blank (not just null) names for consistency with
            // SpringAiEmbeddingAdapter, whose constructor rejects blank names anyway.
            if (modelName == null || modelName.isBlank()) {
                throw new IllegalArgumentException(
                        "driftkit.embedding.spring-ai.model-name is required and must not be blank");
            }
            log.info("Creating Spring AI embedding adapter with name: {}", modelName);
            return new SpringAiEmbeddingAdapter(springAiModel, modelName);
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-spring-ai-starter/0.8.1/ai/driftkit/embedding/springai
|
java-sources/ai/driftkit/driftkit-embedding-spring-ai-starter/0.8.1/ai/driftkit/embedding/springai/autoconfigure/SpringAiEmbeddingProperties.java
|
package ai.driftkit.embedding.springai.autoconfigure;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
/**
 * Configuration properties for Spring AI embedding integration.
 *
 * <p>Bound from the {@code driftkit.embedding.spring-ai.*} property namespace.
 * Getters/setters are generated by Lombok {@code @Data}.
 */
@Data
@ConfigurationProperties(prefix = "driftkit.embedding.spring-ai")
public class SpringAiEmbeddingProperties {

    /**
     * Whether Spring AI embedding integration is enabled.
     * Defaults to {@code true}.
     */
    private boolean enabled = true;

    /**
     * The name to use for the Spring AI embedding model adapter.
     * This name will be used when creating the adapter and for model lookup.
     * Defaults to {@code "spring-ai-default"}.
     */
    private String modelName = "spring-ai-default";

    /**
     * Whether to automatically create a default DriftKit EmbeddingModel bean
     * when a Spring AI EmbeddingModel is detected.
     * Defaults to {@code true}.
     */
    private boolean autoCreateAdapter = true;
}
|
0
|
java-sources/ai/driftkit/driftkit-embedding-spring-boot-starter/0.8.1/ai/driftkit/embedding
|
java-sources/ai/driftkit/driftkit-embedding-spring-boot-starter/0.8.1/ai/driftkit/embedding/autoconfigure/EmbeddingAutoConfiguration.java
|
package ai.driftkit.embedding.autoconfigure;
import ai.driftkit.config.EtlConfig;
import ai.driftkit.config.EtlConfig.EmbeddingServiceConfig;
import ai.driftkit.embedding.core.service.EmbeddingFactory;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
/**
 * Auto-configuration for embedding services.
 *
 * <p>Creates an {@code EmbeddingModel} bean from the {@code EtlConfig.embedding}
 * section when an {@code EtlConfig} bean is present and the
 * {@code driftkit.embedding.name} property is set.
 */
@Slf4j
@AutoConfiguration
@ConditionalOnBean(EtlConfig.class)
@ConditionalOnProperty(name = "driftkit.embedding.name")
public class EmbeddingAutoConfiguration {

    /**
     * Builds the embedding model via {@code EmbeddingFactory}.
     *
     * @param config the ETL configuration holding the embedding section
     * @return the initialized model, or {@code null} when no embedding section exists
     * @throws RuntimeException if factory initialization fails, so startup fails loudly
     */
    @Bean
    @ConditionalOnMissingBean(EmbeddingModel.class)
    public EmbeddingModel embeddingModel(EtlConfig config) {
        try {
            EmbeddingServiceConfig embeddingConfig = config.getEmbedding();
            if (embeddingConfig == null) {
                log.warn("No embedding configuration found in EtlConfig");
                return null;
            }
            String serviceName = embeddingConfig.getName();
            log.info("Initializing embedding service: {}", serviceName);
            EmbeddingModel model = EmbeddingFactory.fromName(serviceName, embeddingConfig.getConfig());
            log.info("Successfully initialized embedding service: {}", serviceName);
            return model;
        } catch (Exception e) {
            log.error("Failed to initialize embedding service from configuration", e);
            throw new RuntimeException("Failed to initialize embedding service", e);
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/domain/LoadedDocument.java
|
package ai.driftkit.rag.core.domain;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.HashMap;
import java.util.Map;
/**
 * Represents a document loaded from a source before processing.
 * Holds the raw content plus descriptive metadata, with a {@link State}
 * tracking the loading lifecycle. Accessors, builder, and constructors
 * are generated by Lombok.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class LoadedDocument {

    /**
     * Lifecycle states a document moves through while being loaded.
     */
    public enum State {
        PENDING,     // Document is queued for loading
        IN_PROGRESS, // Document is currently being loaded
        LOADED,      // Document loaded successfully
        ERROR        // Document failed to load
    }

    // Unique identifier for the document.
    private String id;

    // Current lifecycle state; builder defaults to LOADED.
    @Builder.Default
    private State state = State.LOADED;

    // Raw content of the document (may be null for failed loads).
    private String content;

    // Where the document came from (e.g. file path, URL).
    private String source;

    // MIME type of the document.
    private String mimeType;

    // Arbitrary extra information about the document; builder defaults to empty.
    @Builder.Default
    private Map<String, Object> metadata = new HashMap<>();

    /**
     * Adds a single metadata entry, lazily creating the map when absent.
     *
     * @param key   metadata key
     * @param value metadata value
     * @return this document, for fluent chaining
     */
    public LoadedDocument withMetadata(String key, Object value) {
        Map<String, Object> map = this.metadata;
        if (map == null) {
            map = new HashMap<>();
            this.metadata = map;
        }
        map.put(key, value);
        return this;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/loader/DocumentLoader.java
|
package ai.driftkit.rag.core.loader;
import ai.driftkit.rag.core.domain.LoadedDocument;
import java.util.List;
import java.util.stream.Stream;
/**
 * Interface for loading documents from various sources.
 * Implementations should handle specific document sources like file systems, URLs, databases, etc.
 */
public interface DocumentLoader {

    /**
     * Loads every document from the configured source into memory.
     *
     * @return the loaded documents
     * @throws Exception if loading fails
     */
    List<LoadedDocument> load() throws Exception;

    /**
     * Loads documents as a stream. The default implementation materialises
     * {@link #load()} and streams over the result, so it offers no memory
     * benefit unless an implementation overrides it with a lazy source.
     *
     * @return a stream of loaded documents
     * @throws Exception if loading fails
     */
    default Stream<LoadedDocument> loadStream() throws Exception {
        List<LoadedDocument> documents = load();
        return documents.stream();
    }

    /**
     * Indicates whether {@link #loadStream()} is truly lazy for this loader.
     *
     * @return true if streaming is supported; false by default
     */
    default boolean supportsStreaming() {
        return false;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/loader/FileSystemLoader.java
|
package ai.driftkit.rag.core.loader;
import ai.driftkit.rag.core.domain.LoadedDocument;
import ai.driftkit.vector.spring.parser.UnifiedParser;
import ai.driftkit.vector.spring.parser.UnifiedParser.ByteArrayParserInput;
import ai.driftkit.vector.spring.domain.ContentType;
import ai.driftkit.vector.spring.domain.ParsedContent;
import lombok.Builder;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FilenameUtils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.*;
import java.util.stream.Stream;
/**
 * Document loader that reads files from the file system.
 * Supports various file types including text, PDF, images, etc. via UnifiedParser.
 *
 * <p>Instances are created through the Lombok {@code @Builder}; {@code rootPath}
 * and {@code parser} are mandatory, everything else has defaults. Files that fail
 * to parse are reported as {@code State.ERROR} documents rather than aborting the run.
 */
@Slf4j
@Builder
@RequiredArgsConstructor
public class FileSystemLoader implements DocumentLoader {

    // Root file or directory to load from (mandatory).
    @NonNull
    private final Path rootPath;

    // Parser used to extract text content from each file (mandatory).
    @NonNull
    private final UnifiedParser parser;

    // Whether to descend into subdirectories when rootPath is a directory.
    @Builder.Default
    private final boolean recursive = true;

    // Lower-case file extensions (without dot) eligible for loading.
    // NOTE(review): the audio/video extensions listed here have no entry in
    // EXTENSION_TO_CONTENT_TYPE, so loadFile falls back to ContentType.TEXT
    // for them — confirm UnifiedParser handles that as intended.
    @Builder.Default
    private final Set<String> extensions = Set.of(
            // Text formats
            "txt", "md", "json", "xml", "html", "csv",
            // Document formats
            "pdf", "doc", "docx", "odt",
            // Image formats
            "jpg", "jpeg", "png", "gif", "bmp",
            // Audio/Video formats (if transcription is needed)
            "mp3", "wav", "mp4", "avi"
    );

    // Substrings that, when present anywhere in a file's path, exclude the file.
    @Builder.Default
    private final Set<String> excludePatterns = Set.of();

    // Files larger than this are skipped (guards memory: files are read fully into RAM).
    @Builder.Default
    private final long maxFileSizeBytes = 50 * 1024 * 1024; // 50MB default

    // Whether dot-prefixed ("hidden") files are included.
    @Builder.Default
    private final boolean includeHidden = false;

    /**
     * Map file extensions to ContentType.
     */
    private static final Map<String, ContentType> EXTENSION_TO_CONTENT_TYPE = Map.ofEntries(
            // Text formats
            Map.entry("txt", ContentType.TEXT),
            Map.entry("md", ContentType.TEXT),
            Map.entry("json", ContentType.TEXT),
            Map.entry("xml", ContentType.XML),
            Map.entry("html", ContentType.HTML),
            Map.entry("csv", ContentType.TEXT),
            // Document formats
            Map.entry("pdf", ContentType.PDF),
            Map.entry("doc", ContentType.MICROSOFT_WORD),
            Map.entry("docx", ContentType.MICROSOFT_WORD),
            Map.entry("odt", ContentType.ODF_TEXT),
            // Image formats
            Map.entry("jpg", ContentType.JPG),
            Map.entry("jpeg", ContentType.JPG),
            Map.entry("png", ContentType.PNG),
            Map.entry("gif", ContentType.PNG), // UnifiedParser will handle as image
            Map.entry("bmp", ContentType.PNG)  // UnifiedParser will handle as image
    );

    /**
     * Load all documents from the file system.
     * Eagerly drains {@link #loadStream()}; the try-with-resources ensures the
     * underlying Files.walk stream is closed.
     *
     * @return all loaded documents (including ERROR-state ones for failed files)
     * @throws Exception if the root path is invalid or traversal fails
     */
    @Override
    public List<LoadedDocument> load() throws Exception {
        log.info("Loading documents from: {}", rootPath);
        List<LoadedDocument> documents = new ArrayList<>();
        try (Stream<LoadedDocument> stream = loadStream()) {
            stream.forEach(documents::add);
        }
        log.info("Loaded {} documents", documents.size());
        return documents;
    }

    /**
     * Load documents as a stream for memory-efficient processing.
     * Callers MUST close the returned stream (it may wrap Files.walk,
     * which holds open directory handles).
     *
     * @throws IOException if the root path does not exist or is not a regular file/directory
     */
    @Override
    public Stream<LoadedDocument> loadStream() throws Exception {
        if (!Files.exists(rootPath)) {
            throw new IOException("Path does not exist: " + rootPath);
        }
        if (Files.isRegularFile(rootPath)) {
            // Single file
            return Stream.of(loadFile(rootPath));
        } else if (Files.isDirectory(rootPath)) {
            // Directory
            return loadDirectory();
        } else {
            throw new IOException("Path is neither file nor directory: " + rootPath);
        }
    }

    /**
     * This loader supports streaming.
     */
    @Override
    public boolean supportsStreaming() {
        return true;
    }

    /**
     * Load documents from a directory.
     * The returned stream is backed by Files.walk and must be closed by the caller;
     * per-file failures are converted to ERROR documents instead of aborting the walk.
     */
    private Stream<LoadedDocument> loadDirectory() throws IOException {
        // Depth 1 = the directory's own entries only (non-recursive mode).
        int maxDepth = recursive ? Integer.MAX_VALUE : 1;
        return Files.walk(rootPath, maxDepth)
                .filter(Files::isRegularFile)
                .filter(this::shouldIncludeFile)
                .map(path -> {
                    try {
                        return loadFile(path);
                    } catch (Exception e) {
                        log.error("Failed to load file: {}", path, e);
                        // Return error document instead of null
                        return createErrorDocument(path, e);
                    }
                });
    }

    /**
     * Check if a file should be included based on filters.
     * Applies, in order: hidden-file filter, extension whitelist,
     * exclude-pattern substrings, and the size limit.
     */
    private boolean shouldIncludeFile(Path path) {
        String fileName = path.getFileName().toString();
        // Check hidden files
        if (!includeHidden && fileName.startsWith(".")) {
            return false;
        }
        // Check extension
        String extension = FilenameUtils.getExtension(fileName).toLowerCase();
        if (!extensions.isEmpty() && !extensions.contains(extension)) {
            return false;
        }
        // Check exclude patterns (plain substring match, not glob/regex)
        String pathStr = path.toString();
        for (String pattern : excludePatterns) {
            if (pathStr.contains(pattern)) {
                return false;
            }
        }
        // Check file size; unreadable attributes exclude the file defensively.
        try {
            long size = Files.size(path);
            if (size > maxFileSizeBytes) {
                log.warn("Skipping file {} - size {} exceeds limit {}", path, size, maxFileSizeBytes);
                return false;
            }
        } catch (IOException e) {
            log.warn("Failed to check file size: {}", path, e);
            return false;
        }
        return true;
    }

    /**
     * Load a single file using UnifiedParser.
     * Reads the whole file into memory (bounded by maxFileSizeBytes via
     * shouldIncludeFile) and records file + parsing metadata on the result.
     *
     * @throws IOException if the file cannot be read
     */
    private LoadedDocument loadFile(Path path) throws IOException {
        log.trace("Loading file: {}", path);
        String fileName = path.getFileName().toString();
        String extension = FilenameUtils.getExtension(fileName).toLowerCase();
        // Determine content type; unknown extensions fall back to plain text.
        ContentType contentType = EXTENSION_TO_CONTENT_TYPE.getOrDefault(extension, ContentType.TEXT);
        // Read file as bytes
        byte[] fileBytes = Files.readAllBytes(path);
        // Create parser input
        ByteArrayParserInput parserInput = new ByteArrayParserInput(fileBytes, fileName, contentType);
        // Parse using UnifiedParser
        ParsedContent parsed = parser.parse(parserInput);
        // Build metadata
        Map<String, Object> metadata = new HashMap<>();
        metadata.put("fileName", fileName);
        metadata.put("filePath", path.toString());
        metadata.put("fileSize", Files.size(path));
        metadata.put("lastModified", Files.getLastModifiedTime(path).toMillis());
        metadata.put("extension", extension);
        metadata.put("contentType", contentType.name());
        // Add parsing metadata if available
        if (parsed.getMetadata() != null) {
            metadata.put("parsingMetadata", parsed.getMetadata());
        }
        // "Stated" is a typo in the upstream ParsedContent API (started time).
        metadata.put("parsingTime", parsed.getParsingEndTime() - parsed.getParsingStatedTime());
        return LoadedDocument.builder()
                .id(parsed.getId())
                .content(parsed.getParsedContent())
                .source(path.toString())
                .mimeType(contentType.getMimeType())
                .metadata(metadata)
                .state(LoadedDocument.State.LOADED)
                .build();
    }

    /**
     * Create an error document when file loading fails.
     * This allows tracking which files failed to load and why.
     */
    private LoadedDocument createErrorDocument(Path path, Exception error) {
        String fileName = path.getFileName().toString();
        String extension = FilenameUtils.getExtension(fileName).toLowerCase();
        Map<String, Object> metadata = new HashMap<>();
        metadata.put("fileName", fileName);
        metadata.put("filePath", path.toString());
        metadata.put("extension", extension);
        metadata.put("errorMessage", error.getMessage());
        metadata.put("errorType", error.getClass().getName());
        // Try to get file attributes if possible; best-effort only.
        try {
            metadata.put("fileSize", Files.size(path));
            metadata.put("lastModified", Files.getLastModifiedTime(path).toMillis());
        } catch (IOException e) {
            log.trace("Could not get file attributes for error document: {}", path);
        }
        // NOTE: hashCode-derived ids can collide across distinct paths.
        return LoadedDocument.builder()
                .id("error-" + path.toString().hashCode())
                .content(null) // Empty content for error documents
                .source(path.toString())
                .mimeType("application/octet-stream") // Unknown mime type
                .metadata(metadata)
                .state(LoadedDocument.State.ERROR)
                .build();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/loader/UrlLoader.java
|
package ai.driftkit.rag.core.loader;
import ai.driftkit.rag.core.domain.LoadedDocument;
import ai.driftkit.vector.spring.parser.UnifiedParser;
import ai.driftkit.vector.spring.parser.UnifiedParser.ByteArrayParserInput;
import ai.driftkit.vector.spring.parser.UnifiedParser.StringParserInput;
import ai.driftkit.vector.spring.parser.UnifiedParser.ParserInput;
import ai.driftkit.vector.spring.domain.ContentType;
import ai.driftkit.vector.spring.domain.ParsedContent;
import com.fasterxml.jackson.annotation.JsonTypeName;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.io.IOException;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Stream;
/**
 * Document loader that fetches content from URLs.
 * Supports web pages, PDFs, and other content accessible via HTTP/HTTPS.
 *
 * <p>Instances are created through the Lombok {@code @Builder}; {@code urls} and
 * {@code parser} are mandatory. Failed fetches are reported as {@code State.ERROR}
 * documents rather than aborting the run.
 */
@Slf4j
@Builder
@RequiredArgsConstructor
public class UrlLoader implements DocumentLoader {

    // URLs to fetch (mandatory).
    @NonNull
    private final List<String> urls;

    // Parser used to extract text from the fetched content (mandatory).
    @NonNull
    private final UnifiedParser parser;

    // Whether HTTP redirects are followed (see loadUrl, which selects the client).
    @Builder.Default
    private final boolean followRedirects = true;

    // Per-request timeout.
    @Builder.Default
    private final int timeoutSeconds = 30;

    // Responses larger than this are rejected with an ERROR document.
    @Builder.Default
    private final int maxContentSizeBytes = 50 * 1024 * 1024; // 50MB default

    // Extra request headers sent with every fetch.
    @Builder.Default
    private final Map<String, String> headers = Map.of(
            "User-Agent", "DriftKit-RAG/1.0"
    );

    // Shared, thread-safe clients. Two instances are kept so the per-loader
    // followRedirects flag is actually honored (a single static client with a
    // fixed redirect policy would silently ignore the builder setting).
    private static final HttpClient FOLLOWING_CLIENT = HttpClient.newBuilder()
            .connectTimeout(Duration.ofSeconds(10))
            .followRedirects(HttpClient.Redirect.NORMAL)
            .build();

    private static final HttpClient NON_FOLLOWING_CLIENT = HttpClient.newBuilder()
            .connectTimeout(Duration.ofSeconds(10))
            .followRedirects(HttpClient.Redirect.NEVER)
            .build();

    /**
     * Custom ParserInput for URLs.
     * NOTE(review): not used by loadUrl at present (String/ByteArray inputs are
     * used instead); kept as public API — confirm whether external callers rely on it.
     */
    @Data
    @NoArgsConstructor
    @JsonTypeName("URL")
    public static class UrlParserInput extends ParserInput<byte[]> {
        private String url;
        private String detectedMimeType;
        private Map<String, String> responseHeaders;

        public UrlParserInput(byte[] content, String url, ContentType contentType,
                              String detectedMimeType, Map<String, String> responseHeaders) {
            super(content, contentType);
            this.url = url;
            this.detectedMimeType = detectedMimeType;
            this.responseHeaders = responseHeaders;
        }
    }

    /**
     * Load all documents from the configured URLs.
     *
     * @return one document per URL, in order (ERROR-state documents for failures)
     */
    @Override
    public List<LoadedDocument> load() throws Exception {
        log.info("Loading {} URLs", urls.size());
        return urls.stream()
                .map(this::loadUrl)
                .toList();
    }

    /**
     * Load documents as a stream for memory-efficient processing.
     * Each URL is fetched lazily as the stream is consumed.
     */
    @Override
    public Stream<LoadedDocument> loadStream() throws Exception {
        return urls.stream()
                .map(this::loadUrl);
    }

    /**
     * This loader supports streaming.
     */
    @Override
    public boolean supportsStreaming() {
        return true;
    }

    /**
     * Load content from a single URL.
     * Never throws: any failure (HTTP error, oversize body, I/O, interrupt)
     * is converted to an ERROR-state document.
     */
    private LoadedDocument loadUrl(String urlString) {
        try {
            log.debug("Loading URL: {}", urlString);
            // Build HTTP request
            HttpRequest.Builder requestBuilder = HttpRequest.newBuilder()
                    .uri(URI.create(urlString))
                    .timeout(Duration.ofSeconds(timeoutSeconds))
                    .GET();
            // Add custom headers
            headers.forEach(requestBuilder::header);
            HttpRequest request = requestBuilder.build();

            // Select the client matching this loader's redirect policy.
            HttpClient httpClient = followRedirects ? FOLLOWING_CLIENT : NON_FOLLOWING_CLIENT;
            HttpResponse<byte[]> response = httpClient.send(request, HttpResponse.BodyHandlers.ofByteArray());

            if (response.statusCode() >= 200 && response.statusCode() < 300) {
                // Get content type from response
                String contentTypeHeader = response.headers().firstValue("Content-Type")
                        .orElse("text/html");
                // Extract MIME type (remove charset and other parameters)
                String mimeType = contentTypeHeader.split(";")[0].trim().toLowerCase();
                // Map response headers (first value per header name only)
                Map<String, String> responseHeaders = new HashMap<>();
                response.headers().map().forEach((key, values) -> {
                    if (!values.isEmpty()) {
                        responseHeaders.put(key, values.get(0));
                    }
                });

                byte[] content = response.body();
                // Check content size
                if (content.length > maxContentSizeBytes) {
                    log.warn("Content from {} exceeds size limit: {} bytes", urlString, content.length);
                    return createErrorDocument(urlString, response.statusCode(),
                            new IOException("Content size exceeds limit: " + content.length + " bytes"));
                }

                // Determine ContentType enum based on MIME type
                ContentType contentType = determineContentType(mimeType);

                // Create appropriate parser input: decode text-based content with the
                // response charset, pass binary content (PDF, images, ...) through raw.
                ParserInput<?> parserInput;
                if (contentType == ContentType.HTML || contentType == ContentType.TEXT ||
                    contentType == ContentType.XML) {
                    String textContent = new String(content, getCharset(contentTypeHeader));
                    parserInput = new StringParserInput(textContent, contentType);
                } else {
                    parserInput = new ByteArrayParserInput(content, urlString, contentType);
                }

                // Parse using UnifiedParser
                ParsedContent parsed = parser.parse(parserInput);

                // Build metadata
                Map<String, Object> metadata = new HashMap<>();
                metadata.put("url", urlString);
                metadata.put("httpStatus", response.statusCode());
                metadata.put("contentLength", content.length);
                metadata.put("contentType", contentTypeHeader);
                metadata.put("mimeType", mimeType);
                metadata.put("responseHeaders", responseHeaders);
                // Add parsing metadata if available
                if (parsed.getMetadata() != null) {
                    metadata.put("parsingMetadata", parsed.getMetadata());
                }
                // "Stated" is a typo in the upstream ParsedContent API (started time).
                metadata.put("parsingTime", parsed.getParsingEndTime() - parsed.getParsingStatedTime());

                return LoadedDocument.builder()
                        .id(parsed.getId())
                        .content(parsed.getParsedContent())
                        .source(urlString)
                        .mimeType(mimeType)
                        .metadata(metadata)
                        .state(LoadedDocument.State.LOADED)
                        .build();
            } else {
                log.error("Failed to load URL: {} - HTTP {}", urlString, response.statusCode());
                return createErrorDocument(urlString, response.statusCode(),
                        new IOException("HTTP error: " + response.statusCode()));
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe cancellation;
            // swallowing it here would hide shutdown requests.
            Thread.currentThread().interrupt();
            log.error("Failed to load URL: {}", urlString, e);
            return createErrorDocument(urlString, -1, e);
        } catch (Exception e) {
            log.error("Failed to load URL: {}", urlString, e);
            return createErrorDocument(urlString, -1, e);
        }
    }

    /**
     * Determine ContentType enum from MIME type.
     * Unrecognized types fall back to TEXT.
     */
    private ContentType determineContentType(String mimeType) {
        return switch (mimeType) {
            case "text/html", "application/xhtml+xml" -> ContentType.HTML;
            case "text/plain" -> ContentType.TEXT;
            case "application/xml", "text/xml" -> ContentType.XML;
            case "application/pdf" -> ContentType.PDF;
            case "image/jpeg", "image/jpg" -> ContentType.JPG;
            case "image/png" -> ContentType.PNG;
            case "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
                 "application/msword" -> ContentType.MICROSOFT_WORD;
            case "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
                 "application/vnd.ms-excel" -> ContentType.MICROSOFT_EXCEL;
            case "application/vnd.openxmlformats-officedocument.presentationml.presentation",
                 "application/vnd.ms-powerpoint" -> ContentType.MICROSOFT_POWERPOINT;
            default -> ContentType.TEXT; // Default fallback
        };
    }

    /**
     * Extract charset from Content-Type header.
     *
     * @return the declared charset, or UTF-8 when none is present
     */
    private String getCharset(String contentType) {
        String[] parts = contentType.split(";");
        for (String part : parts) {
            String trimmed = part.trim();
            if (trimmed.startsWith("charset=")) {
                return trimmed.substring(8).replace("\"", "");
            }
        }
        return "UTF-8"; // Default charset
    }

    /**
     * Create a loader for a single URL.
     */
    public static UrlLoader fromUrl(String url, UnifiedParser parser) {
        return UrlLoader.builder()
                .urls(List.of(url))
                .parser(parser)
                .build();
    }

    /**
     * Create a loader for multiple URLs.
     */
    public static UrlLoader fromUrls(List<String> urls, UnifiedParser parser) {
        return UrlLoader.builder()
                .urls(urls)
                .parser(parser)
                .build();
    }

    /**
     * Create an error document when URL loading fails.
     * This allows tracking which URLs failed to load and why.
     *
     * @param httpStatus HTTP status code, or a negative value when no response was received
     */
    private LoadedDocument createErrorDocument(String urlString, int httpStatus, Exception error) {
        Map<String, Object> metadata = new HashMap<>();
        metadata.put("url", urlString);
        if (httpStatus > 0) {
            metadata.put("httpStatus", httpStatus);
        }
        metadata.put("errorMessage", error.getMessage());
        metadata.put("errorType", error.getClass().getName());
        // NOTE: hashCode-derived ids can collide across distinct URLs.
        return LoadedDocument.builder()
                .id("error-" + urlString.hashCode())
                .content(null) // No content for error documents
                .source(urlString)
                .mimeType("application/octet-stream") // Unknown mime type
                .metadata(metadata)
                .state(LoadedDocument.State.ERROR)
                .build();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/reranker/ModelBasedReranker.java
|
package ai.driftkit.rag.core.reranker;
import ai.driftkit.common.domain.client.ModelClient;
import ai.driftkit.common.domain.client.ModelTextRequest;
import ai.driftkit.common.domain.client.ModelTextResponse;
import ai.driftkit.common.domain.client.ResponseFormat;
import ai.driftkit.common.domain.client.Role;
import ai.driftkit.common.domain.client.ModelImageResponse.ModelContentMessage;
import ai.driftkit.common.domain.client.ModelImageResponse.ModelContentMessage.ModelContentElement;
import ai.driftkit.common.domain.client.ModelTextRequest.MessageType;
import ai.driftkit.common.utils.JsonUtils;
import ai.driftkit.context.core.util.DefaultPromptLoader;
import ai.driftkit.common.domain.Prompt;
import ai.driftkit.context.core.service.PromptService;
import ai.driftkit.rag.core.retriever.Retriever.RetrievalResult;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyDescription;
import lombok.Builder;
import lombok.Data;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.*;
/**
 * Reranker that uses a language model to rerank documents based on relevance.
 * Uses structured output for reliable parsing.
 *
 * <p>Instances are created through the Lombok {@code @Builder}; {@code modelClient}
 * and {@code promptService} are mandatory.
 */
@Slf4j
@Builder
@RequiredArgsConstructor
public class ModelBasedReranker implements Reranker {

    // Client used to call the language model (mandatory).
    @NonNull
    private final ModelClient modelClient;

    // NOTE(review): injected but not referenced in this class — the prompt is
    // resolved via DefaultPromptLoader.loadPrompt below. Confirm whether this
    // field should be used for prompt resolution or can be removed from the builder.
    @NonNull
    private final PromptService promptService;

    // Identifier of the reranking prompt template.
    @Builder.Default
    private final String promptId = "rag.rerank";

    // Model name sent with each reranking request.
    @Builder.Default
    private final String model = "gpt-4o";

    // Temperature 0 for deterministic scoring.
    @Builder.Default
    private final float temperature = 0.0f;

    /**
     * Structured output class for reranking scores.
     * Deserialized from the model's JSON-schema-constrained response.
     */
    @Data
    public static class RerankingScores {
        @JsonProperty(value = "document_scores", required = true)
        @JsonPropertyDescription("Map of document IDs to relevance scores (0.0 to 1.0)")
        private Map<String, Float> documentScores;
    }

    /**
     * Rerank documents based on the query.
     *
     * <p>Each candidate is assigned a synthetic id ("doc0", "doc1", ...), the model
     * is asked to score all ids, and results are rebuilt from the returned score map.
     * Documents the model omits from its score map are silently dropped.
     *
     * @param query   the original search query
     * @param results retrieved candidates to rerank
     * @param config  reranking configuration; {@code topK > 0} caps the result count
     * @return results sorted by rerank score, highest first
     * @throws RuntimeException if the model response cannot be parsed
     */
    @Override
    public List<RerankResult> rerank(String query, List<RetrievalResult> results, RerankConfig config) throws Exception {
        if (results.isEmpty()) {
            return List.of();
        }
        log.debug("Reranking {} documents for query: {}", results.size(), query);

        // Prepare documents text for the prompt, remembering which synthetic id
        // maps to which original result.
        StringBuilder documentsText = new StringBuilder();
        Map<String, RetrievalResult> resultMap = new HashMap<>();
        int docIndex = 0;
        for (RetrievalResult result : results) {
            String docId = "doc" + docIndex++;
            documentsText.append("Document ID ").append(docId).append(":\n");
            documentsText.append(result.document().getPageContent()).append("\n\n");
            resultMap.put(docId, result);
        }

        // Prepare variables for prompt
        Map<String, Object> variables = new HashMap<>();
        variables.put("query", query);
        variables.put("documents", documentsText.toString());
        // Load prompt using DefaultPromptLoader
        String finalPrompt = DefaultPromptLoader.loadPrompt(promptId, variables);

        // Create request with structured output so the response is parseable JSON.
        ModelTextRequest request = ModelTextRequest.builder()
                .messages(List.of(
                        ModelContentMessage.builder()
                                .role(Role.user)
                                .content(List.of(
                                        ModelContentElement.builder()
                                                .type(MessageType.text)
                                                .text(finalPrompt)
                                                .build()
                                ))
                                .build()
                ))
                .model(model)
                .temperature((double) temperature)
                .responseFormat(ResponseFormat.jsonSchema(RerankingScores.class))
                .build();

        ModelTextResponse response = modelClient.textToText(request);

        // Parse the structured response (first choice only).
        String responseContent = response.getChoices().get(0).getMessage().getContent();
        RerankingScores rerankingScores = JsonUtils.fromJson(responseContent, RerankingScores.class);
        if (rerankingScores == null || rerankingScores.getDocumentScores() == null) {
            log.error("Failed to parse reranking response: {}", responseContent);
            throw new RuntimeException("Invalid reranking response");
        }
        Map<String, Float> scores = rerankingScores.getDocumentScores();

        // Create reranked results; unknown ids returned by the model are ignored.
        List<RerankResult> rerankedResults = new ArrayList<>();
        for (Map.Entry<String, Float> entry : scores.entrySet()) {
            RetrievalResult originalResult = resultMap.get(entry.getKey());
            if (originalResult != null) {
                // Create RerankResult with both original and rerank scores
                RerankResult rerankedResult = RerankResult.from(originalResult, entry.getValue());
                rerankedResults.add(rerankedResult);
            }
        }

        // Sort by rerank score descending
        rerankedResults.sort((a, b) -> Float.compare(b.rerankScore(), a.rerankScore()));

        // Apply topK limit from config (subList returns a view over rerankedResults).
        int limit = config.topK() > 0 ? Math.min(config.topK(), rerankedResults.size()) : rerankedResults.size();
        List<RerankResult> finalResults = rerankedResults.subList(0, limit);
        log.debug("Reranking complete. Returning {} documents", finalResults.size());
        return finalResults;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/reranker/Reranker.java
|
package ai.driftkit.rag.core.reranker;
import ai.driftkit.rag.core.retriever.Retriever.RetrievalResult;
import ai.driftkit.vector.core.domain.Document;
import java.util.List;
import java.util.Map;
/**
 * Interface for reranking retrieved documents to improve relevance.
 * Implementations can use various reranking models and strategies.
 */
public interface Reranker {

    /**
     * Configuration for reranking operations.
     *
     * @param topK        maximum number of results to keep after reranking
     *                    (implementations may treat non-positive values as "no limit")
     * @param modelParams implementation-specific tuning parameters
     */
    record RerankConfig(
        int topK,
        Map<String, Object> modelParams
    ) {
        /** Default configuration: top 10 results and no extra model parameters. */
        public static RerankConfig defaultConfig() {
            return new RerankConfig(10, Map.of());
        }
    }

    /**
     * Result of a reranking operation containing the document,
     * original retrieval score, and new rerank score.
     *
     * @param document      the reranked document
     * @param originalScore score assigned by the initial retrieval step
     * @param rerankScore   score assigned by the reranker
     * @param metadata      metadata carried over from the retrieval result
     */
    record RerankResult(
        Document document,
        float originalScore,
        float rerankScore,
        Map<String, Object> metadata
    ) {
        /**
         * Create a RerankResult from a RetrievalResult and new rerank score.
         * The retrieval score is preserved as {@code originalScore}.
         */
        public static RerankResult from(RetrievalResult retrieval, float rerankScore) {
            return new RerankResult(
                retrieval.document(),
                retrieval.score(),
                rerankScore,
                retrieval.metadata()
            );
        }

        /**
         * Convert back to RetrievalResult using the rerank score.
         * Note: the original retrieval score is dropped by this conversion.
         */
        public RetrievalResult toRetrievalResult() {
            return new RetrievalResult(document, rerankScore, metadata);
        }
    }

    /**
     * Rerank a list of retrieved documents based on the original query.
     *
     * @param query The original search query
     * @param results The retrieved documents to rerank
     * @param config Reranking configuration
     * @return Reranked list of documents with both original and rerank scores
     * @throws Exception if reranking fails
     */
    List<RerankResult> rerank(String query, List<RetrievalResult> results, RerankConfig config) throws Exception;

    /**
     * Rerank with default configuration ({@link RerankConfig#defaultConfig()}).
     *
     * @param query The original search query
     * @param results The retrieved documents to rerank
     * @return Reranked list of documents with both original and rerank scores
     * @throws Exception if reranking fails
     */
    default List<RerankResult> rerank(String query, List<RetrievalResult> results) throws Exception {
        return rerank(query, results, RerankConfig.defaultConfig());
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/retriever/Retriever.java
|
package ai.driftkit.rag.core.retriever;
import ai.driftkit.vector.core.domain.Document;
import java.util.List;
import java.util.Map;
/**
 * Interface for retrieving relevant documents based on a query.
 * Implementations can use various retrieval strategies.
 */
public interface Retriever {

    /**
     * Configuration for retrieval operations.
     *
     * @param topK     maximum number of documents to retrieve
     * @param minScore minimum relevance score; implementations typically drop
     *                 lower-scoring hits
     * @param filters  metadata key/value constraints applied by implementations
     */
    record RetrievalConfig(
        int topK,
        float minScore,
        Map<String, Object> filters
    ) {
        /** Default configuration: top 10 results, no score cutoff, no filters. */
        public static RetrievalConfig defaultConfig() {
            return new RetrievalConfig(10, 0.0f, Map.of());
        }
    }

    /**
     * Result of a retrieval operation containing documents and their scores.
     *
     * @param document the retrieved document
     * @param score    relevance score assigned by the retriever
     * @param metadata additional context about the retrieval (implementation-defined)
     */
    record RetrievalResult(
        Document document,
        float score,
        Map<String, Object> metadata
    ) {}

    /**
     * Retrieve relevant documents based on the query.
     *
     * @param query The search query
     * @param index The index to search in
     * @param config Retrieval configuration
     * @return List of retrieved documents with scores
     * @throws Exception if retrieval fails
     */
    List<RetrievalResult> retrieve(String query, String index, RetrievalConfig config) throws Exception;

    /**
     * Retrieve relevant documents with default configuration
     * ({@link RetrievalConfig#defaultConfig()}).
     *
     * @param query The search query
     * @param index The index to search in
     * @return List of retrieved documents with scores
     * @throws Exception if retrieval fails
     */
    default List<RetrievalResult> retrieve(String query, String index) throws Exception {
        return retrieve(query, index, RetrievalConfig.defaultConfig());
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/retriever/VectorStoreRetriever.java
|
package ai.driftkit.rag.core.retriever;
import ai.driftkit.embedding.core.domain.Embedding;
import ai.driftkit.embedding.core.domain.Response;
import ai.driftkit.embedding.core.domain.TextSegment;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.vector.core.domain.BaseVectorStore;
import ai.driftkit.vector.core.domain.Document;
import ai.driftkit.vector.core.domain.DocumentsResult;
import ai.driftkit.vector.core.domain.EmbeddingVectorStore;
import ai.driftkit.vector.core.domain.TextVectorStore;
import lombok.Builder;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Default retriever implementation that queries a vector store directly.
 * Dispatches on the concrete store type: text stores embed the query
 * themselves, embedding stores require an {@link EmbeddingModel}.
 */
@Slf4j
@Builder
@RequiredArgsConstructor
public class VectorStoreRetriever implements Retriever {

    @NonNull
    private final BaseVectorStore vectorStore;

    private final EmbeddingModel embeddingModel; // Required only for EmbeddingVectorStore

    @Builder.Default
    private final String queryPrefix = "";

    /**
     * Retrieve relevant documents from the vector store, then apply the
     * score cutoff and metadata filters from the config.
     */
    @Override
    public List<RetrievalResult> retrieve(String query, String index, RetrievalConfig config) throws Exception {
        log.debug("Retrieving documents for query: {} from index: {}", query, index);

        String searchQuery = queryPrefix.isEmpty() ? query : queryPrefix + query;
        DocumentsResult searchHits = executeSearch(searchQuery, index, config.topK());

        // Keep only the hits that pass both the score cutoff and the
        // metadata filters, attaching retrieval context as metadata.
        List<RetrievalResult> accepted = new ArrayList<>();
        for (DocumentsResult.ResultEntry entry : searchHits.getResult()) {
            Document candidate = entry.getDocument();
            Float score = entry.getValue();

            boolean passesScore = score >= config.minScore();
            if (passesScore && matchesFilters(candidate, config.filters())) {
                Map<String, Object> resultMeta = new HashMap<>();
                resultMeta.put("retrievalScore", score);
                resultMeta.put("index", index);
                accepted.add(new RetrievalResult(candidate, score, resultMeta));
            }
        }

        log.debug("Retrieved {} documents after filtering", accepted.size());
        return accepted;
    }

    /**
     * Run the similarity search against whichever store type is configured.
     */
    private DocumentsResult executeSearch(String searchQuery, String index, int topK) throws Exception {
        if (vectorStore instanceof TextVectorStore textStore) {
            // TextVectorStore embeds the query text internally.
            log.trace("Using TextVectorStore for retrieval");
            return textStore.findRelevant(index, searchQuery, topK);
        }
        if (vectorStore instanceof EmbeddingVectorStore embeddingStore) {
            // We must embed the query ourselves before searching.
            if (embeddingModel == null) {
                throw new IllegalStateException("EmbeddingModel is required for EmbeddingVectorStore");
            }
            log.trace("Using EmbeddingVectorStore for retrieval");
            Response<Embedding> embedded = embeddingModel.embed(TextSegment.from(searchQuery));
            return embeddingStore.findRelevant(index, embedded.content().vector(), topK);
        }
        throw new IllegalStateException("BaseVectorStore without text/embedding support is not suitable for retrieval");
    }

    /**
     * Check if the document's metadata satisfies every filter entry.
     * A document with no metadata fails any non-empty filter set; a filter
     * entry matches only when the document holds an equal, non-null value.
     */
    private boolean matchesFilters(Document doc, Map<String, Object> filters) {
        if (filters == null || filters.isEmpty()) {
            return true;
        }
        Map<String, Object> metadata = doc.getMetadata();
        if (metadata == null) {
            return false;
        }
        return filters.entrySet().stream().allMatch(filter -> {
            Object docValue = metadata.get(filter.getKey());
            return docValue != null && docValue.equals(filter.getValue());
        });
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/splitter/RecursiveCharacterTextSplitter.java
|
package ai.driftkit.rag.core.splitter;
import ai.driftkit.common.utils.DocumentSplitter;
import ai.driftkit.rag.core.domain.LoadedDocument;
import ai.driftkit.rag.core.domain.LoadedDocument.State;
import ai.driftkit.vector.core.domain.Document;
import lombok.Builder;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
 * Text splitter that uses recursive character-based splitting with overlap.
 * Wraps the existing DocumentSplitter from driftkit-common.
 */
@Slf4j
@Builder
@RequiredArgsConstructor
public class RecursiveCharacterTextSplitter implements TextSplitter {

    @Builder.Default
    private final int chunkSize = 512;

    @Builder.Default
    private final int chunkOverlap = 128;

    @Builder.Default
    private final boolean preserveMetadata = true;

    @Builder.Default
    private final boolean addChunkMetadata = true;

    /**
     * Split a loaded document into overlapping character-based chunks.
     * Returns an empty list when the document is blank or not in LOADED state.
     */
    @Override
    public List<Document> split(LoadedDocument document) {
        boolean unusable = StringUtils.isBlank(document.getContent())
            || document.getState() != State.LOADED;
        if (unusable) {
            log.warn("Document {} has no content to split", document.getId());
            return List.of();
        }

        // The actual character-based shingling is delegated to the shared utility.
        List<String> shingles = DocumentSplitter.splitDocumentIntoShingles(
            document.getContent(),
            chunkSize,
            chunkOverlap
        );
        log.debug("Split document {} into {} chunks", document.getId(), shingles.size());

        List<Document> result = new ArrayList<>(shingles.size());
        for (int index = 0; index < shingles.size(); index++) {
            result.add(toChunkDocument(document, shingles.get(index), index, shingles.size()));
        }
        return result;
    }

    /**
     * Build a vector-store Document for one chunk, carrying over source
     * metadata and/or adding chunk bookkeeping per the splitter configuration.
     */
    private Document toChunkDocument(LoadedDocument source, String chunk, int index, int total) {
        Map<String, Object> metadata = new HashMap<>();

        if (preserveMetadata && source.getMetadata() != null) {
            metadata.putAll(source.getMetadata());
        }

        if (addChunkMetadata) {
            metadata.put("sourceDocumentId", source.getId());
            metadata.put("sourceDocumentSource", source.getSource());
            metadata.put("chunkIndex", index);
            metadata.put("totalChunks", total);
            metadata.put("chunkSize", chunk.length());
            metadata.put("splitterType", "recursive_character");
            metadata.put("splitterChunkSize", chunkSize);
            metadata.put("splitterOverlap", chunkOverlap);
        }

        // Embedding is left null; it is produced later in the pipeline.
        return new Document(source.getId() + "-chunk-" + index, null, chunk, metadata);
    }

    /** Create a splitter with default settings. */
    public static RecursiveCharacterTextSplitter withDefaults() {
        return RecursiveCharacterTextSplitter.builder().build();
    }

    /** Create a splitter with a custom chunk size. */
    public static RecursiveCharacterTextSplitter withChunkSize(int chunkSize) {
        return RecursiveCharacterTextSplitter.builder()
            .chunkSize(chunkSize)
            .build();
    }

    /** Create a splitter with a custom chunk size and overlap. */
    public static RecursiveCharacterTextSplitter withChunkSizeAndOverlap(int chunkSize, int overlap) {
        return RecursiveCharacterTextSplitter.builder()
            .chunkSize(chunkSize)
            .chunkOverlap(overlap)
            .build();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/splitter/SemanticTextSplitter.java
|
package ai.driftkit.rag.core.splitter;
import ai.driftkit.embedding.core.domain.Embedding;
import ai.driftkit.embedding.core.domain.Response;
import ai.driftkit.embedding.core.domain.TextSegment;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.rag.core.domain.LoadedDocument;
import ai.driftkit.rag.core.domain.LoadedDocument.State;
import ai.driftkit.vector.core.domain.Document;
import lombok.Builder;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import java.text.BreakIterator;
import java.util.*;
/**
 * Text splitter that uses semantic similarity to create chunks.
 * Groups sentences together based on their semantic similarity: sentences
 * accumulate until the target size is reached, then the chunk is closed at
 * the first point where cosine similarity to the next sentence drops below
 * the configured threshold.
 */
@Slf4j
@Builder
@RequiredArgsConstructor
public class SemanticTextSplitter implements TextSplitter {

    @NonNull
    private final EmbeddingModel embeddingModel;

    // Preferred chunk size in characters; semantic break points are only
    // considered once the current chunk has reached this size.
    @Builder.Default
    private final int targetChunkSize = 512;

    // Cosine-similarity threshold; a boundary is inserted when the similarity
    // between the current chunk and the next sentence falls below this value.
    @Builder.Default
    private final float similarityThreshold = 0.7f;

    // Hard upper bound on chunk size in characters.
    @Builder.Default
    private final int maxChunkSize = 1024;

    // Chunks smaller than this are merged into the previous chunk.
    @Builder.Default
    private final int minChunkSize = 100;

    @Builder.Default
    private final boolean preserveMetadata = true;

    @Builder.Default
    private final boolean addChunkMetadata = true;

    /**
     * Split a loaded document into semantically coherent chunks.
     *
     * @param document the document to split; must be in state LOADED with
     *                 non-blank content, otherwise an empty list is returned
     * @return list of chunk documents without embeddings
     */
    @Override
    public List<Document> split(LoadedDocument document) {
        if (StringUtils.isBlank(document.getContent()) || document.getState() != State.LOADED) {
            log.warn("Document {} has no content to split", document.getId());
            return List.of();
        }
        // First, split into sentences
        List<String> sentences = splitIntoSentences(document.getContent());
        if (sentences.isEmpty()) {
            return List.of();
        }
        log.debug("Document {} split into {} sentences", document.getId(), sentences.size());
        // Group sentences into semantically coherent chunks
        List<List<String>> chunks = groupSentencesSemantically(sentences);
        log.debug("Grouped sentences into {} semantic chunks", chunks.size());
        // Convert to Document objects
        List<Document> documents = new ArrayList<>();
        for (int i = 0; i < chunks.size(); i++) {
            List<String> sentenceGroup = chunks.get(i);
            String chunkText = String.join(" ", sentenceGroup);
            String chunkId = document.getId() + "-semantic-" + i;
            // Create metadata for chunk
            Map<String, Object> metadata = new HashMap<>();
            // Preserve original document metadata if requested
            if (preserveMetadata && document.getMetadata() != null) {
                metadata.putAll(document.getMetadata());
            }
            // Add chunk-specific metadata
            if (addChunkMetadata) {
                metadata.put("sourceDocumentId", document.getId());
                metadata.put("sourceDocumentSource", document.getSource());
                metadata.put("chunkIndex", i);
                metadata.put("totalChunks", chunks.size());
                metadata.put("chunkSize", chunkText.length());
                metadata.put("sentenceCount", sentenceGroup.size());
                metadata.put("splitterType", "semantic");
                metadata.put("splitterTargetSize", targetChunkSize);
                metadata.put("splitterSimilarityThreshold", similarityThreshold);
            }
            Document doc = new Document(
                chunkId,
                null, // No embedding yet
                chunkText,
                metadata
            );
            documents.add(doc);
        }
        return documents;
    }

    /**
     * Split text into sentences using a locale-aware {@link BreakIterator}.
     * Blank sentences are dropped; the rest are trimmed.
     */
    private List<String> splitIntoSentences(String text) {
        List<String> sentences = new ArrayList<>();
        BreakIterator sentenceIterator = BreakIterator.getSentenceInstance(Locale.getDefault());
        sentenceIterator.setText(text);
        int start = sentenceIterator.first();
        int end = sentenceIterator.next();
        while (end != BreakIterator.DONE) {
            String sentence = text.substring(start, end).trim();
            if (!sentence.isEmpty()) {
                sentences.add(sentence);
            }
            start = end;
            end = sentenceIterator.next();
        }
        return sentences;
    }

    /**
     * Group sentences into semantically coherent chunks.
     * A chunk is closed when it would exceed {@code maxChunkSize}, or — once it
     * has reached {@code targetChunkSize} — when similarity to the next
     * sentence drops below {@code similarityThreshold}.
     */
    private List<List<String>> groupSentencesSemantically(List<String> sentences) {
        List<List<String>> chunks = new ArrayList<>();
        List<String> currentChunk = new ArrayList<>();
        int currentSize = 0;
        for (int i = 0; i < sentences.size(); i++) {
            String sentence = sentences.get(i);
            int sentenceSize = sentence.length();
            // If adding this sentence would exceed max size, finalize current chunk
            if (!currentChunk.isEmpty() && currentSize + sentenceSize > maxChunkSize) {
                chunks.add(new ArrayList<>(currentChunk));
                currentChunk.clear();
                currentSize = 0;
            }
            // Add sentence to current chunk
            currentChunk.add(sentence);
            currentSize += sentenceSize;
            // Check if we should start a new chunk based on size or semantic boundary
            if (currentSize >= targetChunkSize) {
                // Check semantic similarity with next sentence
                if (i < sentences.size() - 1) {
                    String nextSentence = sentences.get(i + 1);
                    float similarity = calculateSimilarity(
                        String.join(" ", currentChunk),
                        nextSentence
                    );
                    // If similarity is low, this is a good breaking point
                    if (similarity < similarityThreshold) {
                        chunks.add(new ArrayList<>(currentChunk));
                        currentChunk.clear();
                        currentSize = 0;
                    }
                } else {
                    // Last sentence, finalize chunk
                    chunks.add(new ArrayList<>(currentChunk));
                    currentChunk.clear();
                    currentSize = 0;
                }
            }
        }
        // Add any remaining sentences
        if (!currentChunk.isEmpty()) {
            // If the last chunk is too small, merge with previous.
            // NOTE(review): the merge may push the previous chunk past
            // maxChunkSize; acceptable trade-off to avoid tiny tail chunks.
            if (!chunks.isEmpty() && currentSize < minChunkSize) {
                List<String> lastChunk = chunks.getLast();
                lastChunk.addAll(currentChunk);
            } else {
                chunks.add(currentChunk);
            }
        }
        return chunks;
    }

    /**
     * Calculate semantic similarity between two texts via their embeddings.
     * Falls back to {@code similarityThreshold} on embedding failure so a
     * transient error does not force or forbid a chunk boundary.
     */
    private float calculateSimilarity(String text1, String text2) {
        try {
            // Generate embeddings
            Response<Embedding> response1 = embeddingModel.embed(TextSegment.from(text1));
            Response<Embedding> response2 = embeddingModel.embed(TextSegment.from(text2));
            float[] vector1 = response1.content().vector();
            float[] vector2 = response2.content().vector();
            // Calculate cosine similarity
            return cosineSimilarity(vector1, vector2);
        } catch (Exception e) {
            log.warn("Failed to calculate similarity, using default threshold", e);
            return similarityThreshold; // Default to threshold to avoid breaking
        }
    }

    /**
     * Calculate cosine similarity between two vectors.
     * Returns 0 when either vector has zero magnitude. (Previously this
     * divided by zero and returned NaN; since {@code NaN < threshold} is
     * always false, zero vectors silently disabled boundary detection.)
     *
     * @throws IllegalArgumentException if the vectors differ in length
     */
    private float cosineSimilarity(float[] vector1, float[] vector2) {
        if (vector1.length != vector2.length) {
            throw new IllegalArgumentException("Vectors must have the same length");
        }
        float dotProduct = 0.0f;
        float norm1 = 0.0f;
        float norm2 = 0.0f;
        for (int i = 0; i < vector1.length; i++) {
            dotProduct += vector1[i] * vector2[i];
            norm1 += vector1[i] * vector1[i];
            norm2 += vector2[i] * vector2[i];
        }
        double denominator = Math.sqrt(norm1) * Math.sqrt(norm2);
        if (denominator == 0.0) {
            return 0.0f; // at least one zero vector: define similarity as 0
        }
        return (float) (dotProduct / denominator);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/core/splitter/TextSplitter.java
|
package ai.driftkit.rag.core.splitter;
import ai.driftkit.rag.core.domain.LoadedDocument;
import ai.driftkit.vector.core.domain.Document;
import java.util.List;
/**
* Interface for splitting text documents into smaller chunks.
* Implementations should handle different splitting strategies.
*/
public interface TextSplitter {
/**
* Split a loaded document into multiple chunks.
* Each chunk will become a separate document in the vector store.
*
* @param document The document to split
* @return List of document chunks ready for embedding
*/
List<Document> split(LoadedDocument document);
/**
* Split multiple documents into chunks.
*
* @param documents The documents to split
* @return List of all document chunks
*/
default List<Document> splitAll(List<LoadedDocument> documents) {
return documents.stream()
.flatMap(doc -> split(doc).stream())
.toList();
}
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/ingestion/IngestionException.java
|
package ai.driftkit.rag.ingestion;
import lombok.Getter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Exception that accumulates multiple errors during ingestion process.
 */
public class IngestionException extends Exception {

    private final List<ErrorDetail> errors;

    /**
     * Error detail for a specific document.
     *
     * @param documentId id of the document the error relates to
     * @param source     source location of the document
     * @param error      the underlying exception
     * @param errorType  which stage of ingestion produced the error
     */
    public record ErrorDetail(
        String documentId,
        String source,
        Exception error,
        ErrorType errorType
    ) {}

    /**
     * Type of error that occurred.
     */
    public enum ErrorType {
        DOCUMENT_PROCESSING,
        RESULT_HANDLER,
        PIPELINE_INITIALIZATION
    }

    public IngestionException(String message, List<ErrorDetail> errors) {
        super(message + " (" + errors.size() + " errors)");
        this.errors = Collections.unmodifiableList(new ArrayList<>(errors));
    }

    public IngestionException(String message, Throwable cause) {
        super(message, cause);
        Exception wrapped = cause instanceof Exception e ? e : new RuntimeException(cause);
        this.errors = List.of(new ErrorDetail("unknown", "unknown",
            wrapped, ErrorType.PIPELINE_INITIALIZATION));
    }

    /** Accumulated per-document error details (unmodifiable). */
    public List<ErrorDetail> getErrors() {
        return errors;
    }

    /**
     * Check if there were any critical errors (non-result handler errors).
     */
    public boolean hasCriticalErrors() {
        for (ErrorDetail detail : errors) {
            if (detail.errorType() != ErrorType.RESULT_HANDLER) {
                return true;
            }
        }
        return false;
    }

    /**
     * Get only critical errors.
     */
    public List<ErrorDetail> getCriticalErrors() {
        return errors.stream()
            .filter(detail -> detail.errorType() != ErrorType.RESULT_HANDLER)
            .toList();
    }

    @Override
    public String getMessage() {
        StringBuilder sb = new StringBuilder(super.getMessage());
        sb.append("\nError details:");
        errors.forEach(detail -> sb
            .append("\n - Document: ").append(detail.documentId())
            .append(" (").append(detail.errorType()).append("): ")
            .append(detail.error().getMessage()));
        return sb.toString();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/ingestion/IngestionPipeline.java
|
package ai.driftkit.rag.ingestion;
import ai.driftkit.common.util.Retrier;
import ai.driftkit.common.utils.AIUtils;
import ai.driftkit.embedding.core.domain.Embedding;
import ai.driftkit.embedding.core.domain.Response;
import ai.driftkit.embedding.core.domain.TextSegment;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.rag.core.domain.LoadedDocument;
import ai.driftkit.rag.core.loader.DocumentLoader;
import ai.driftkit.rag.core.splitter.TextSplitter;
import ai.driftkit.vector.core.domain.BaseVectorStore;
import ai.driftkit.vector.core.domain.Document;
import ai.driftkit.vector.core.domain.EmbeddingVectorStore;
import ai.driftkit.vector.core.domain.TextVectorStore;
import lombok.Builder;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.stream.Stream;
/**
* Pipeline for ingesting documents into a vector store.
* Uses streaming and virtual threads for efficient processing.
*/
@Slf4j
@Builder
@RequiredArgsConstructor
public class IngestionPipeline {
@NonNull
private final DocumentLoader documentLoader;
@NonNull
private final TextSplitter textSplitter;
private final EmbeddingModel embeddingClient; // Optional - only needed for EmbeddingVectorStore
@NonNull
private final BaseVectorStore vectorStore;
@NonNull
@Builder.Default
private final String indexName = "default";
@Builder.Default
private final int maxRetries = 3;
@Builder.Default
private final long retryDelayMs = 1000;
@Builder.Default
private final boolean useVirtualThreads = true;
/**
* Progress listener for tracking ingestion progress.
*/
public interface ProgressListener {
void onDocumentLoaded(String documentId, String source);
void onDocumentProcessed(String documentId, int chunks);
void onDocumentFailed(String documentId, Exception error);
void onChunkStored(String chunkId);
void onProgress(long processed, long total);
/**
* Called when an error occurs in the result handler.
* Default implementation logs the error.
*/
default void onResultHandlerError(String documentId, Exception error) {
log.error("Error in result handler for document: {}", documentId, error);
}
}
/**
* Result of processing a single document.
*/
public record DocumentResult(
String documentId,
String source,
int chunksCreated,
int chunksStored,
List<Exception> errors,
long processingTimeMs
) {
public boolean isSuccess() {
return errors.isEmpty() && chunksCreated == chunksStored;
}
}
/**
* Validate configuration on build.
*/
private static void validate(IngestionPipeline pipeline) {
if (pipeline.vectorStore instanceof EmbeddingVectorStore && pipeline.embeddingClient == null) {
throw new IllegalArgumentException(
"EmbeddingModel is required when using EmbeddingVectorStore"
);
}
}
/**
* Run the ingestion pipeline with streaming processing.
* Each document is processed independently and can be acknowledged.
*
* @param progressListener Optional listener for progress updates
* @return Stream of results for each processed document
*/
public Stream<DocumentResult> run(ProgressListener progressListener) {
log.info("Starting streaming document ingestion pipeline for index: {}", indexName);
try {
// Get document stream - either lazy or eager loading
Stream<LoadedDocument> documentStream = documentLoader.supportsStreaming()
? documentLoader.loadStream()
: documentLoader.load().stream();
// Process each document independently
return documentStream
.map(doc -> processDocumentWithRetry(doc, progressListener))
.onClose(() -> log.info("Ingestion pipeline completed"));
} catch (Exception e) {
log.error("Failed to start ingestion pipeline", e);
throw new RuntimeException("Failed to start ingestion pipeline", e);
}
}
/**
* Run the ingestion pipeline without progress tracking.
*/
public Stream<DocumentResult> run() {
return run(null);
}
/**
* Run the ingestion pipeline with a consumer for handling results.
* This allows for acknowledgment pattern.
*
* @throws IngestionException if any errors occur during processing or in result handlers
*/
public void run(Consumer<DocumentResult> resultHandler, ProgressListener progressListener) throws IngestionException {
List<IngestionException.ErrorDetail> accumulatedErrors = new ArrayList<>();
try (Stream<DocumentResult> results = run(progressListener)) {
results.forEach(result -> {
// Check for document processing errors
if (!result.errors().isEmpty()) {
for (Exception error : result.errors()) {
accumulatedErrors.add(new IngestionException.ErrorDetail(
result.documentId(),
result.source(),
error,
IngestionException.ErrorType.DOCUMENT_PROCESSING
));
}
}
// Handle result
try {
resultHandler.accept(result);
} catch (Exception e) {
log.error("Error in result handler for document: {}", result.documentId(), e);
// Notify listener if available
if (progressListener != null) {
progressListener.onResultHandlerError(result.documentId(), e);
}
// Accumulate error
accumulatedErrors.add(new IngestionException.ErrorDetail(
result.documentId(),
result.source(),
e,
IngestionException.ErrorType.RESULT_HANDLER
));
}
});
} catch (Exception e) {
// Pipeline initialization or streaming error
throw new IngestionException("Failed to execute ingestion pipeline", e);
}
// If there were any errors, throw exception with all details
if (!accumulatedErrors.isEmpty()) {
throw new IngestionException("Ingestion completed with errors", accumulatedErrors);
}
}
/**
* Process a single document with retry logic.
*/
private DocumentResult processDocumentWithRetry(LoadedDocument document, ProgressListener listener) {
long startTime = System.currentTimeMillis();
String docId = document.getId() != null ? document.getId() : AIUtils.generateId();
if (listener != null) {
listener.onDocumentLoaded(docId, document.getSource());
}
Exception lastError = null;
try {
// Use Retrier to handle the retry logic
DocumentResult result = Retrier.retry(() -> {
DocumentResult processResult = processDocument(document, docId, listener);
if (processResult.isSuccess()) {
return processResult;
}
// If there were errors, throw exception to trigger retry
if (!processResult.errors().isEmpty()) {
throw new RuntimeException("Document processing had errors", processResult.errors().getFirst());
}
return processResult; // Partial success, don't retry
}, maxRetries, retryDelayMs, 1);
return result;
} catch (Exception e) {
lastError = e;
log.error("All retry attempts failed for document: {}", docId, e);
}
// All retries failed
if (listener != null) {
listener.onDocumentFailed(docId, lastError);
}
return new DocumentResult(
docId,
document.getSource(),
0,
0,
List.of(lastError != null ? lastError : new RuntimeException("Processing failed")),
System.currentTimeMillis() - startTime
);
}
/**
* Process a single document.
*/
private DocumentResult processDocument(LoadedDocument document, String docId, ProgressListener listener) {
long startTime = System.currentTimeMillis();
List<Exception> errors = new ArrayList<>();
AtomicInteger chunksCreated = new AtomicInteger(0);
AtomicInteger chunksStored = new AtomicInteger(0);
try {
// Split document into chunks
List<Document> chunks = textSplitter.split(document);
chunksCreated.set(chunks.size());
log.debug("Document {} split into {} chunks", docId, chunks.size());
// Process chunks based on vector store type
if (vectorStore instanceof TextVectorStore textStore) {
// TextVectorStore handles embedding internally
processChunksForTextStore(chunks, docId, document, textStore, chunksStored, errors, listener);
} else if (vectorStore instanceof EmbeddingVectorStore embeddingStore) {
// We need to create embeddings ourselves
processChunksForEmbeddingStore(chunks, docId, document, embeddingStore, chunksStored, errors, listener);
} else {
// BaseVectorStore - store without embeddings (shouldn't happen in RAG context)
throw new IllegalStateException("BaseVectorStore without text/embedding support is not suitable for RAG");
}
if (listener != null) {
listener.onDocumentProcessed(docId, chunksCreated.get());
}
} catch (Exception e) {
log.error("Error processing document: {}", docId, e);
errors.add(e);
}
return new DocumentResult(
docId,
document.getSource(),
chunksCreated.get(),
chunksStored.get(),
errors,
System.currentTimeMillis() - startTime
);
}
/**
* Process chunks for TextVectorStore (no embedding needed).
*/
private void processChunksForTextStore(
List<Document> chunks,
String docId,
LoadedDocument sourceDoc,
TextVectorStore textStore,
AtomicInteger chunksStored,
List<Exception> errors,
ProgressListener listener) {
processChunksAsync(chunks, docId, sourceDoc, chunksStored, errors, listener, (chunk, chunkId, chunkIndex) -> {
// Create document without embedding (TextVectorStore will handle it)
Document doc = new Document(
chunkId,
null, // No vector needed
chunk.getPageContent(),
chunk.getMetadata()
);
// Add source metadata
enrichDocumentMetadata(doc, sourceDoc, docId, chunkIndex);
// Store in vector store
textStore.addDocument(indexName, doc);
return doc;
});
}
/**
* Process chunks for EmbeddingVectorStore (we create embeddings).
*/
private void processChunksForEmbeddingStore(
List<Document> chunks,
String docId,
LoadedDocument sourceDoc,
EmbeddingVectorStore embeddingStore,
AtomicInteger chunksStored,
List<Exception> errors,
ProgressListener listener) {
processChunksAsync(chunks, docId, sourceDoc, chunksStored, errors, listener, (chunk, chunkId, chunkIndex) -> {
// Generate embedding
Response<Embedding> response = embeddingClient.embed(
TextSegment.from(chunk.getPageContent())
);
float[] vector = response.content().vector();
// Create document with embedding
Document embeddedDoc = new Document(
chunkId,
vector,
chunk.getPageContent(),
chunk.getMetadata()
);
// Add source metadata
enrichDocumentMetadata(embeddedDoc, sourceDoc, docId, chunkIndex);
// Store in vector store
embeddingStore.addDocument(indexName, embeddedDoc);
return embeddedDoc;
});
}
/**
 * Common async processing logic for chunks.
 *
 * <p>Submits one task per chunk, waits for all of them, and aggregates
 * successes into {@code chunksStored} and failures into {@code errors}.
 * A failing chunk is recorded but does not abort the rest of the batch.
 *
 * @param chunks       split chunks of the source document
 * @param docId        parent document id; chunk ids are {@code docId + "-" + index}
 * @param sourceDoc    original loaded document (currently unused here; consumed by processors via closure)
 * @param chunksStored counter incremented once per successfully stored chunk
 * @param errors       shared error sink; access is synchronized because tasks run concurrently
 * @param listener     optional progress callback, notified after each stored chunk
 * @param processor    strategy that transforms/stores a single chunk
 */
private void processChunksAsync(
List<Document> chunks,
String docId,
LoadedDocument sourceDoc,
AtomicInteger chunksStored,
List<Exception> errors,
ProgressListener listener,
ChunkProcessor processor) {
List<CompletableFuture<Void>> futures = new ArrayList<>();
for (int i = 0; i < chunks.size(); i++) {
Document chunk = chunks.get(i);
String chunkId = docId + "-" + i;
final int chunkIndex = i;
Runnable task = () -> {
try {
// Process chunk using the provided processor
processor.process(chunk, chunkId, chunkIndex);
chunksStored.incrementAndGet();
if (listener != null) {
listener.onChunkStored(chunkId);
}
log.trace("Successfully stored chunk: {}", chunkId);
} catch (Exception e) {
log.error("Failed to process chunk: {}", chunkId, e);
// Collect rather than rethrow so one bad chunk does not fail the whole document.
synchronized (errors) {
errors.add(new RuntimeException("Failed to process chunk " + chunkId, e));
}
}
};
// One virtual thread per chunk when enabled; otherwise the common ForkJoin pool.
CompletableFuture<Void> future = useVirtualThreads
? CompletableFuture.runAsync(task, command -> Thread.ofVirtual().start(command))
: CompletableFuture.runAsync(task);
futures.add(future);
}
// Wait for all chunks
CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
}
/**
 * Enrich document with source metadata.
 *
 * <p>Records provenance on the chunk: the original source locator, the parent
 * document id, and the chunk's zero-based position in the split sequence.
 * NOTE(review): assumes {@code doc.getMetadata()} is non-null — confirm all
 * callers construct chunks with a metadata map.
 */
private void enrichDocumentMetadata(Document doc, LoadedDocument sourceDoc, String docId, int chunkIndex) {
doc.getMetadata().put("source", sourceDoc.getSource());
doc.getMetadata().put("sourceDocId", docId);
doc.getMetadata().put("chunkIndex", chunkIndex);
}
/**
 * Functional interface for processing individual chunks.
 */
@FunctionalInterface
private interface ChunkProcessor {
/**
 * Transform and persist a single chunk.
 *
 * @param chunk      the chunk to store
 * @param chunkId    id assigned to the stored chunk
 * @param chunkIndex zero-based position of the chunk within its parent document
 * @return the document as it was stored
 * @throws Exception if embedding generation or storage fails
 */
Document process(Document chunk, String chunkId, int chunkIndex) throws Exception;
}
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag
|
java-sources/ai/driftkit/driftkit-rag-core/0.8.1/ai/driftkit/rag/retrieval/RetrievalPipeline.java
|
package ai.driftkit.rag.retrieval;
import ai.driftkit.embedding.core.domain.Embedding;
import ai.driftkit.embedding.core.domain.Response;
import ai.driftkit.embedding.core.domain.TextSegment;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.rag.core.reranker.Reranker;
import ai.driftkit.rag.core.reranker.Reranker.RerankConfig;
import ai.driftkit.rag.core.reranker.Reranker.RerankResult;
import ai.driftkit.rag.core.retriever.Retriever;
import ai.driftkit.rag.core.retriever.Retriever.RetrievalConfig;
import ai.driftkit.rag.core.retriever.Retriever.RetrievalResult;
import ai.driftkit.vector.core.domain.BaseVectorStore;
import ai.driftkit.vector.core.domain.Document;
import ai.driftkit.vector.core.domain.DocumentsResult;
import ai.driftkit.vector.core.domain.EmbeddingVectorStore;
import ai.driftkit.vector.core.domain.TextVectorStore;
import lombok.Builder;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
/**
 * Pipeline for retrieving relevant documents from a vector store.
 * Supports optional custom retrieval strategies and reranking.
 *
 * <p>Flow: (1) fetch candidates — via the custom {@link Retriever} when configured,
 * otherwise directly from the vector store; (2) optionally rerank candidates with
 * the configured {@link Reranker}; (3) return the resulting documents.
 */
@Slf4j
@Builder
@RequiredArgsConstructor
public class RetrievalPipeline {

    private final EmbeddingModel embeddingClient; // Optional - only needed for EmbeddingVectorStore

    @NonNull
    private final BaseVectorStore vectorStore;

    @NonNull
    @Builder.Default
    private final String indexName = "default";

    private final Retriever retriever; // Optional - custom retrieval strategy

    private final Reranker reranker; // Optional - reranking model

    @Builder.Default
    private final int topK = 10;

    @Builder.Default
    private final float minScore = 0.0f;

    @Builder.Default
    private final Map<String, Object> filters = Map.of();

    @Builder.Default
    private final boolean useVirtualThreads = true;

    @Builder.Default
    private final String queryPrefix = ""; // Optional prefix for queries (e.g., "Instruct: Retrieve semantically similar text.\nQuery: ")

    /**
     * Validate configuration on build.
     *
     * <p>NOTE(review): the Lombok-generated builder never calls this method, so the
     * check is currently dead code; misconfiguration surfaces later as an NPE when
     * embedding the query. Consider invoking it from a custom build() method.
     */
    private static void validate(RetrievalPipeline pipeline) {
        if (pipeline.vectorStore instanceof EmbeddingVectorStore &&
                pipeline.embeddingClient == null &&
                pipeline.retriever == null) {
            throw new IllegalArgumentException(
                    "EmbeddingModel is required when using EmbeddingVectorStore without custom Retriever"
            );
        }
    }

    /**
     * Retrieve relevant documents for the given query.
     *
     * @param query The search query
     * @return List of relevant documents
     * @throws Exception if retrieval fails
     */
    public List<Document> retrieve(String query) throws Exception {
        return retrieve(query, RetrievalConfig.defaultConfig());
    }

    /**
     * Retrieve relevant documents with custom configuration.
     *
     * <p>Config values of {@code topK <= 0} or {@code minScore < 0} fall back to
     * the builder defaults; a null filter map falls back to the builder filters.
     *
     * @param query The search query
     * @param config Custom retrieval configuration
     * @return List of relevant documents
     * @throws Exception if retrieval fails
     */
    public List<Document> retrieve(String query, RetrievalConfig config) throws Exception {
        log.debug("Starting retrieval for query: {}", query);

        // Use custom config or fall back to builder defaults
        int effectiveTopK = config.topK() > 0 ? config.topK() : topK;
        float effectiveMinScore = config.minScore() >= 0 ? config.minScore() : minScore;
        Map<String, Object> effectiveFilters = config.filters() != null ? config.filters() : filters;

        // Step 1: Retrieve documents
        List<RetrievalResult> retrievalResults;
        if (retriever != null) {
            // Use custom retriever
            log.debug("Using custom retriever");
            retrievalResults = retriever.retrieve(
                    query,
                    indexName,
                    new RetrievalConfig(effectiveTopK, effectiveMinScore, effectiveFilters)
            );
        } else {
            // Use vector store directly
            retrievalResults = retrieveFromVectorStore(
                    query,
                    effectiveTopK,
                    effectiveMinScore,
                    effectiveFilters
            );
        }
        log.debug("Retrieved {} documents", retrievalResults.size());

        // Step 2: Optional reranking
        if (reranker != null && !retrievalResults.isEmpty()) {
            log.debug("Applying reranking");
            RerankConfig rerankConfig = new RerankConfig(effectiveTopK, Map.of());
            List<RerankResult> rerankResults = reranker.rerank(query, retrievalResults, rerankConfig);
            log.debug("Reranking complete, {} documents remaining", rerankResults.size());
            // Convert RerankResult back to RetrievalResult for consistency
            retrievalResults = rerankResults.stream()
                    .map(RerankResult::toRetrievalResult)
                    .toList();
        }

        // Convert to Document list
        return retrievalResults.stream()
                .map(RetrievalResult::document)
                .toList();
    }

    /**
     * Retrieve documents asynchronously.
     *
     * @param query The search query
     * @return CompletableFuture with results
     */
    public CompletableFuture<List<Document>> retrieveAsync(String query) {
        return retrieveAsync(query, RetrievalConfig.defaultConfig());
    }

    /**
     * Retrieve documents asynchronously with custom configuration.
     *
     * <p>Checked failures from {@link #retrieve} are wrapped in a RuntimeException
     * and complete the returned future exceptionally.
     *
     * @param query The search query
     * @param config Custom retrieval configuration
     * @return CompletableFuture with results
     */
    public CompletableFuture<List<Document>> retrieveAsync(String query, RetrievalConfig config) {
        if (useVirtualThreads) {
            // One virtual thread per request instead of the common pool.
            return CompletableFuture.supplyAsync(
                    () -> {
                        try {
                            return retrieve(query, config);
                        } catch (Exception e) {
                            throw new RuntimeException("Retrieval failed", e);
                        }
                    },
                    command -> Thread.ofVirtual().start(command)
            );
        } else {
            return CompletableFuture.supplyAsync(
                    () -> {
                        try {
                            return retrieve(query, config);
                        } catch (Exception e) {
                            throw new RuntimeException("Retrieval failed", e);
                        }
                    }
            );
        }
    }

    /**
     * Retrieve documents directly from vector store.
     *
     * @param query raw user query; queryPrefix is prepended when configured
     * @param k number of top results requested from the store
     * @param minScoreThreshold entries scoring below this are dropped
     * @param filterMetadata exact-match metadata filters applied post-retrieval
     */
    private List<RetrievalResult> retrieveFromVectorStore(
            String query,
            int k,
            float minScoreThreshold,
            Map<String, Object> filterMetadata) throws Exception {

        String effectiveQuery = queryPrefix.isEmpty() ? query : queryPrefix + query;

        DocumentsResult results;
        if (vectorStore instanceof TextVectorStore textStore) {
            // TextVectorStore handles embedding internally
            log.trace("Using TextVectorStore for retrieval");
            results = textStore.findRelevant(indexName, effectiveQuery, k);
        } else if (vectorStore instanceof EmbeddingVectorStore embeddingStore) {
            // We need to create embedding ourselves
            log.trace("Using EmbeddingVectorStore for retrieval");
            // Generate query embedding
            Response<Embedding> response = embeddingClient.embed(TextSegment.from(effectiveQuery));
            float[] queryVector = response.content().vector();
            results = embeddingStore.findRelevant(indexName, queryVector, k);
        } else {
            throw new IllegalStateException("BaseVectorStore without text/embedding support is not suitable for retrieval");
        }

        // Convert to RetrievalResult and apply filters
        List<RetrievalResult> retrievalResults = new ArrayList<>();
        for (DocumentsResult.ResultEntry entry : results.getResult()) {
            Document doc = entry.getDocument();
            Float score = entry.getValue();

            // Fix: ResultEntry.value is a nullable boxed Float; a null score previously
            // threw an NPE on unboxing here. Treat a missing score as below threshold.
            if (score == null || score < minScoreThreshold) {
                continue;
            }

            // Apply metadata filters
            if (!matchesFilters(doc, filterMetadata)) {
                continue;
            }

            retrievalResults.add(new RetrievalResult(
                    doc,
                    score,
                    Map.of("retrievalScore", score)
            ));
        }
        return retrievalResults;
    }

    /**
     * Check if document matches all filter criteria.
     *
     * @return true when every filter key is present in the document metadata with an
     *         equal value; an empty or null filter map matches everything, and a
     *         document without metadata matches only the empty filter
     */
    private boolean matchesFilters(Document doc, Map<String, Object> filters) {
        if (filters == null || filters.isEmpty()) {
            return true;
        }

        Map<String, Object> metadata = doc.getMetadata();
        if (metadata == null) {
            return false;
        }

        for (Map.Entry<String, Object> filter : filters.entrySet()) {
            Object docValue = metadata.get(filter.getKey());
            Object filterValue = filter.getValue();
            if (docValue == null || !docValue.equals(filterValue)) {
                return false;
            }
        }
        return true;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-spring-boot-starter/0.8.1/ai/driftkit/rag/spring
|
java-sources/ai/driftkit/driftkit-rag-spring-boot-starter/0.8.1/ai/driftkit/rag/spring/autoconfigure/RagAutoConfiguration.java
|
package ai.driftkit.rag.spring.autoconfigure;
import ai.driftkit.common.domain.client.ModelClient;
import ai.driftkit.context.core.service.PromptService;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.rag.core.reranker.ModelBasedReranker;
import ai.driftkit.rag.core.retriever.VectorStoreRetriever;
import ai.driftkit.rag.core.splitter.RecursiveCharacterTextSplitter;
import ai.driftkit.rag.core.splitter.SemanticTextSplitter;
import ai.driftkit.rag.core.splitter.TextSplitter;
import ai.driftkit.rag.ingestion.IngestionPipeline;
import ai.driftkit.rag.retrieval.RetrievalPipeline;
import ai.driftkit.rag.spring.service.DocumentLoaderFactory;
import ai.driftkit.rag.spring.service.RagService;
import ai.driftkit.vector.core.domain.BaseVectorStore;
import ai.driftkit.vector.spring.parser.UnifiedParser;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Spring Boot Auto Configuration for DriftKit RAG.
 *
 * <p>Wires the optional splitter/reranker/retriever beans and the top-level
 * {@link RagService}, all guarded by classpath, bean and property conditions.
 */
@Slf4j
@AutoConfiguration
@ConditionalOnClass({IngestionPipeline.class, RetrievalPipeline.class})
@EnableConfigurationProperties(RagProperties.class)
public class RagAutoConfiguration {

    /**
     * Document loader factory.
     *
     * <p>Only created when a {@link UnifiedParser} bean is available.
     */
    @Bean
    @ConditionalOnMissingBean
    @ConditionalOnBean(UnifiedParser.class)
    public DocumentLoaderFactory documentLoaderFactory(UnifiedParser parser) {
        log.info("Creating DocumentLoaderFactory");
        return new DocumentLoaderFactory(parser);
    }

    /**
     * Configuration for text splitters.
     */
    @Configuration
    @ConditionalOnClass(RecursiveCharacterTextSplitter.class)
    public static class TextSplitterConfiguration {

        /** Default splitter; active unless driftkit.rag.splitter.type selects another. */
        @Bean
        @ConditionalOnMissingBean
        @ConditionalOnProperty(prefix = "driftkit.rag.splitter", name = "type", havingValue = "recursive", matchIfMissing = true)
        public RecursiveCharacterTextSplitter recursiveCharacterTextSplitter(RagProperties properties) {
            RagProperties.SplitterProperties splitterProps = properties.getSplitter();
            log.info("Creating RecursiveCharacterTextSplitter with chunk size: {}, overlap: {}",
                    splitterProps.getChunkSize(), splitterProps.getChunkOverlap());
            return RecursiveCharacterTextSplitter.builder()
                    .chunkSize(splitterProps.getChunkSize())
                    .chunkOverlap(splitterProps.getChunkOverlap())
                    .preserveMetadata(splitterProps.isPreserveMetadata())
                    .addChunkMetadata(splitterProps.isAddChunkMetadata())
                    .build();
        }

        /** Embedding-based splitter; requires type=semantic and an EmbeddingModel bean. */
        @Bean
        @ConditionalOnMissingBean
        @ConditionalOnProperty(prefix = "driftkit.rag.splitter", name = "type", havingValue = "semantic")
        @ConditionalOnBean(EmbeddingModel.class)
        public SemanticTextSplitter semanticTextSplitter(
                RagProperties properties,
                EmbeddingModel embeddingModel) {
            RagProperties.SplitterProperties splitterProps = properties.getSplitter();
            log.info("Creating SemanticTextSplitter with target size: {}, similarity threshold: {}",
                    splitterProps.getTargetChunkSize(), splitterProps.getSimilarityThreshold());
            return SemanticTextSplitter.builder()
                    .embeddingModel(embeddingModel)
                    .targetChunkSize(splitterProps.getTargetChunkSize())
                    .similarityThreshold(splitterProps.getSimilarityThreshold())
                    .maxChunkSize(splitterProps.getMaxChunkSize())
                    .minChunkSize(splitterProps.getMinChunkSize())
                    .preserveMetadata(splitterProps.isPreserveMetadata())
                    .addChunkMetadata(splitterProps.isAddChunkMetadata())
                    .build();
        }
    }

    /**
     * Configuration for reranker.
     */
    @Configuration
    @ConditionalOnClass(ModelBasedReranker.class)
    public static class RerankerConfiguration {

        @Bean
        @ConditionalOnMissingBean
        @ConditionalOnProperty(prefix = "driftkit.rag.reranker", name = "enabled", havingValue = "true", matchIfMissing = true)
        @ConditionalOnBean({ModelClient.class, PromptService.class})
        public ModelBasedReranker modelBasedReranker(
                RagProperties properties,
                ModelClient modelClient,
                PromptService promptService) {
            RagProperties.RerankerProperties rerankerProps = properties.getReranker();
            log.info("Creating ModelBasedReranker with model: {}", rerankerProps.getModel());
            return ModelBasedReranker.builder()
                    .modelClient(modelClient)
                    .promptService(promptService)
                    .promptId(rerankerProps.getPromptId())
                    .model(rerankerProps.getModel())
                    .temperature(rerankerProps.getTemperature())
                    .build();
        }
    }

    /**
     * Configuration for retriever.
     */
    @Configuration
    @ConditionalOnClass(VectorStoreRetriever.class)
    public static class RetrieverConfiguration {

        @Bean
        @ConditionalOnMissingBean
        @ConditionalOnBean(BaseVectorStore.class)
        public VectorStoreRetriever vectorStoreRetriever(
                RagProperties properties,
                BaseVectorStore vectorStore,
                @Autowired(required = false) EmbeddingModel embeddingModel) {
            RagProperties.RetrieverProperties retrieverProps = properties.getRetriever();
            log.info("Creating VectorStoreRetriever with query prefix: {}", retrieverProps.getQueryPrefix());
            return VectorStoreRetriever.builder()
                    .vectorStore(vectorStore)
                    .embeddingModel(embeddingModel)
                    .queryPrefix(retrieverProps.getQueryPrefix())
                    .build();
        }
    }

    /**
     * Main RAG service.
     *
     * <p>Fix: the splitter is injected as the {@link TextSplitter} interface instead
     * of the concrete {@link RecursiveCharacterTextSplitter}. Previously, when
     * {@code driftkit.rag.splitter.type=semantic} was set, the created
     * {@link SemanticTextSplitter} could not be injected here and RagService
     * silently received a null splitter.
     */
    @Bean
    @ConditionalOnMissingBean
    @ConditionalOnProperty(prefix = "driftkit.rag", name = "enabled", havingValue = "true", matchIfMissing = true)
    public RagService ragService(
            RagProperties properties,
            DocumentLoaderFactory loaderFactory,
            BaseVectorStore vectorStore,
            @Autowired(required = false) TextSplitter textSplitter,
            @Autowired(required = false) EmbeddingModel embeddingModel,
            @Autowired(required = false) VectorStoreRetriever retriever,
            @Autowired(required = false) ModelBasedReranker reranker) {
        log.info("Creating RagService");
        return new RagService(
                properties,
                loaderFactory,
                vectorStore,
                textSplitter,
                embeddingModel,
                retriever,
                reranker
        );
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-spring-boot-starter/0.8.1/ai/driftkit/rag/spring
|
java-sources/ai/driftkit/driftkit-rag-spring-boot-starter/0.8.1/ai/driftkit/rag/spring/autoconfigure/RagProperties.java
|
package ai.driftkit.rag.spring.autoconfigure;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
 * Configuration properties for DriftKit RAG.
 *
 * <p>Bound from the {@code driftkit.rag} prefix, e.g.
 * {@code driftkit.rag.splitter.chunk-size=512} or
 * {@code driftkit.rag.reranker.enabled=false}.
 */
@Data
@ConfigurationProperties(prefix = "driftkit.rag")
public class RagProperties {
/**
 * Default index name for vector store operations.
 */
private String defaultIndex = "default";
/**
 * Enable RAG features.
 */
private boolean enabled = true;
/**
 * Text splitter configuration.
 */
private SplitterProperties splitter = new SplitterProperties();
/**
 * Reranker configuration.
 */
private RerankerProperties reranker = new RerankerProperties();
/**
 * Retriever configuration.
 */
private RetrieverProperties retriever = new RetrieverProperties();
/**
 * Ingestion pipeline configuration.
 */
private IngestionProperties ingestion = new IngestionProperties();
/** Properties under {@code driftkit.rag.splitter}. */
@Data
public static class SplitterProperties {
/**
 * Type of splitter: recursive or semantic.
 */
private String type = "recursive";
/**
 * Chunk size for recursive splitter.
 */
private int chunkSize = 512;
/**
 * Overlap between chunks for recursive splitter.
 */
private int chunkOverlap = 128;
/**
 * Target chunk size for semantic splitter.
 */
private int targetChunkSize = 512;
/**
 * Similarity threshold for semantic splitter.
 */
private float similarityThreshold = 0.7f;
/**
 * Maximum chunk size.
 */
private int maxChunkSize = 1024;
/**
 * Minimum chunk size.
 */
private int minChunkSize = 100;
/**
 * Whether to preserve document metadata in chunks.
 */
private boolean preserveMetadata = true;
/**
 * Whether to add chunk-specific metadata.
 */
private boolean addChunkMetadata = true;
}
/** Properties under {@code driftkit.rag.reranker}. */
@Data
public static class RerankerProperties {
/**
 * Enable reranking.
 */
private boolean enabled = true;
/**
 * Model to use for reranking.
 */
private String model = "gpt-4o";
/**
 * Temperature for reranking model.
 */
private float temperature = 0.0f;
/**
 * Prompt ID for reranking.
 */
private String promptId = "rag.rerank";
}
/** Properties under {@code driftkit.rag.retriever}. */
@Data
public static class RetrieverProperties {
/**
 * Default number of results to retrieve.
 */
private int defaultTopK = 10;
/**
 * Default minimum score threshold.
 */
private float defaultMinScore = 0.0f;
/**
 * Query prefix to add to all queries.
 */
private String queryPrefix = "";
}
/** Properties under {@code driftkit.rag.ingestion}. */
@Data
public static class IngestionProperties {
/**
 * Maximum retries for failed documents.
 */
private int maxRetries = 3;
/**
 * Retry delay in milliseconds.
 */
private long retryDelayMs = 1000;
/**
 * Use virtual threads for processing.
 */
private boolean useVirtualThreads = true;
/**
 * Default file extensions to process.
 */
private Set<String> defaultExtensions = Set.of(
"txt", "md", "json", "xml", "html", "csv",
"pdf", "doc", "docx", "odt",
"jpg", "jpeg", "png"
);
/**
 * Maximum file size in bytes.
 */
private long maxFileSizeBytes = 50 * 1024 * 1024; // 50MB
}
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-spring-boot-starter/0.8.1/ai/driftkit/rag/spring
|
java-sources/ai/driftkit/driftkit-rag-spring-boot-starter/0.8.1/ai/driftkit/rag/spring/service/DocumentLoaderFactory.java
|
package ai.driftkit.rag.spring.service;
import ai.driftkit.rag.core.domain.LoadedDocument;
import ai.driftkit.rag.core.loader.DocumentLoader;
import ai.driftkit.rag.core.loader.FileSystemLoader;
import ai.driftkit.rag.core.loader.UrlLoader;
import ai.driftkit.vector.spring.parser.UnifiedParser;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Stream;
/**
 * Factory for creating DocumentLoader instances at runtime.
 *
 * <p>All loaders share the single injected {@link UnifiedParser}; the factory
 * itself is stateless apart from that dependency.
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class DocumentLoaderFactory {
private final UnifiedParser parser;
/**
 * Create a FileSystemLoader for the given path.
 */
public FileSystemLoader fileSystemLoader(String path) {
return fileSystemLoader(Paths.get(path));
}
/**
 * Create a FileSystemLoader with custom configuration.
 *
 * <p>Uses the builder defaults for recursion/extensions/exclusions.
 */
public FileSystemLoader fileSystemLoader(Path path) {
log.debug("Creating FileSystemLoader for path: {}", path);
return FileSystemLoader.builder()
.rootPath(path)
.parser(parser)
.build();
}
/**
 * Create a FileSystemLoader with full configuration.
 *
 * @param path            root directory to scan
 * @param recursive       whether to descend into subdirectories
 * @param extensions      file extensions to include
 * @param excludePatterns patterns for paths to skip
 */
public FileSystemLoader fileSystemLoader(
Path path,
boolean recursive,
Set<String> extensions,
Set<String> excludePatterns) {
log.debug("Creating configured FileSystemLoader for path: {}", path);
return FileSystemLoader.builder()
.rootPath(path)
.parser(parser)
.recursive(recursive)
.extensions(extensions)
.excludePatterns(excludePatterns)
.build();
}
/**
 * Create a UrlLoader for a single URL.
 */
public UrlLoader urlLoader(String url) {
return urlLoader(List.of(url));
}
/**
 * Create a UrlLoader for multiple URLs.
 */
public UrlLoader urlLoader(List<String> urls) {
log.debug("Creating UrlLoader for {} URLs", urls.size());
return UrlLoader.builder()
.urls(urls)
.parser(parser)
.build();
}
/**
 * Create a UrlLoader with custom configuration.
 *
 * @param urls           URLs to fetch
 * @param headers        extra HTTP headers sent with each request
 * @param timeoutSeconds per-request timeout
 */
public UrlLoader urlLoader(
List<String> urls,
Map<String, String> headers,
int timeoutSeconds) {
log.debug("Creating configured UrlLoader for {} URLs", urls.size());
return UrlLoader.builder()
.urls(urls)
.parser(parser)
.headers(headers)
.timeoutSeconds(timeoutSeconds)
.build();
}
/**
 * Create a composite loader that combines multiple loaders.
 */
public DocumentLoader compositeLoader(DocumentLoader... loaders) {
return new CompositeDocumentLoader(List.of(loaders));
}
/**
 * Composite loader that aggregates multiple loaders.
 *
 * <p>Note: {@link #loadStream()} is best-effort — a loader that throws is
 * logged and contributes an empty stream, whereas {@link #load()} propagates
 * the first failure.
 */
@RequiredArgsConstructor
private static class CompositeDocumentLoader implements DocumentLoader {
private final List<DocumentLoader> loaders;
@Override
public List<LoadedDocument> load() throws Exception {
List<LoadedDocument> allDocs = new ArrayList<>();
for (DocumentLoader loader : loaders) {
allDocs.addAll(loader.load());
}
return allDocs;
}
@Override
public Stream<LoadedDocument> loadStream() throws Exception {
return loaders.stream()
.flatMap(loader -> {
try {
return loader.loadStream();
} catch (Exception e) {
// Best-effort: skip the failing loader rather than abort the stream.
log.error("Error loading from loader", e);
return Stream.empty();
}
});
}
@Override
public boolean supportsStreaming() {
// Streaming only when every delegate supports it.
return loaders.stream().allMatch(DocumentLoader::supportsStreaming);
}
}
}
|
0
|
java-sources/ai/driftkit/driftkit-rag-spring-boot-starter/0.8.1/ai/driftkit/rag/spring
|
java-sources/ai/driftkit/driftkit-rag-spring-boot-starter/0.8.1/ai/driftkit/rag/spring/service/RagService.java
|
package ai.driftkit.rag.spring.service;
import ai.driftkit.embedding.core.service.EmbeddingModel;
import ai.driftkit.rag.core.loader.DocumentLoader;
import ai.driftkit.rag.core.reranker.ModelBasedReranker;
import ai.driftkit.rag.core.retriever.Retriever;
import ai.driftkit.rag.core.retriever.VectorStoreRetriever;
import ai.driftkit.rag.core.splitter.RecursiveCharacterTextSplitter;
import ai.driftkit.rag.core.splitter.TextSplitter;
import ai.driftkit.rag.ingestion.IngestionPipeline;
import ai.driftkit.rag.retrieval.RetrievalPipeline;
import ai.driftkit.rag.spring.autoconfigure.RagProperties;
import ai.driftkit.vector.core.domain.BaseVectorStore;
import ai.driftkit.vector.core.domain.Document;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;
/**
 * Main service for RAG operations.
 * Provides convenient methods for document ingestion and retrieval.
 *
 * <p>Retrieval methods are non-throwing: failures are logged and an empty list
 * is returned. A blank index name falls back to the configured default index.
 */
@Slf4j
@RequiredArgsConstructor
public class RagService {
private final RagProperties properties;
private final DocumentLoaderFactory loaderFactory;
private final BaseVectorStore vectorStore;
// May be null when no splitter bean was created (see auto-configuration).
private final TextSplitter textSplitter;
// May be null; only needed for EmbeddingVectorStore-backed retrieval/ingestion.
private final EmbeddingModel embeddingModel;
private final VectorStoreRetriever retriever;
private final ModelBasedReranker reranker;
/**
 * Ingest documents from file system.
 */
public Stream<IngestionPipeline.DocumentResult> ingestFromFileSystem(
String path,
String indexName) {
return ingestFromFileSystem(Path.of(path), indexName);
}
/**
 * Ingest documents from file system with custom configuration.
 */
public Stream<IngestionPipeline.DocumentResult> ingestFromFileSystem(
Path path,
String indexName) {
if (StringUtils.isEmpty(indexName)) {
indexName = properties.getDefaultIndex();
}
DocumentLoader loader = loaderFactory.fileSystemLoader(path);
return ingest(loader, indexName);
}
/**
 * Ingest documents from URLs.
 */
public Stream<IngestionPipeline.DocumentResult> ingestFromUrls(
List<String> urls,
String indexName) {
if (StringUtils.isEmpty(indexName)) {
indexName = properties.getDefaultIndex();
}
DocumentLoader loader = loaderFactory.urlLoader(urls);
return ingest(loader, indexName);
}
/**
 * Ingest documents using a custom loader.
 *
 * @return lazy stream of per-document results; consume it to drive the pipeline
 */
public Stream<IngestionPipeline.DocumentResult> ingest(
DocumentLoader loader,
String indexName) {
log.info("Starting ingestion into index: {}", indexName);
IngestionPipeline pipeline = IngestionPipeline.builder()
.documentLoader(loader)
.textSplitter(textSplitter)
.embeddingClient(embeddingModel)
.vectorStore(vectorStore)
.indexName(indexName)
.maxRetries(properties.getIngestion().getMaxRetries())
.retryDelayMs(properties.getIngestion().getRetryDelayMs())
.useVirtualThreads(properties.getIngestion().isUseVirtualThreads())
.build();
return pipeline.run();
}
/**
 * Retrieve documents with default configuration.
 */
public List<Document> retrieve(String query) {
return retrieve(query, properties.getDefaultIndex());
}
/**
 * Retrieve documents from specific index.
 */
public List<Document> retrieve(String query, String indexName) {
return retrieve(
query,
indexName,
properties.getRetriever().getDefaultTopK(),
properties.getRetriever().getDefaultMinScore()
);
}
/**
 * Retrieve documents with custom configuration.
 */
public List<Document> retrieve(
String query,
String indexName,
int topK,
float minScore) {
return retrieve(query, indexName, topK, minScore, Map.of());
}
/**
 * Retrieve documents with full configuration.
 *
 * @param filters exact-match metadata filters applied to results
 * @return matching documents; empty on blank query or any retrieval failure
 */
public List<Document> retrieve(
String query,
String indexName,
int topK,
float minScore,
Map<String, Object> filters) {
if (StringUtils.isEmpty(query)) {
log.warn("Empty query provided");
return List.of();
}
if (StringUtils.isEmpty(indexName)) {
indexName = properties.getDefaultIndex();
}
log.debug("Retrieving documents for query: {} from index: {}", query, indexName);
try {
// NOTE(review): virtual-thread flag is taken from the ingestion properties
// even for retrieval — confirm this is intentional.
RetrievalPipeline.RetrievalPipelineBuilder builder = RetrievalPipeline.builder()
.vectorStore(vectorStore)
.embeddingClient(embeddingModel)
.indexName(indexName)
.topK(topK)
.minScore(minScore)
.filters(filters)
.queryPrefix(properties.getRetriever().getQueryPrefix())
.useVirtualThreads(properties.getIngestion().isUseVirtualThreads());
// Add optional components if available
if (retriever != null) {
builder.retriever(retriever);
}
if (reranker != null && properties.getReranker().isEnabled()) {
builder.reranker(reranker);
}
RetrievalPipeline pipeline = builder.build();
return pipeline.retrieve(query);
} catch (Exception e) {
// Deliberate best-effort contract: log and return no results.
log.error("Failed to retrieve documents for query: {}", query, e);
return List.of();
}
}
/**
 * Create a custom ingestion pipeline builder.
 *
 * <p>Pre-populated with this service's store, splitter, embedding client and
 * ingestion defaults; callers set the loader and index before build().
 */
public IngestionPipeline.IngestionPipelineBuilder ingestionBuilder() {
return IngestionPipeline.builder()
.vectorStore(vectorStore)
.embeddingClient(embeddingModel)
.textSplitter(textSplitter)
.maxRetries(properties.getIngestion().getMaxRetries())
.retryDelayMs(properties.getIngestion().getRetryDelayMs())
.useVirtualThreads(properties.getIngestion().isUseVirtualThreads());
}
/**
 * Create a custom retrieval pipeline builder.
 *
 * <p>Pre-populated with retrieval defaults and the optional retriever/reranker.
 */
public RetrievalPipeline.RetrievalPipelineBuilder retrievalBuilder() {
RetrievalPipeline.RetrievalPipelineBuilder builder = RetrievalPipeline.builder()
.vectorStore(vectorStore)
.embeddingClient(embeddingModel)
.topK(properties.getRetriever().getDefaultTopK())
.minScore(properties.getRetriever().getDefaultMinScore())
.queryPrefix(properties.getRetriever().getQueryPrefix())
.useVirtualThreads(properties.getIngestion().isUseVirtualThreads());
if (retriever != null) {
builder.retriever(retriever);
}
if (reranker != null && properties.getReranker().isEnabled()) {
builder.reranker(reranker);
}
return builder;
}
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/domain/BaseVectorStore.java
|
package ai.driftkit.vector.core.domain;
import ai.driftkit.config.EtlConfig.VectorStoreConfig;
import org.apache.commons.collections4.CollectionUtils;
import java.util.List;
/**
 * Base Vector Store interface containing all methods except findRelevant.
 * This interface defines the core operations for document management in a vector store.
 */
public interface BaseVectorStore {

    /** Apply store-specific configuration before use. */
    void configure(VectorStoreConfig config) throws Exception;

    /** @return true when this implementation handles the given logical store name */
    boolean supportsStoreName(String storeName);

    /**
     * Convenience wrapper around {@link #addDocuments(String, List)} for a single
     * document.
     *
     * @return the ID of the stored document, or null when the store reported none
     */
    default String addDocument(String index, Document document) throws Exception {
        List<String> ids = addDocuments(index, List.of(document));
        return CollectionUtils.isEmpty(ids) ? null : ids.getFirst();
    }

    /**
     * Add documents with embeddings. Returns list of IDs of the stored documents.
     */
    List<String> addDocuments(String index, List<Document> documents) throws Exception;

    /**
     * Update a document (including its vector) for a given ID.
     */
    void updateDocument(String id, String index, Document document) throws Exception;

    /**
     * Delete a document by ID.
     */
    void deleteDocument(String id, String index) throws Exception;

    /**
     * Read a document by ID.
     */
    Document readDocument(String id, String index) throws Exception;
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/domain/Document.java
|
package ai.driftkit.vector.core.domain;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Map;
/**
 * A unit of content stored in a vector store: an id, an optional embedding
 * vector, the raw page text, and arbitrary metadata.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
public class Document {
    private String id;
    private float[] vector;
    private String pageContent;
    private Map<String, Object> metadata;

    /** Convenience constructor for a document without metadata. */
    public Document(String id, float[] vector, String pageContent) {
        this(id, vector, pageContent, null);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/domain/DocumentsResult.java
|
package ai.driftkit.vector.core.domain;
import com.fasterxml.jackson.annotation.JsonIgnore;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * DocumentsResult class revised for serialization compatibility
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
public class DocumentsResult {

    /**
     * Shared empty result. NOTE(review): this instance is mutable — calling
     * put(...) on it corrupts the global constant; treat it as read-only.
     */
    public static final DocumentsResult EMPTY = new DocumentsResult();

    private List<ResultEntry> result = new ArrayList<>();

    /** Builds a result from a document-to-score map, in the map's iteration order. */
    public DocumentsResult(Map<Document, Float> docs) {
        docs.forEach((doc, score) -> result.add(new ResultEntry(doc, score)));
    }

    /** @return the top document, or null when there are no entries */
    @JsonIgnore
    public Document first() {
        if (result.isEmpty()) {
            return null;
        }
        return result.get(0).getDocument();
    }

    @JsonIgnore
    public boolean isEmpty() {
        return result == null || result.isEmpty();
    }

    /** Appends a document with its score. */
    @JsonIgnore
    public void put(Document doc, Float value) {
        result.add(new ResultEntry(doc, value));
    }

    /** @return a fresh mutable list of the documents, scores dropped */
    @JsonIgnore
    public List<Document> documents() {
        List<Document> docs = new ArrayList<>(result.size());
        result.forEach(entry -> docs.add(entry.getDocument()));
        return docs;
    }

    @JsonIgnore
    public int size() {
        return isEmpty() ? 0 : result.size();
    }

    /**
     * ResultEntry class to hold Document and its associated Float value
     */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class ResultEntry {
        private Document document;
        private Float value;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/domain/EmbeddingVectorStore.java
|
package ai.driftkit.vector.core.domain;
/**
 * Vector Store interface that supports similarity search using embedding vectors.
 * This interface extends BaseVectorStore and adds the ability to search using
 * float array embeddings.
 */
public interface EmbeddingVectorStore extends BaseVectorStore {
/**
 * Perform similarity search given a vector embedding, returning the matching
 * documents paired with their similarity scores.
 *
 * @param index the index to search in
 * @param embedding the embedding vector to search with
 * @param k the number of top results to return
 * @return DocumentsResult containing documents and their similarity scores
 * @throws Exception if an error occurs during search
 */
DocumentsResult findRelevant(String index, float[] embedding, int k) throws Exception;
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/domain/Pair.java
|
package ai.driftkit.vector.core.domain;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
 * A simple Pair utility class.
 * Lombok {@code @Data} supplies getters/setters, equals/hashCode and toString;
 * {@code @AllArgsConstructor} supplies the (left, right) constructor.
 *
 * @param <L> type of the left element
 * @param <R> type of the right element
 */
@Data
@AllArgsConstructor
public class Pair<L, R> {
    // Left element of the pair.
    private L left;
    // Right element of the pair.
    private R right;
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/domain/TextVectorStore.java
|
package ai.driftkit.vector.core.domain;
/**
 * Vector Store interface that supports similarity search using text queries.
 * This interface extends BaseVectorStore and adds the ability to search using
 * text strings, which are typically converted to embeddings internally.
 */
public interface TextVectorStore extends BaseVectorStore {
    /**
     * Perform similarity search given a text query, returning documents with scores,
     * where the key is the Document and the value is the similarity score.
     * The text query will be converted to an embedding internally.
     *
     * @param index the index to search in
     * @param query the text query to search with (must be non-blank for meaningful results)
     * @param k the number of top results to return
     * @return DocumentsResult containing documents and their similarity scores
     * @throws Exception if an error occurs during search
     */
    DocumentsResult findRelevant(String index, String query, int k) throws Exception;
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/filebased/FileBasedVectorStore.java
|
package ai.driftkit.vector.core.filebased;
import ai.driftkit.config.EtlConfig.VectorStoreConfig;
import ai.driftkit.vector.core.domain.Document;
import ai.driftkit.vector.core.inmemory.InMemoryVectorStore;
import org.jetbrains.annotations.NotNull;
import java.io.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
/**
 * In-memory vector store that persists its full document map to a single file
 * via Java serialization. The file is loaded on {@link #configure} and rewritten
 * after every batch of added documents.
 */
public class FileBasedVectorStore extends InMemoryVectorStore {

    /** Path of the serialized store file, taken from the "storageFile" config key. */
    private String configPath;

    @Override
    public boolean supportsStoreName(String storeName) {
        return "filebased".equalsIgnoreCase(storeName);
    }

    @Override
    public void configure(VectorStoreConfig config) throws Exception {
        this.configPath = config.get("storageFile");
        loadFromDisk();
    }

    /**
     * Loads the persisted document map from disk, or starts empty when the
     * storage file does not exist yet.
     */
    @SuppressWarnings("unchecked") // the serialized form is written exclusively by saveToDisk()
    private void loadFromDisk() {
        File file = new File(configPath);
        if (file.exists()) {
            try (ObjectInputStream ois = new ObjectInputStream(new FileInputStream(file))) {
                documentMap = (ConcurrentHashMap<String, Map<String, Document>>) ois.readObject();
            } catch (IOException | ClassNotFoundException e) {
                throw new RuntimeException("Failed to load vector store from disk", e);
            }
        } else {
            documentMap = new ConcurrentHashMap<>();
        }
    }

    /** Serializes the entire document map to the configured storage file. */
    private void saveToDisk() {
        try (ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(configPath))) {
            oos.writeObject(documentMap);
        } catch (IOException e) {
            throw new RuntimeException("Failed to save vector store to disk", e);
        }
    }

    /**
     * Adds documents via the in-memory implementation (which assigns UUIDs to
     * documents without ids) and then persists the whole store, keeping the
     * on-disk copy in sync. Previously this method duplicated the parent's
     * insertion logic verbatim; it now delegates instead.
     *
     * @return ids of the stored documents, in input order
     */
    @Override
    public List<String> addDocuments(String indexName, List<Document> documents) {
        List<String> ids = super.addDocuments(indexName, documents);
        saveToDisk();
        return ids;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/inmemory/InMemoryVectorStore.java
|
package ai.driftkit.vector.core.inmemory;
import ai.driftkit.config.EtlConfig.VectorStoreConfig;
import ai.driftkit.vector.core.domain.Document;
import ai.driftkit.vector.core.domain.DocumentsResult;
import ai.driftkit.vector.core.domain.EmbeddingVectorStore;
import org.jetbrains.annotations.NotNull;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
/**
 * Simple in-memory {@link EmbeddingVectorStore} backed by nested concurrent maps:
 * index name -> (document id -> document). Similarity is cosine similarity over
 * each document's stored vector.
 */
public class InMemoryVectorStore implements EmbeddingVectorStore {

    /** index name -> (document id -> document); concurrent for multi-threaded use. */
    protected Map<String, Map<String, Document>> documentMap = new ConcurrentHashMap<>();

    @Override
    public boolean supportsStoreName(String storeName) {
        return "inmemory".equalsIgnoreCase(storeName);
    }

    @Override
    public void configure(VectorStoreConfig config) throws Exception {
        // Nothing to configure for the in-memory implementation.
    }

    /**
     * Adds documents to the given index, assigning a random UUID to any document
     * that has no id.
     *
     * @return ids of the stored documents, in input order
     */
    @Override
    public List<String> addDocuments(String indexName, List<Document> documents) {
        List<String> ids = new ArrayList<>();
        for (Document doc : documents) {
            String id = doc.getId();
            if (id == null || id.isEmpty()) {
                id = UUID.randomUUID().toString();
                doc.setId(id);
            }
            Map<String, Document> index = getIndexOrCreate(indexName);
            index.put(id, doc);
            ids.add(id);
        }
        return ids;
    }

    @Override
    public DocumentsResult findRelevant(String index, float[] queryEmbedding, int topK) {
        return query(index, queryEmbedding, topK, null);
    }

    /**
     * Similarity search with optional exact-match metadata filters.
     *
     * @param indexName      index to search
     * @param queryEmbedding query vector
     * @param topK           maximum number of results
     * @param filters        metadata key/value pairs that must all match, or null for no filtering
     * @return documents with positive cosine similarity, best first
     */
    public DocumentsResult query(String indexName, float[] queryEmbedding, int topK, Map<String, Object> filters) {
        Map<String, Document> index = getIndexOrCreate(indexName);
        // Step 1: keep only documents that actually have a vector (required for
        // similarity) and that match the metadata filters, if any.
        // Bug fix: previously the null-vector check was applied only when filters
        // were present, so with filters == null a vector-less document reached
        // cosineSimilarity and caused an NPE.
        List<Document> filteredDocuments = index.values().stream()
                .filter(doc -> doc.getVector() != null
                        && (filters == null || matchesFilters(doc.getMetadata(), filters)))
                .collect(Collectors.toList());
        // Step 2: extract embeddings and ids in parallel lists.
        List<float[]> docVectors = filteredDocuments.stream()
                .map(Document::getVector)
                .collect(Collectors.toList());
        List<String> docIds = filteredDocuments.stream()
                .map(Document::getId)
                .collect(Collectors.toList());
        // Step 3: rank by cosine similarity, keeping the top-k positive scores.
        List<SimilarityResult> topResults = getTopKSimilarities(queryEmbedding, docVectors, docIds, topK);
        // Step 4: assemble results in descending-similarity order.
        LinkedHashMap<Document, Float> resultMap = new LinkedHashMap<>();
        for (SimilarityResult result : topResults) {
            if (result.getSimilarity() == 0) {
                continue;
            }
            Document doc = index.get(result.getDocumentId());
            resultMap.put(doc, result.getSimilarity());
        }
        return new DocumentsResult(resultMap);
    }

    /**
     * Returns true when every filter key is present in the metadata with an equal value.
     * Null metadata never matches; null stored values are compared null-safely.
     */
    private boolean matchesFilters(Map<String, Object> metadata, Map<String, Object> filters) {
        if (metadata == null) {
            return false;
        }
        for (Map.Entry<String, Object> filter : filters.entrySet()) {
            if (!metadata.containsKey(filter.getKey())
                    || !Objects.equals(metadata.get(filter.getKey()), filter.getValue())) {
                return false;
            }
        }
        return true;
    }

    /** Computes similarities for all candidates and returns the k best positive ones, descending. */
    private List<SimilarityResult> getTopKSimilarities(float[] queryEmbedding, List<float[]> docEmbeddings, List<String> docIds, int k) {
        List<SimilarityResult> results = new ArrayList<>();
        for (int i = 0; i < docEmbeddings.size(); i++) {
            float[] docEmbedding = docEmbeddings.get(i);
            float similarity = cosineSimilarity(queryEmbedding, docEmbedding);
            results.add(new SimilarityResult(similarity, docIds.get(i)));
        }
        // Sort results by similarity in descending order.
        results.sort((r1, r2) -> Float.compare(r2.getSimilarity(), r1.getSimilarity()));
        // Return top-k results with strictly positive similarity.
        return results.stream().filter(e -> e.getSimilarity() > 0).limit(k).collect(Collectors.toList());
    }

    /**
     * Cosine similarity of two vectors. Returns 0 for null or mismatched-length
     * inputs instead of throwing; the epsilon in the denominator guards against
     * division by zero for all-zero vectors.
     */
    private float cosineSimilarity(float[] v1, float[] v2) {
        if (v1 == null || v2 == null || v1.length != v2.length) {
            return 0f;
        }
        float dotProduct = 0f;
        float normA = 0f;
        float normB = 0f;
        for (int i = 0; i < v1.length; i++) {
            dotProduct += v1[i] * v2[i];
            normA += v1[i] * v1[i];
            normB += v2[i] * v2[i];
        }
        return dotProduct / (float) (Math.sqrt(normA) * Math.sqrt(normB) + 1e-10f);
    }

    /** Replaces the document stored under {@code id}; throws if it does not exist. */
    @Override
    public void updateDocument(String id, String indexName, Document document) {
        Map<String, Document> index = getIndexOrCreate(indexName);
        if (!index.containsKey(id)) {
            throw new NoSuchElementException("No document found with ID: " + id);
        }
        index.put(id, document);
    }

    /** Removes the document stored under {@code id}; throws if it does not exist. */
    @Override
    public void deleteDocument(String id, String indexName) {
        Map<String, Document> index = getIndexOrCreate(indexName);
        if (!index.containsKey(id)) {
            throw new NoSuchElementException("No document found with ID: " + id);
        }
        index.remove(id);
    }

    /** Returns the document stored under {@code id}, or null when absent. */
    @Override
    public Document readDocument(String id, String indexName) {
        return getIndexOrCreate(indexName).get(id);
    }

    @NotNull
    private Map<String, Document> getIndexOrCreate(String indexName) {
        return documentMap.computeIfAbsent(indexName, e -> new ConcurrentHashMap<>());
    }

    /** Immutable (similarity, documentId) pair used while ranking candidates. */
    private static class SimilarityResult {
        private final float similarity;
        private final String documentId;
        public SimilarityResult(float similarity, String documentId) {
            this.similarity = similarity;
            this.documentId = documentId;
        }
        public float getSimilarity() {
            return similarity;
        }
        public String getDocumentId() {
            return documentId;
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/pinecone
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/pinecone/client/PineconeVectorStore.java
|
package ai.driftkit.vector.core.pinecone.client;
import ai.driftkit.config.EtlConfig;
import ai.driftkit.config.EtlConfig.VectorStoreConfig;
import ai.driftkit.vector.core.domain.Document;
import ai.driftkit.vector.core.domain.DocumentsResult;
import ai.driftkit.vector.core.domain.EmbeddingVectorStore;
import ai.driftkit.vector.core.pinecone.client.PineconeVectorStore.PineconeQueryResponse.Match;
import ai.driftkit.vector.core.pinecone.client.PineconeVectorStore.PineconeUpsertRequest.VectorEntry;
import feign.*;
import feign.jackson.JacksonDecoder;
import feign.jackson.JacksonEncoder;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import okhttp3.OkHttpClient;
import org.apache.commons.collections4.CollectionUtils;
import java.nio.ByteBuffer;
import java.util.*;
@Slf4j
public class PineconeVectorStore implements EmbeddingVectorStore {

    /** Store configuration; provides the endpoint and API key. */
    private VectorStoreConfig config;
    /** Feign client for the Pinecone HTTP API; created in configure(). */
    private PineconeApi api;

    /** Builds the Feign HTTP client against the configured Pinecone endpoint. */
    @Override
    public void configure(VectorStoreConfig config) {
        this.config = config;
        this.api = Feign.builder()
                .client(new feign.okhttp.OkHttpClient(new OkHttpClient.Builder().build()))
                .encoder(new JacksonEncoder())
                .decoder(new JacksonDecoder())
                .target(PineconeApi.class, config.get(EtlConfig.ENDPOINT));
        log.info("Configured PineconeVectorStore");
    }

    @Override
    public boolean supportsStoreName(String storeName) {
        return "pinecone".equalsIgnoreCase(storeName);
    }

    /**
     * Upserts the documents into the Pinecone namespace named after the index,
     * assigning UUIDs to documents without ids. The page content is stored in
     * the vector metadata under "page_content".
     *
     * @return ids of the upserted documents, in input order
     */
    @Override
    public List<String> addDocuments(String index, List<Document> documents) throws Exception {
        String namespace = index;
        List<VectorEntry> entries = new ArrayList<>();
        List<String> ids = new ArrayList<>();
        for (Document doc : documents) {
            String id = doc.getId();
            if (id == null || id.isEmpty()) {
                id = UUID.randomUUID().toString();
                doc.setId(id);
            }
            Map<String, Object> metadata = fromDocument(doc);
            entries.add(new VectorEntry(id, doc.getVector(), metadata));
            ids.add(id);
        }
        PineconeUpsertRequest req = new PineconeUpsertRequest(entries, namespace);
        api.upsert(config.get(EtlConfig.API_KEY), req);
        return ids;
    }

    /**
     * Queries the namespace for the k nearest vectors and converts matches back
     * to documents; returns DocumentsResult.EMPTY when Pinecone reports no matches.
     */
    @Override
    public DocumentsResult findRelevant(String index, float[] embedding, int k) throws Exception {
        String namespace = index;
        PineconeQueryRequest req = new PineconeQueryRequest(embedding, k, namespace, true);
        PineconeQueryResponse resp = api.query(config.get(EtlConfig.API_KEY), req);
        if (resp.getMatches() == null) return DocumentsResult.EMPTY;
        LinkedHashMap<Document, Float> resultMap = new LinkedHashMap<>();
        for (PineconeQueryResponse.Match m : resp.getMatches()) {
            Document doc = toDocument(m.getId(), m.getMetadata());
            resultMap.put(doc, m.getScore());
        }
        return new DocumentsResult(resultMap);
    }

    /**
     * Upserts a single vector under the given id. Feign error bodies are decoded
     * and surfaced in the thrown exception message to ease debugging.
     */
    @Override
    public void updateDocument(String id, String index, Document document) throws Exception {
        try {
            String namespace = index;
            VectorEntry entry = new VectorEntry(id, document.getVector(), fromDocument(document));
            PineconeUpsertRequest req = new PineconeUpsertRequest(Collections.singletonList(entry), namespace);
            api.upsert(config.get(EtlConfig.API_KEY), req);
        } catch (Exception e) {
            if (e instanceof FeignException ex) {
                Optional<ByteBuffer> body = ex.responseBody();
                if (body.isPresent()) {
                    // Decode explicitly as UTF-8: the previous no-charset String
                    // constructor used the platform default, which is not
                    // guaranteed to match the API's response encoding.
                    String bs = new String(body.get().array(), java.nio.charset.StandardCharsets.UTF_8);
                    throw new RuntimeException(bs, e);
                }
            }
            throw new RuntimeException(e);
        }
    }

    /** Deletes a single vector by id from the namespace named after the index. */
    @Override
    public void deleteDocument(String id, String index) throws Exception {
        String namespace = index;
        PineconeDeleteRequest deleteRequest = new PineconeDeleteRequest(Collections.singletonList(id), namespace, false, null);
        api.delete(config.get(EtlConfig.API_KEY), deleteRequest);
    }

    /**
     * Fetches a single document by id via a top-1 query; returns null when the
     * id is not present in the namespace.
     */
    @Override
    public Document readDocument(String id, String index) throws Exception {
        PineconeIdQueryRequest queryParams = new PineconeIdQueryRequest();
        queryParams.setId(id);
        queryParams.setNamespace(index);
        queryParams.setTopK(1);
        queryParams.setIncludeMetadata(true);
        PineconeQueryResponse resp = api.fetch(config.get(EtlConfig.API_KEY), queryParams);
        if (CollectionUtils.isEmpty(resp.getMatches())) return null;
        Match m = resp.getMatches().getFirst();
        return toDocument(m.getId(), m.getMetadata());
    }

    /** Rebuilds a Document from Pinecone metadata, pulling out the "page_content" key. */
    private Document toDocument(String id, Map<String, Object> metadata) {
        String pageContent = (String) metadata.getOrDefault("page_content", "");
        Map<String, Object> metaCopy = new HashMap<>(metadata);
        metaCopy.remove("page_content");
        Document doc = new Document(id, null, pageContent);
        doc.setMetadata(metaCopy);
        return doc;
    }

    /** Flattens a Document into Pinecone metadata, embedding the page content under "page_content". */
    private Map<String, Object> fromDocument(Document doc) {
        Map<String, Object> metadata = new HashMap<>(
                doc.getMetadata() != null ? doc.getMetadata() : Collections.emptyMap()
        );
        metadata.put("page_content", doc.getPageContent());
        return metadata;
    }

    // Feign client interface mirroring the Pinecone REST API.
    public interface PineconeApi {
        @RequestLine("POST /vectors/upsert")
        @Headers({"Content-Type: application/json", "Api-Key: {apiKey}"})
        Map<String, Object> upsert(@Param("apiKey") String apiKey, PineconeUpsertRequest request);
        @RequestLine("POST /query")
        @Headers({"Content-Type: application/json", "Api-Key: {apiKey}"})
        PineconeQueryResponse query(@Param("apiKey") String apiKey, PineconeQueryRequest request);
        @RequestLine("POST /query")
        @Headers({"Content-Type: application/json", "Api-Key: {apiKey}"})
        PineconeQueryResponse fetch(@Param("apiKey") String apiKey, @QueryMap PineconeIdQueryRequest idRequest);
        @RequestLine("POST /vectors/delete")
        @Headers({"Content-Type: application/json", "Api-Key: {apiKey}"})
        Map<String, Object> delete(@Param("apiKey") String apiKey, PineconeDeleteRequest request);
    }

    /** Request body for POST /vectors/upsert. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class PineconeUpsertRequest {
        private List<VectorEntry> vectors;
        private String namespace;
        @Data
        @NoArgsConstructor
        @AllArgsConstructor
        public static class VectorEntry {
            private String id;
            private float[] values;
            private Map<String, Object> metadata;
        }
    }

    /** Request body for POST /query with a raw embedding. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class PineconeQueryRequest {
        private float[] vector;
        private int topK;
        private String namespace;
        private boolean includeMetadata;
    }

    /** Query-by-id parameters (sent as a query map to POST /query). */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class PineconeIdQueryRequest {
        private String id;
        private int topK;
        private String namespace;
        private boolean includeMetadata;
    }

    /** Response body for POST /query. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class PineconeQueryResponse {
        private List<Match> matches;
        private String namespace;
        @Data
        @NoArgsConstructor
        @AllArgsConstructor
        public static class Match {
            private String id;
            private float score;
            private Map<String, Object> metadata;
        }
    }

    /** Response body for the fetch endpoint (currently unused by this class). */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class PineconeFetchResponse {
        private Map<String, VectorRecord> vectors;
        private String namespace;
        @Data
        @NoArgsConstructor
        @AllArgsConstructor
        public static class VectorRecord {
            private String id;
            private float[] values;
            private Map<String, Object> metadata;
        }
    }

    /** Request body for POST /vectors/delete. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class PineconeDeleteRequest {
        private List<String> ids;
        private String namespace;
        private boolean deleteAll;
        private Map<String, Object> filter;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core
|
java-sources/ai/driftkit/driftkit-vector-core/0.8.1/ai/driftkit/vector/core/service/VectorStoreFactory.java
|
package ai.driftkit.vector.core.service;
import ai.driftkit.config.EtlConfig.VectorStoreConfig;
import ai.driftkit.vector.core.domain.BaseVectorStore;
import ai.driftkit.vector.core.domain.EmbeddingVectorStore;
import ai.driftkit.vector.core.domain.TextVectorStore;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.ServiceLoader;
import java.util.concurrent.ConcurrentHashMap;
/**
 * This factory creates instances of vector stores from config.
 */
public class VectorStoreFactory {

    /** Cache of configured stores keyed by store name, so each store is configured exactly once. */
    private static final Map<String, BaseVectorStore> stores = new ConcurrentHashMap<>();

    /**
     * Returns the vector store matching the configured store name, discovering
     * implementations via {@link ServiceLoader}. EmbeddingVectorStore providers
     * are checked before TextVectorStore providers, matching the original
     * lookup order. Results are cached per store name.
     *
     * @param config store configuration; its name selects the implementation
     * @return a configured vector store
     * @throws IllegalArgumentException if config or its name is null, or no provider matches
     */
    public static BaseVectorStore fromConfig(VectorStoreConfig config) throws Exception {
        if (config == null || config.getName() == null) {
            throw new IllegalArgumentException("Configuration and storeName must not be null");
        }
        return stores.computeIfAbsent(config.getName(), name -> {
            BaseVectorStore store = loadStore(EmbeddingVectorStore.class, name, config);
            if (store == null) {
                store = loadStore(TextVectorStore.class, name, config);
            }
            if (store == null) {
                throw new IllegalArgumentException("Unknown or unavailable vector store: " + name);
            }
            return store;
        });
    }

    /**
     * Scans ServiceLoader providers of the given type and returns the first one
     * that supports the store name, configured; returns null when none matches.
     * Extracted to remove the two copy-pasted lookup loops.
     */
    private static <T extends BaseVectorStore> T loadStore(Class<T> type, String storeName, VectorStoreConfig config) {
        for (T store : ServiceLoader.load(type)) {
            if (store.supportsStoreName(storeName)) {
                try {
                    store.configure(config);
                    return store;
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        }
        return null;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-ai/0.8.1/ai/driftkit/vector
|
java-sources/ai/driftkit/driftkit-vector-spring-ai/0.8.1/ai/driftkit/vector/springai/SpringAiVectorStoreAdapter.java
|
package ai.driftkit.vector.springai;
import ai.driftkit.config.EtlConfig.VectorStoreConfig;
import ai.driftkit.vector.core.domain.Document;
import ai.driftkit.vector.core.domain.DocumentsResult;
import ai.driftkit.vector.core.domain.TextVectorStore;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.ai.vectorstore.SearchRequest;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
* Adapter that bridges Spring AI VectorStore with DriftKit VectorStore interface.
* This allows using any Spring AI vector store implementation within the DriftKit framework.
*/
@Slf4j
public class SpringAiVectorStoreAdapter implements TextVectorStore {

    /** Delegate Spring AI store that performs the actual storage and search. */
    private final org.springframework.ai.vectorstore.VectorStore springAiVectorStore;
    /** Name this adapter answers to in supportsStoreName(). */
    private final String storeName;
    /** Retained configuration (currently unused beyond logging). */
    private VectorStoreConfig config;

    /**
     * @param springAiVectorStore the Spring AI store to delegate to; must not be null
     * @param storeName           identifier for this store; must not be blank
     */
    public SpringAiVectorStoreAdapter(org.springframework.ai.vectorstore.VectorStore springAiVectorStore, String storeName) {
        if (springAiVectorStore == null) {
            throw new IllegalArgumentException("Spring AI VectorStore cannot be null");
        }
        if (StringUtils.isBlank(storeName)) {
            throw new IllegalArgumentException("Store name cannot be blank");
        }
        this.springAiVectorStore = springAiVectorStore;
        this.storeName = storeName;
    }

    @Override
    public void configure(VectorStoreConfig config) throws Exception {
        this.config = config;
        log.info("Configured Spring AI Vector Store adapter with store name: {}", storeName);
    }

    @Override
    public boolean supportsStoreName(String storeName) {
        return this.storeName.equalsIgnoreCase(storeName);
    }

    /**
     * Converts DriftKit documents to Spring AI documents and adds them. The
     * logical index and original id are stored in metadata ("index",
     * "driftkit_id") because Spring AI has no native index concept.
     *
     * @return ids of the added documents (generated when absent), in input order
     */
    @Override
    public List<String> addDocuments(String index, List<Document> documents) throws Exception {
        if (CollectionUtils.isEmpty(documents)) {
            return new ArrayList<>();
        }
        if (StringUtils.isBlank(index)) {
            throw new IllegalArgumentException("Index cannot be blank");
        }
        List<org.springframework.ai.document.Document> springAiDocs = new ArrayList<>();
        List<String> documentIds = new ArrayList<>();
        for (Document doc : documents) {
            if (doc == null) {
                continue;
            }
            String id = StringUtils.isNotEmpty(doc.getId()) ? doc.getId() : UUID.randomUUID().toString();
            documentIds.add(id);
            Map<String, Object> metadata = new HashMap<>();
            if (doc.getMetadata() != null) {
                metadata.putAll(doc.getMetadata());
            }
            metadata.put("index", index);
            metadata.put("driftkit_id", id);
            org.springframework.ai.document.Document springAiDoc = org.springframework.ai.document.Document.builder()
                    .id(id)
                    .text(doc.getPageContent())
                    .metadata(metadata)
                    .build();
            springAiDocs.add(springAiDoc);
        }
        if (CollectionUtils.isEmpty(springAiDocs)) {
            return documentIds;
        }
        springAiVectorStore.add(springAiDocs);
        log.debug("Added {} documents to Spring AI vector store for index: {}", springAiDocs.size(), index);
        return documentIds;
    }

    /**
     * Text similarity search scoped to the given index via a filter expression.
     * Results carry a placeholder score of -1 because Spring AI does not expose
     * similarity scores; ordering still reflects relevance.
     */
    @Override
    public DocumentsResult findRelevant(String index, String query, int k) throws Exception {
        if (StringUtils.isBlank(query)) {
            return DocumentsResult.EMPTY;
        }
        if (StringUtils.isBlank(index)) {
            throw new IllegalArgumentException("Index cannot be blank");
        }
        if (k <= 0) {
            throw new IllegalArgumentException("k must be positive");
        }
        // Create search request with text query
        SearchRequest searchRequest = SearchRequest.builder()
                .query(query)
                .filterExpression(String.format("index == '%s'", escapeFilterValue(index)))
                .topK(k)
                .build();
        List<org.springframework.ai.document.Document> results = springAiVectorStore.similaritySearch(searchRequest);
        DocumentsResult result = new DocumentsResult();
        // Spring AI returns results already sorted by relevance
        for (org.springframework.ai.document.Document doc : results) {
            Document driftkitDoc = convertToDriftKitDocument(doc);
            if (driftkitDoc == null) {
                continue;
            }
            // Spring AI doesn't expose similarity scores directly, so we use a placeholder
            // The results are already ordered by relevance
            float score = -1f;
            result.put(driftkitDoc, score);
        }
        log.debug("Found {} relevant documents for index: {}", result.size(), index);
        return result;
    }

    /** Updates by delete-then-re-add, since Spring AI has no in-place update. */
    @Override
    public void updateDocument(String id, String index, Document document) throws Exception {
        if (StringUtils.isEmpty(id)) {
            throw new IllegalArgumentException("Document ID cannot be empty");
        }
        if (document == null) {
            throw new IllegalArgumentException("Document cannot be null");
        }
        if (StringUtils.isBlank(index)) {
            throw new IllegalArgumentException("Index cannot be blank");
        }
        deleteDocument(id, index);
        document.setId(id);
        addDocuments(index, List.of(document));
        log.debug("Updated document {} in index: {}", id, index);
    }

    /**
     * Deletes by document id. NOTE(review): the delegate delete is id-only, so
     * the index argument is validated but not used to scope the deletion.
     */
    @Override
    public void deleteDocument(String id, String index) throws Exception {
        if (StringUtils.isEmpty(id)) {
            throw new IllegalArgumentException("Document ID cannot be empty");
        }
        if (StringUtils.isBlank(index)) {
            throw new IllegalArgumentException("Index cannot be blank");
        }
        springAiVectorStore.delete(List.of(id));
        log.debug("Deleted document {} from index: {}", id, index);
    }

    /** Looks up a single document by its DriftKit id within the index, or null when absent. */
    @Override
    public Document readDocument(String id, String index) throws Exception {
        if (StringUtils.isEmpty(id)) {
            throw new IllegalArgumentException("Document ID cannot be empty");
        }
        if (StringUtils.isBlank(index)) {
            throw new IllegalArgumentException("Index cannot be blank");
        }
        // For findById, we need to create a dummy query with proper filter
        SearchRequest searchRequest = SearchRequest.builder()
                .filterExpression(String.format("driftkit_id == '%s' && index == '%s'", escapeFilterValue(id), escapeFilterValue(index)))
                .topK(1)
                .build();
        List<org.springframework.ai.document.Document> results = springAiVectorStore.similaritySearch(searchRequest);
        if (CollectionUtils.isEmpty(results)) {
            return null;
        }
        return convertToDriftKitDocument(results.get(0));
    }

    /** Maps a Spring AI document back to a DriftKit document, stripping adapter-internal metadata. */
    private Document convertToDriftKitDocument(org.springframework.ai.document.Document springAiDoc) {
        if (springAiDoc == null) {
            return null;
        }
        String id = (String) springAiDoc.getMetadata().getOrDefault("driftkit_id", springAiDoc.getId());
        Map<String, Object> metadata = new HashMap<>();
        if (springAiDoc.getMetadata() != null) {
            metadata.putAll(springAiDoc.getMetadata());
            metadata.remove("driftkit_id");
            metadata.remove("index");
        }
        return new Document(
                id,
                null,
                springAiDoc.getText(),
                metadata
        );
    }

    /**
     * Escapes a value for use inside a single-quoted filter-expression literal.
     * Backslashes are escaped first so that existing backslashes cannot combine
     * with the quote escaping to break out of the literal (the previous version
     * escaped only quotes).
     */
    private String escapeFilterValue(String value) {
        return value.replace("\\", "\\\\").replace("'", "\\'");
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-ai/0.8.1/ai/driftkit/vector
|
java-sources/ai/driftkit/driftkit-vector-spring-ai/0.8.1/ai/driftkit/vector/springai/SpringAiVectorStoreFactory.java
|
package ai.driftkit.vector.springai;
import ai.driftkit.vector.core.domain.TextVectorStore;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
/**
* Factory for creating Spring AI vector store adapters.
*/
@Slf4j
public class SpringAiVectorStoreFactory {
    /**
     * Creates a DriftKit TextVectorStore adapter for the given Spring AI VectorStore.
     *
     * @param springAiVectorStore The Spring AI vector store implementation; must not be null
     * @param storeName The name to identify this store (e.g. "pinecone", "qdrant", "weaviate"); must not be blank
     * @return A DriftKit TextVectorStore adapter
     * @throws IllegalArgumentException if the store is null or the name is blank
     */
    public static TextVectorStore create(org.springframework.ai.vectorstore.VectorStore springAiVectorStore, String storeName) {
        if (springAiVectorStore == null) {
            throw new IllegalArgumentException("Spring AI VectorStore cannot be null");
        }
        // isBlank (not isEmpty) keeps this check consistent with the adapter's own
        // constructor, which rejects whitespace-only names; previously a " " name
        // passed here only to fail inside SpringAiVectorStoreAdapter.
        if (StringUtils.isBlank(storeName)) {
            throw new IllegalArgumentException("Store name cannot be empty");
        }
        log.info("Creating Spring AI VectorStore adapter for store: {}", storeName);
        return new SpringAiVectorStoreAdapter(springAiVectorStore, storeName);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-ai-starter/0.8.1/ai/driftkit/vector/springai
|
java-sources/ai/driftkit/driftkit-vector-spring-ai-starter/0.8.1/ai/driftkit/vector/springai/autoconfigure/SpringAiVectorStoreAutoConfiguration.java
|
package ai.driftkit.vector.springai.autoconfigure;
import ai.driftkit.vector.core.domain.TextVectorStore;
import ai.driftkit.vector.springai.SpringAiVectorStoreAdapter;
import ai.driftkit.vector.springai.SpringAiVectorStoreFactory;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
/**
* Auto-configuration for Spring AI vector store integration with DriftKit.
*/
@Slf4j
@AutoConfiguration
@ConditionalOnClass({org.springframework.ai.vectorstore.VectorStore.class, TextVectorStore.class})
@ConditionalOnProperty(prefix = "driftkit.vector.spring-ai", name = "enabled", havingValue = "true", matchIfMissing = true)
@EnableConfigurationProperties(SpringAiVectorStoreProperties.class)
public class SpringAiVectorStoreAutoConfiguration {
    /**
     * Wraps the application's Spring AI {@code VectorStore} bean in a DriftKit
     * {@link TextVectorStore} adapter, named per the configured properties.
     * Only created when a Spring AI VectorStore bean exists and no adapter bean
     * was defined by the user.
     *
     * @throws IllegalStateException if the configured store name is blank
     */
    @Bean
    @ConditionalOnBean(org.springframework.ai.vectorstore.VectorStore.class)
    @ConditionalOnMissingBean(SpringAiVectorStoreAdapter.class)
    public TextVectorStore springAiVectorStoreAdapter(org.springframework.ai.vectorstore.VectorStore springAiVectorStore,
                                                      SpringAiVectorStoreProperties properties) {
        if (StringUtils.isBlank(properties.getStoreName())) {
            throw new IllegalStateException("Spring AI Vector Store name cannot be blank");
        }
        log.info("Creating Spring AI VectorStore adapter with name: {}", properties.getStoreName());
        return SpringAiVectorStoreFactory.create(springAiVectorStore, properties.getStoreName());
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.