index
int64 | repo_id
string | file_path
string | content
string |
|---|---|---|---|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-ai-starter/0.8.1/ai/driftkit/vector/springai
|
java-sources/ai/driftkit/driftkit-vector-spring-ai-starter/0.8.1/ai/driftkit/vector/springai/autoconfigure/SpringAiVectorStoreProperties.java
|
package ai.driftkit.vector.springai.autoconfigure;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
/**
 * Configuration properties for Spring AI vector store integration.
 * Bound from the {@code driftkit.vector.spring-ai} prefix.
 */
@Data
@ConfigurationProperties(prefix = "driftkit.vector.spring-ai")
public class SpringAiVectorStoreProperties {
    /**
     * Whether to enable Spring AI vector store integration.
     * Default: true
     */
    private boolean enabled = true;

    /**
     * The name to use for the Spring AI vector store adapter.
     * Default: "spring-ai"
     */
    private String storeName = "spring-ai";

    /**
     * Whether to automatically register the adapter with VectorStoreFactory.
     * Default: true
     */
    private boolean autoRegister = true;
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/autoconfigure/VectorStoreAutoConfiguration.java
|
package ai.driftkit.vector.autoconfigure;
import ai.driftkit.config.EtlConfig;
import ai.driftkit.config.EtlConfig.VectorStoreConfig;
import ai.driftkit.vector.core.domain.BaseVectorStore;
import ai.driftkit.vector.core.service.VectorStoreFactory;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.data.mongodb.repository.config.EnableMongoRepositories;
/**
 * Auto-configuration that exposes a {@link BaseVectorStore} bean built from the
 * vector-store section of {@link EtlConfig}.
 *
 * Declared to run after EtlConfigAutoConfiguration so the EtlConfig bean exists
 * before the store is created.
 */
@Slf4j
@AutoConfiguration(after = ai.driftkit.config.autoconfigure.EtlConfigAutoConfiguration.class)
@ComponentScan(basePackages = {
        "ai.driftkit.vector.spring.service",
        "ai.driftkit.vector.spring.config",
        "ai.driftkit.vector.spring.controller",
        "ai.driftkit.vector.spring.parser"
})
@EnableMongoRepositories(basePackages = "ai.driftkit.vector.spring.repository")
public class VectorStoreAutoConfiguration {

    public VectorStoreAutoConfiguration() {
        log.info("Initializing DriftKit Vector Store Auto-Configuration");
    }

    /**
     * Builds the vector store from configuration, unless the application already
     * defines its own {@link BaseVectorStore} bean.
     *
     * @param config the DriftKit ETL configuration
     * @return the configured store, or {@code null} when no vector-store section is present
     * @throws RuntimeException when store creation fails
     */
    @Bean
    @ConditionalOnMissingBean(BaseVectorStore.class)
    public BaseVectorStore vectorStore(EtlConfig config) {
        try {
            VectorStoreConfig storeConfig = config.getVectorStore();
            if (storeConfig == null) {
                log.warn("No vector store configuration found in EtlConfig");
                return null;
            }
            log.info("Initializing vector store: {}", storeConfig.getName());
            BaseVectorStore store = VectorStoreFactory.fromConfig(storeConfig);
            log.info("Successfully initialized vector store: {}", storeConfig.getName());
            return store;
        } catch (Exception e) {
            log.error("Failed to initialize vector store from configuration", e);
            throw new RuntimeException("Failed to initialize vector store", e);
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/controller/IndexController.java
|
package ai.driftkit.vector.spring.controller;
import ai.driftkit.vector.spring.domain.ContentType;
import ai.driftkit.vector.spring.domain.Index;
import ai.driftkit.vector.spring.domain.IndexTask;
import ai.driftkit.vector.spring.domain.IndexTask.TaskStatus;
import ai.driftkit.vector.spring.parser.UnifiedParser.ByteArrayParserInput;
import ai.driftkit.vector.spring.parser.UnifiedParser.ParserInput;
import ai.driftkit.vector.spring.parser.UnifiedParser.StringParserInput;
import ai.driftkit.vector.spring.parser.UnifiedParser.YoutubeIdParserInput;
import ai.driftkit.vector.spring.service.IndexService;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.*;
import org.springframework.http.ResponseEntity;
import org.springframework.web.multipart.MultipartFile;
import java.util.List;
import java.util.Optional;
/**
 * Admin REST endpoints for managing indexes and submitting indexing tasks.
 *
 * Two /submit mappings share one path and are disambiguated by the request's
 * Content-Type (JSON vs multipart upload).
 */
@Slf4j
@RestController
@RequestMapping(path = "/data/v1.0/admin/index")
public class IndexController {

    @Autowired
    private IndexService indexService;

    /**
     * Submits an indexing task from a JSON payload: either a YouTube transcript
     * request (when videoId is set) or plain text.
     *
     * @return 200 with the task id, 400 on missing index/text, 500 on internal failure
     */
    @PostMapping(value = "/submit", consumes = MediaType.APPLICATION_JSON_VALUE)
    public ResponseEntity<IndexResponse> indexJson(@RequestBody IndexRequest indexRequest) {
        try {
            // No file here, purely JSON scenario
            String index = indexRequest.getIndex();
            if (index == null || index.isEmpty()) {
                return ResponseEntity.badRequest().body(null);
            }
            ParserInput parserInput;
            if (indexRequest.getVideoId() != null) {
                YoutubeIdParserInput youtube = new YoutubeIdParserInput();
                youtube.setContentType(ContentType.YOUTUBE_TRANSCRIPT);
                youtube.setInput(indexRequest.getInput());
                youtube.setVideoId(indexRequest.getVideoId());
                youtube.setPrimaryLang(indexRequest.getPrimaryLang());
                parserInput = youtube;
            } else {
                // Fallback to plain text. Reject requests carrying no text at all
                // instead of queueing a task that is guaranteed to fail downstream.
                if (indexRequest.getText() == null) {
                    return ResponseEntity.badRequest().body(null);
                }
                parserInput = new StringParserInput(indexRequest.getText(), ContentType.TEXT);
            }
            String taskId = indexService.submitIndexingTask(index, parserInput);
            return ResponseEntity.ok(new IndexResponse(taskId));
        } catch (Exception e) {
            log.error("Error submitting indexing task: {}", e.getMessage(), e);
            return ResponseEntity.status(500).body(null);
        }
    }

    /**
     * Submits an indexing task for an uploaded file.
     *
     * @return 200 with the task id, 400 on missing file/index or unsupported
     *         content type, 500 on internal failure
     */
    @PostMapping(value = "/submit", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
    public ResponseEntity<IndexResponse> indexMultipart(
            @RequestParam(name = "file", required = false) MultipartFile file,
            @RequestParam(name = "index", required = false) String indexId
    ) {
        try {
            if (file == null || file.isEmpty() || indexId == null) {
                return ResponseEntity.badRequest().body(null);
            }
            // MultipartFile.getContentType() may be null or an unrecognized MIME type;
            // ContentType.fromString throws IllegalArgumentException for those. Map
            // that to 400 (client error) rather than letting it surface as a 500.
            ContentType contentType;
            try {
                contentType = ContentType.fromString(file.getContentType());
            } catch (IllegalArgumentException e) {
                log.warn("Rejected upload with unsupported content type: {}", file.getContentType());
                return ResponseEntity.badRequest().body(null);
            }
            ParserInput parserInput = new ByteArrayParserInput(
                    file.getBytes(),
                    Optional.ofNullable(file.getOriginalFilename()).orElse(file.getName()),
                    contentType
            );
            String taskId = indexService.submitIndexingTask(indexId, parserInput);
            return ResponseEntity.ok(new IndexResponse(taskId));
        } catch (Exception e) {
            log.error("Error submitting indexing task: {}", e.getMessage(), e);
            return ResponseEntity.status(500).body(null);
        }
    }

    /** Returns one page (100 items) of indexing tasks, newest first per IndexService. */
    @GetMapping("/indexed/list")
    public ResponseEntity<List<IndexTask>> getIndexedIndexes(@RequestParam(required = false) Integer page) {
        Page<IndexTask> tasks = indexService.getTasks(Optional.ofNullable(page).orElse(0), 100);
        return ResponseEntity.ok(tasks.getContent());
    }

    /** Deletes the index with the given id. Always returns 200. */
    @DeleteMapping("/{id}")
    public ResponseEntity<Index> deleteIndex(@PathVariable String id) {
        indexService.deleteIndex(id);
        return ResponseEntity.ok().build();
    }

    /** Creates or updates an index definition. */
    @PostMapping("/")
    public ResponseEntity<Index> saveIndex(@RequestBody Index index) {
        return ResponseEntity.ok(indexService.save(index));
    }

    /** Lists all index definitions. */
    @GetMapping("/list")
    public ResponseEntity<List<Index>> getIndexes() {
        return ResponseEntity.ok(indexService.getIndexList());
    }

    /** Returns the status of a task, or 404 when the task id is unknown. */
    @GetMapping("/status/{taskId}")
    public ResponseEntity<TaskStatusResponse> getStatus(@PathVariable String taskId) {
        IndexTask task = indexService.getTask(taskId);
        if (task == null) {
            return ResponseEntity.notFound().build();
        }
        return ResponseEntity.ok(new TaskStatusResponse(taskId, task.getStatus()));
    }

    /** Returns the full task (including result), or 404 when the task id is unknown. */
    @GetMapping("/result/{taskId}")
    public ResponseEntity<IndexTask> getResult(@PathVariable String taskId) {
        IndexTask task = indexService.getTask(taskId);
        if (task == null) {
            return ResponseEntity.notFound().build();
        }
        return ResponseEntity.ok(task);
    }

    /** Request body for the JSON /submit endpoint. */
    @Data
    public static class IndexRequest {
        private String text;           // plain-text content (used when videoId is absent)
        private String videoId;        // YouTube video id (switches to transcript mode)
        private String primaryLang;    // preferred transcript language
        private List<String> input;    // additional transcript languages
        private String index;          // target index id (required)
    }

    /** Response carrying the id of the submitted task. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class IndexResponse {
        private String taskId;
    }

    /** Response carrying a task id and its current status. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class TaskStatusResponse {
        private String taskId;
        private TaskStatus status;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/domain/ContentType.java
|
package ai.driftkit.vector.spring.domain;
/**
 * Supported input content types, each paired with its canonical MIME type.
 * {@code YOUTUBE_TRANSCRIPT} carries no MIME type because it is not file-based.
 */
public enum ContentType {
    PNG("image/png"),
    JPG("image/jpeg"),
    YOUTUBE_TRANSCRIPT(null),
    MICROSOFT_WORD("application/vnd.openxmlformats-officedocument.wordprocessingml.document"),
    MICROSOFT_EXCEL("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"),
    MICROSOFT_POWERPOINT("application/vnd.openxmlformats-officedocument.presentationml.presentation"),
    PDF("application/pdf"),
    RTF("application/rtf"),
    TEXT("text/plain"),
    HTML("text/html"),
    XML("application/xml"),
    ODF_TEXT("application/vnd.oasis.opendocument.text"),
    ODF_SPREADSHEET("application/vnd.oasis.opendocument.spreadsheet"),
    ODF_PRESENTATION("application/vnd.oasis.opendocument.presentation"),
    SQLITE("application/vnd.sqlite3"),
    ACCESS("application/vnd.ms-access");

    private final String mimeType;

    ContentType(String mimeType) {
        this.mimeType = mimeType;
    }

    /** @return the MIME type string, or {@code null} for non-file content. */
    public String getMimeType() {
        return mimeType;
    }

    /**
     * Resolves a MIME type string to its {@link ContentType}, ignoring case.
     *
     * @param mimeType the MIME type to look up (may be {@code null})
     * @throws IllegalArgumentException when the MIME type is {@code null} or unknown
     */
    public static ContentType fromString(String mimeType) {
        for (ContentType candidate : values()) {
            // Entries without a MIME type (YOUTUBE_TRANSCRIPT) can never match.
            if (candidate.mimeType != null && candidate.mimeType.equalsIgnoreCase(mimeType)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException("Unsupported MIME type: " + mimeType);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/domain/Index.java
|
package ai.driftkit.vector.spring.domain;
import ai.driftkit.common.domain.Language;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
/**
 * Mongo document describing a logical search index that indexing tasks target.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@Builder
@Document(collection = "index")
public class Index {
    @Id
    private String id;            // Mongo primary key
    private String indexName;     // human-readable index name
    private String description;
    private boolean disabled;     // soft-disable flag; presumably hides the index from use — confirm with callers
    private Language language;
    private long createdTime;     // creation timestamp; epoch millis by this module's convention — TODO confirm
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/domain/IndexTask.java
|
package ai.driftkit.vector.spring.domain;
import ai.driftkit.vector.spring.parser.UnifiedParser.ParserInput;
import lombok.*;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
import java.util.Map;
/**
 * Mongo document tracking one asynchronous indexing task: its input, lifecycle
 * status, and outcome. Created and updated by IndexService.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@Builder
@Document(collection = "index_tasks")
public class IndexTask {
    @Id
    private String taskId;                 // Mongo primary key, generated at submission
    private String indexId;                // id of the target Index document
    private ParserInput parserInput;       // polymorphic input payload (Jackson type info on ParserInput)
    private TaskStatus status;
    private DocumentSaveResult result;     // populated when the task completes
    private Map<String, Object> metadata;
    private long createdTime;              // epoch millis (System.currentTimeMillis() at submission)
    private long completedTime;            // epoch millis; 0 until the task finishes
    private String errorMessage;           // set when status is FAILED

    /** Lifecycle states of an indexing task. */
    public enum TaskStatus {
        PENDING,
        IN_PROGRESS,
        COMPLETED,
        FAILED
    }

    /** Summary of how many documents were saved vs failed during indexing. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    @Builder
    public static class DocumentSaveResult {
        private int saved;
        private int failed;
        private String errorMessage;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/domain/ParsedContent.java
|
package ai.driftkit.vector.spring.domain;
import ai.driftkit.vector.spring.parser.UnifiedParser.ParserInput;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
/**
 * Mongo document holding the output of a parse run: the extracted text, the
 * original input descriptor, and timing information.
 */
@Data
@Document
@NoArgsConstructor
@JsonInclude(Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true)
public class ParsedContent {
    @Id
    private String id;               // random UUID assigned by UnifiedParser.parse
    private ParserInput input;       // original input; may be blanked for oversized payloads (see UnifiedParser)
    private String parsedContent;    // extracted plain text
    private Object metadata;         // parser-specific metadata (model response, Tika map, dialog, ...)
    // NOTE(review): "Stated" looks like a typo for "Started"; renaming would break
    // stored documents and UnifiedParser's setter calls, so it is left as-is.
    private long parsingStatedTime;  // epoch millis when parsing began
    private long parsingEndTime;     // epoch millis when parsing finished
    private long createdTime;        // epoch millis; set equal to parsingEndTime by UnifiedParser
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/parser/TextContentParser.java
|
package ai.driftkit.vector.spring.parser;
import ai.driftkit.vector.spring.domain.ContentType;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.parser.AutoDetectParser;
import org.apache.tika.sax.BodyContentHandler;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
/**
 * Thin wrapper around Apache Tika's AutoDetectParser that extracts plain text
 * and document metadata from an in-memory byte payload.
 */
@NoArgsConstructor
public class TextContentParser {

    /** Immutable pair of extracted text content and document metadata. */
    public static class ParseResult {
        private final String content;
        private final Map<String, String> metadata;

        public ParseResult(String content, Map<String, String> metadata) {
            this.content = content;
            this.metadata = metadata;
        }

        public String getContent() {
            return content;
        }

        public Map<String, String> getMetadata() {
            return metadata;
        }
    }

    /**
     * Parses the given bytes with Tika, hinting auto-detection with the supplied
     * MIME type.
     *
     * @param inputBytes raw document bytes
     * @param mimeType   declared content type used as a detection hint
     * @return extracted text plus all metadata fields Tika reported
     * @throws RuntimeException wrapping any Tika parsing failure
     */
    public static ParseResult parse(byte[] inputBytes, ContentType mimeType) {
        // -1 disables Tika's default write limit so large documents are not truncated.
        BodyContentHandler textHandler = new BodyContentHandler(-1);
        Metadata tikaMetadata = new Metadata();
        tikaMetadata.set(Metadata.CONTENT_TYPE, mimeType.getMimeType());
        try (InputStream stream = new ByteArrayInputStream(inputBytes)) {
            new AutoDetectParser().parse(stream, textHandler, tikaMetadata);
        } catch (Exception e) {
            throw new RuntimeException("Error parsing the document: %s".formatted(e.getMessage()), e);
        }
        Map<String, String> extracted = new HashMap<>();
        for (String key : tikaMetadata.names()) {
            extracted.put(key, tikaMetadata.get(key));
        }
        return new ParseResult(textHandler.toString(), extracted);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/parser/UnifiedParser.java
|
package ai.driftkit.vector.spring.parser;
import ai.driftkit.common.domain.client.ModelClient;
import ai.driftkit.clients.core.ModelClientFactory;
import ai.driftkit.config.EtlConfig;
import ai.driftkit.config.EtlConfig.VaultConfig;
import ai.driftkit.common.domain.client.ModelImageResponse.ModelContentMessage;
import ai.driftkit.common.domain.client.ModelImageResponse.ModelContentMessage.ModelContentElement.ImageData;
import ai.driftkit.common.domain.client.ModelTextRequest;
import ai.driftkit.common.domain.client.ModelTextResponse;
import ai.driftkit.common.domain.client.Role;
import ai.driftkit.vector.spring.domain.ContentType;
import ai.driftkit.vector.spring.domain.ParsedContent;
import ai.driftkit.vector.spring.parser.TextContentParser.ParseResult;
import ai.driftkit.vector.spring.parser.YoutubeSubtitleParser.*;
import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
/**
 * Central parser that converts heterogeneous inputs — images, YouTube
 * transcripts, plain text/HTML/XML, and binary office documents — into a
 * {@link ParsedContent} record. Dispatches on {@link ContentType}.
 */
@Service
public class UnifiedParser {
    // Inputs larger than this (chars for text, bytes for binary) are not kept in
    // the returned ParsedContent; the stored input is blanked instead.
    public static final int MAX_INPUT_FILE_TO_STORE = 512_000;

    private DefaultYoutubeTranscriptApi youtubeParser;
    private ModelClient modelClient;
    private VaultConfig modelConfig;

    // No-arg constructor kept for framework instantiation; fields remain null.
    public UnifiedParser() {
    }

    /**
     * Wires the parser from ETL configuration: YouTube transcript client,
     * model configuration, and model client.
     */
    @Autowired
    public UnifiedParser(EtlConfig etlConfig) {
        this.youtubeParser = (DefaultYoutubeTranscriptApi) TranscriptApiFactory.createDefault(etlConfig.getYoutubeProxy());
        // Uses the first vault entry as the model config; assumes the vault list is non-empty — TODO confirm.
        this.modelConfig = etlConfig.getVault().getFirst();
        this.modelClient = ModelClientFactory.fromConfig(modelConfig);
    }

    /**
     * Parses the given input according to its content type.
     *
     * @param input typed input carrying payload and ContentType
     * @return a ParsedContent with text, metadata, and timing fields populated
     * @throws IOException propagated from underlying I/O
     * @throws RuntimeException on input-type mismatch or unsupported content type
     */
    public ParsedContent parse(ParserInput input) throws IOException {
        ParsedContent content = new ParsedContent();
        content.setId(UUID.randomUUID().toString());
        content.setInput(input);
        content.setParsingStatedTime(System.currentTimeMillis());
        ContentType contentType = input.getContentType();
        switch (contentType) {
            // Images: described via the vision model; the model response becomes the text.
            case JPG:
            case PNG:
                ModelTextResponse textResponse = modelClient.imageToText(
                        ModelTextRequest.builder()
                                .temperature(modelConfig.getTemperature())
                                .model(Optional.ofNullable(modelConfig.getModel()).orElseThrow(() -> new RuntimeException("Model not configured")))
                                .messages(List.of(ModelContentMessage.create(
                                        Role.user, "Please describe the image", new ImageData(
                                                (byte[]) input.getInput(), contentType.getMimeType()
                                        )
                                )))
                                .build()
                );
                content.setParsedContent(textResponse.getResponse());
                content.setParsingEndTime(System.currentTimeMillis());
                content.setCreatedTime(content.getParsingEndTime());
                content.setMetadata(textResponse);
                return content;
            // YouTube: fetch transcript, map to dialog, and flatten to "speaker: text" lines.
            case YOUTUBE_TRANSCRIPT:
                if (!(input instanceof YoutubeIdParserInput params)) {
                    throw new RuntimeException("Wrong type of ParserInput to parse Youtube Video");
                }
                String primaryLang = params.getPrimaryLang();
                String[] languages = params.getInput().toArray(new String[0]);
                Transcript transcript = youtubeParser.getTranscript(params.getVideoId(), primaryLang, languages);
                Dialog dialog = youtubeParser.mapToDialog(transcript);
                // Drop caption-styling turns (lines containing "<c>" markup).
                List<DialogTurn> turns = dialog.getTurns().stream()
                        .filter(e -> !e.getText().contains("<c>"))
                        .toList();
                StringBuilder builder = new StringBuilder();
                for (DialogTurn turn : turns) {
                    builder.append(turn.getSpeaker()).append(": ").append(turn.getText()).append('\n');
                }
                content.setParsedContent(builder.toString());
                content.setMetadata(dialog);
                content.setParsingEndTime(System.currentTimeMillis());
                content.setCreatedTime(content.getParsingEndTime());
                return content;
            // Textual formats: accept either a String or raw bytes.
            case TEXT:
            case HTML:
            case XML:
                String text;
                if (input instanceof StringParserInput textInput) {
                    text = textInput.getInput();
                } else if (input instanceof ByteArrayParserInput baInput) {
                    // NOTE(review): decodes with the platform default charset; presumably
                    // UTF-8 is intended — consider new String(bytes, StandardCharsets.UTF_8).
                    text = new String(baInput.getInput());
                } else {
                    throw new RuntimeException("Wrong type of ParserInput to parse content %s".formatted(contentType));
                }
                if (contentType == ContentType.HTML) {
                    // Strip markup; keep only the body's visible text.
                    Document doc = Jsoup.parse(text);
                    text = doc.body().text();
                }
                content.setParsedContent(text);
                content.setParsingEndTime(System.currentTimeMillis());
                content.setCreatedTime(content.getParsingEndTime());
                // Avoid persisting oversized raw input alongside the parsed result.
                if (text.length() > MAX_INPUT_FILE_TO_STORE) {
                    StringParserInput br = new StringParserInput(null, contentType);
                    content.setInput(br);
                }
                return content;
            // Binary document formats: delegate to Tika via TextContentParser.
            case MICROSOFT_WORD:
            case MICROSOFT_EXCEL:
            case MICROSOFT_POWERPOINT:
            case PDF:
            case RTF:
            case ODF_TEXT:
            case ODF_SPREADSHEET:
            case ODF_PRESENTATION:
            case SQLITE:
            case ACCESS:
                if (!(input instanceof ByteArrayParserInput param)) {
                    throw new RuntimeException("Wrong type of ParserInput to parse content %s".formatted(contentType));
                }
                ParseResult parsingResult = TextContentParser.parse(param.getInput(), contentType);
                content.setParsedContent(parsingResult.getContent());
                content.setMetadata(parsingResult.getMetadata());
                content.setParsingEndTime(System.currentTimeMillis());
                content.setCreatedTime(content.getParsingEndTime());
                if (param.getInput().length > MAX_INPUT_FILE_TO_STORE) {
                    // NOTE(review): the placeholder hardcodes ContentType.PDF regardless of
                    // the actual type — looks unintentional; confirm before relying on it.
                    ByteArrayParserInput br = new ByteArrayParserInput(null, null, ContentType.PDF);
                    content.setInput(br);
                }
                return content;
        }
        throw new RuntimeException("Parser is not found for [%s]".formatted(input.contentType));
    }

    /**
     * Polymorphic input wrapper. Jackson resolves the concrete subtype from the
     * "contentType" property using the mappings below.
     */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    @JsonTypeInfo(
            use = JsonTypeInfo.Id.NAME,
            include = JsonTypeInfo.As.PROPERTY,
            property = "contentType"
    )
    @JsonSubTypes({
            @JsonSubTypes.Type(value = YoutubeIdParserInput.class, name = "YOUTUBE_TRANSCRIPT"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "MICROSOFT_WORD"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "MICROSOFT_EXCEL"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "MICROSOFT_POWERPOINT"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "PDF"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "RTF"),
            @JsonSubTypes.Type(value = StringParserInput.class, name = "TEXT"),
            @JsonSubTypes.Type(value = StringParserInput.class, name = "HTML"),
            @JsonSubTypes.Type(value = StringParserInput.class, name = "XML"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "ODF_TEXT"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "ODF_SPREADSHEET"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "ODF_PRESENTATION"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "SQLITE"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "PNG"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "JPG"),
            @JsonSubTypes.Type(value = ByteArrayParserInput.class, name = "ACCESS")
    })
    public static class ParserInput<T> {
        T input;                   // payload; concrete type fixed by the subclass
        ContentType contentType;   // discriminator used for dispatch and JSON typing
    }

    /** Binary-payload input (office docs, images, PDFs, ...). */
    @Data
    @NoArgsConstructor
    public static class ByteArrayParserInput extends ParserInput<byte[]> {
        private String fileName;

        public ByteArrayParserInput(byte[] input, String fileName, ContentType contentType) {
            super(input, contentType);
            this.fileName = fileName;
        }
    }

    /** Plain-string input (TEXT/HTML/XML). */
    @Data
    @NoArgsConstructor
    public static class StringParserInput extends ParserInput<String> {
        public StringParserInput(String input, ContentType contentType) {
            super(input, contentType);
        }
    }

    /** List-of-strings input; not referenced by parse() in this file. */
    @Data
    @NoArgsConstructor
    public static class StringListParserInput extends ParserInput<List<String>> {
        public StringListParserInput(List<String> input, ContentType contentType) {
            super(input, contentType);
        }
    }

    /**
     * YouTube transcript input. The input list holds fallback languages; the
     * first element of the constructor's list becomes the primary language.
     */
    @Data
    @NoArgsConstructor
    public static class YoutubeIdParserInput extends ParserInput<List<String>> {
        String videoId;
        String primaryLang;

        // Assumes languages is non-empty; languages.get(0) throws otherwise — TODO confirm callers.
        public YoutubeIdParserInput(String videoId, List<String> languages) {
            super(languages, ContentType.YOUTUBE_TRANSCRIPT);
            this.videoId = videoId;
            this.primaryLang = languages.get(0);
            if (languages.size() > 1) {
                setInput(languages.subList(1, languages.size()));
            }
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/parser/YoutubeSubtitleParser.java
|
package ai.driftkit.vector.spring.parser;
import ai.driftkit.config.EtlConfig.YoutubeProxyConfig;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * Domain types and a stub client for fetching YouTube subtitles and mapping
 * them into dialog form. The transcript API methods are placeholder
 * implementations that return canned data.
 */
public class YoutubeSubtitleParser {

    /** A video's raw subtitle lines. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class Transcript {
        private String videoId;
        private List<String> subtitles;
    }

    /** An ordered list of dialog turns derived from a transcript. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class Dialog {
        private List<DialogTurn> turns;
    }

    /** One speaker utterance with its timestamp. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class DialogTurn {
        private String speaker;
        private String text;
        private long timestamp;   // epoch millis in the stub; semantics of real data unverified — TODO confirm
    }

    /** Transcript client; both methods are stubs returning fixed sample data. */
    public static class DefaultYoutubeTranscriptApi {
        public Transcript getTranscript(String videoId, String primaryLang, String[] languages) {
            // Stub implementation
            return new Transcript(videoId, List.of("Sample transcript"));
        }

        public Dialog mapToDialog(Transcript transcript) {
            // Stub implementation
            DialogTurn turn = new DialogTurn("Speaker", "Sample text", System.currentTimeMillis());
            return new Dialog(List.of(turn));
        }
    }

    /** Factory for transcript clients; ignores the proxy config in this stub. */
    public static class TranscriptApiFactory {
        public static DefaultYoutubeTranscriptApi createDefault(YoutubeProxyConfig config) {
            return new DefaultYoutubeTranscriptApi();
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/repository/IndexRepository.java
|
package ai.driftkit.vector.spring.repository;
import ai.driftkit.vector.spring.domain.Index;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
/** Spring Data MongoDB repository for {@link Index} documents. */
@Repository
public interface IndexRepository extends MongoRepository<Index, String> {
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/repository/IndexTaskRepository.java
|
package ai.driftkit.vector.spring.repository;
import ai.driftkit.vector.spring.domain.IndexTask;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
/** Spring Data MongoDB repository for {@link IndexTask} documents. */
@Repository
public interface IndexTaskRepository extends MongoRepository<IndexTask, String> {
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/repository/ParsedContentRepository.java
|
package ai.driftkit.vector.spring.repository;
import ai.driftkit.vector.spring.domain.ParsedContent;
import org.springframework.data.mongodb.repository.MongoRepository;
import org.springframework.stereotype.Repository;
/** Spring Data MongoDB repository for {@link ParsedContent} documents. */
@Repository
public interface ParsedContentRepository extends MongoRepository<ParsedContent, String> {
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/service/IndexService.java
|
package ai.driftkit.vector.spring.service;
import ai.driftkit.common.utils.AIUtils;
import ai.driftkit.vector.spring.domain.Index;
import ai.driftkit.vector.spring.domain.IndexTask;
import ai.driftkit.vector.spring.domain.IndexTask.DocumentSaveResult;
import ai.driftkit.vector.spring.domain.IndexTask.TaskStatus;
import ai.driftkit.vector.spring.parser.UnifiedParser.ParserInput;
import ai.driftkit.vector.spring.repository.IndexRepository;
import ai.driftkit.vector.spring.repository.IndexTaskRepository;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Sort;
import org.springframework.data.domain.Sort.Direction;
import org.springframework.stereotype.Service;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.*;
/**
 * Service managing index definitions and asynchronous indexing tasks.
 *
 * Repository fields are null-checked throughout so the service degrades to a
 * no-persistence mode when MongoDB repositories are not wired.
 */
@Slf4j
@Service
public class IndexService {

    @Autowired
    private IndexTaskRepository indexTaskRepository;

    @Autowired
    private IndexRepository indexRepository;

    /** Worker pool that runs indexing tasks off the caller thread. */
    private ExecutorService executorService;

    @PostConstruct
    public void init() {
        // 0..cores/2 threads; SynchronousQueue hands a task to a free thread
        // immediately, otherwise CallerRunsPolicy executes it on the submitter.
        this.executorService = new ThreadPoolExecutor(
                0,
                Math.max(1, Runtime.getRuntime().availableProcessors() / 2),
                60L, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>(),
                new ThreadPoolExecutor.CallerRunsPolicy()
        );
    }

    /**
     * Stops the worker pool on context shutdown. Fix: the pool created in
     * {@link #init()} was never shut down, leaking threads on application stop.
     */
    @PreDestroy
    public void shutdown() {
        if (executorService == null) {
            return;
        }
        executorService.shutdown();
        try {
            if (!executorService.awaitTermination(30, TimeUnit.SECONDS)) {
                executorService.shutdownNow();
            }
        } catch (InterruptedException e) {
            executorService.shutdownNow();
            // Restore the interrupt flag for the caller.
            Thread.currentThread().interrupt();
        }
    }

    /** Deletes an index definition by id (no-op without a repository). */
    public void deleteIndex(String id) {
        if (indexRepository != null) {
            indexRepository.deleteById(id);
        }
    }

    /** Persists an index definition; returns it unchanged without a repository. */
    public Index save(Index index) {
        if (indexRepository != null) {
            return indexRepository.save(index);
        }
        return index;
    }

    /** @return all index definitions, or an empty list without a repository */
    public List<Index> getIndexList() {
        if (indexRepository != null) {
            return indexRepository.findAll();
        }
        return List.of();
    }

    /** @return one page of tasks sorted by createdTime descending */
    public Page<IndexTask> getTasks(int page, int limit) {
        if (indexTaskRepository != null) {
            return indexTaskRepository.findAll(PageRequest.of(page, limit, Sort.by(Direction.DESC, "createdTime")));
        }
        return Page.empty();
    }

    /** @return the task with the given id, or {@code null} when unknown */
    public IndexTask getTask(String taskId) {
        if (indexTaskRepository != null) {
            return indexTaskRepository.findById(taskId).orElse(null);
        }
        return null;
    }

    /**
     * Creates a PENDING task for the given index and schedules its execution.
     *
     * @return the generated task id
     */
    public String submitIndexingTask(String indexId, ParserInput input) {
        IndexTask task = IndexTask.builder()
                .taskId(AIUtils.generateId())
                .indexId(indexId)
                .parserInput(input)
                .status(TaskStatus.PENDING)
                .createdTime(System.currentTimeMillis())
                .build();
        executeTask(task);
        return task.getTaskId();
    }

    /**
     * Persists the task, validates the target index exists, then processes the
     * task asynchronously, recording COMPLETED/FAILED status and timing.
     */
    public void executeTask(IndexTask task) {
        if (indexTaskRepository != null) {
            indexTaskRepository.save(task);
        }
        if (indexRepository != null) {
            Optional<Index> indexOpt = indexRepository.findById(task.getIndexId());
            if (indexOpt.isEmpty()) {
                // Fail fast before queueing work against a nonexistent index.
                task.setStatus(TaskStatus.FAILED);
                task.setErrorMessage("Index not found: " + task.getIndexId());
                if (indexTaskRepository != null) {
                    indexTaskRepository.save(task);
                }
                return;
            }
        }
        executorService.submit(() -> {
            try {
                task.setStatus(TaskStatus.IN_PROGRESS);
                if (indexTaskRepository != null) {
                    indexTaskRepository.save(task);
                }
                // Simplified processing - in real implementation would use workflows
                DocumentSaveResult result = DocumentSaveResult.builder()
                        .saved(1)
                        .failed(0)
                        .build();
                task.setResult(result);
                task.setCompletedTime(System.currentTimeMillis());
                task.setStatus(TaskStatus.COMPLETED);
                if (indexTaskRepository != null) {
                    indexTaskRepository.save(task);
                }
                log.info("Indexing task [{}] completed successfully.", task.getTaskId());
            } catch (Exception e) {
                task.setStatus(TaskStatus.FAILED);
                task.setErrorMessage(e.getMessage());
                task.setCompletedTime(System.currentTimeMillis());
                if (indexTaskRepository != null) {
                    indexTaskRepository.save(task);
                }
                log.error("Indexing task [{}] failed: {}", task.getTaskId(), e.getMessage(), e);
            }
        });
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring
|
java-sources/ai/driftkit/driftkit-vector-spring-boot-starter/0.8.1/ai/driftkit/vector/spring/service/ParserService.java
|
package ai.driftkit.vector.spring.service;
import ai.driftkit.vector.spring.domain.ParsedContent;
import ai.driftkit.vector.spring.parser.UnifiedParser;
import ai.driftkit.vector.spring.parser.UnifiedParser.ParserInput;
import ai.driftkit.vector.spring.repository.ParsedContentRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.IOException;
/**
 * Facade over {@link UnifiedParser} that optionally persists parse results.
 */
@Service
public class ParserService {

    @Autowired
    private ParsedContentRepository parsedContentRepository;

    @Autowired
    private UnifiedParser parser;

    /**
     * Parses the input and, when requested, stores the result.
     *
     * @param input the typed input to parse
     * @param save  whether to persist the parsed content
     * @return the parsed content
     * @throws IOException propagated from the underlying parser
     */
    public ParsedContent parse(ParserInput input, boolean save) throws IOException {
        ParsedContent result = parser.parse(input);
        if (!save) {
            return result;
        }
        save(result);
        return result;
    }

    /** Persists a parse result. */
    public void save(ParsedContent parse) {
        parsedContentRepository.save(parse);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers/autoconfigure/ControllersAutoConfiguration.java
|
package ai.driftkit.workflow.controllers.autoconfigure;
import ai.driftkit.workflow.engine.spring.autoconfigure.WorkflowMongoRepositoriesAutoConfiguration;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
/**
 * Auto-configuration for workflow controllers.
 * Automatically enables MongoDB repositories and services when MongoDB is available.
 *
 * Active only in a servlet-capable classpath (WebMvcConfigurer present) and when
 * {@code driftkit.workflow.controllers.enabled} is true or unset.
 */
@Slf4j
@AutoConfiguration(after = WorkflowMongoRepositoriesAutoConfiguration.class)
@ConditionalOnClass(WebMvcConfigurer.class)
@ConditionalOnProperty(
        prefix = "driftkit.workflow.controllers",
        name = "enabled",
        havingValue = "true",
        matchIfMissing = true
)
@ComponentScan(basePackages = {
        "ai.driftkit.workflow.controllers.controller",
        "ai.driftkit.workflow.controllers.service"
})
public class ControllersAutoConfiguration {
    public ControllersAutoConfiguration() {
        log.info("Enabling workflow controllers auto-configuration");
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers/autoconfigure/WorkflowControllersAutoConfiguration.java
|
package ai.driftkit.workflow.controllers.autoconfigure;
import ai.driftkit.workflow.controllers.controller.*;
import ai.driftkit.workflow.controllers.service.*;
import ai.driftkit.workflow.engine.spring.service.WorkflowService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.AutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnWebApplication;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
/**
 * Auto-configuration for workflow REST controllers.
 * This module provides web endpoints for workflow operations.
 *
 * <p>Active only in a web application and only when the workflow engine's
 * {@code WorkflowService} is on the classpath.
 *
 * <p>NOTE(review): this scans the same base packages as
 * {@code ControllersAutoConfiguration}; when both auto-configurations are
 * active the scans overlap — presumably harmless (Spring de-duplicates bean
 * definitions by name), but verify whether one of the two is redundant.
 */
@Slf4j
@AutoConfiguration
@ConditionalOnWebApplication
@ConditionalOnClass({WorkflowService.class})
@ComponentScan(basePackages = {
    "ai.driftkit.workflow.controllers.controller",
    "ai.driftkit.workflow.controllers.service"
})
public class WorkflowControllersAutoConfiguration {
    // Constructor only logs activation; all wiring is done via the annotations above.
    public WorkflowControllersAutoConfiguration() {
        log.info("Initializing Workflow Controllers module");
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers/controller/AnalyticsController.java
|
package ai.driftkit.workflow.controllers.controller;
import ai.driftkit.common.domain.RestResponse;
import ai.driftkit.workflow.controllers.service.WorkflowAnalyticsService;
import ai.driftkit.workflow.engine.spring.dto.AnalyticsDtos.DailyMetricsResponse;
import ai.driftkit.workflow.engine.spring.dto.AnalyticsDtos.PromptMetricsResponse;
import ai.driftkit.workflow.engine.spring.dto.AnalyticsDtos.TaskVariables;
import ai.driftkit.workflow.engine.spring.tracing.domain.ModelRequestTrace;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.condition.ConditionalOnWebApplication;
import org.springframework.data.domain.Page;
import org.springframework.format.annotation.DateTimeFormat;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.*;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.util.List;
/**
* REST controller for analytics and tracing.
* Provides endpoints for querying model request traces and analytics metrics.
*/
@Slf4j
@Controller("workflowAnalyticsController")
@RequestMapping(path = "/data/v1.0/analytics")
@ConditionalOnWebApplication
@ConditionalOnProperty(
prefix = "driftkit.workflow.tracing",
name = "enabled",
havingValue = "true",
matchIfMissing = true
)
public class AnalyticsController {
@Autowired
private WorkflowAnalyticsService analyticsService;
/**
* Get model request traces within a time range
*/
@GetMapping("/traces")
public @ResponseBody RestResponse<Page<ModelRequestTrace>> getTraces(
@RequestParam(required = false) @DateTimeFormat(iso = DateTimeFormat.ISO.DATE_TIME) LocalDateTime startTime,
@RequestParam(required = false) @DateTimeFormat(iso = DateTimeFormat.ISO.DATE_TIME) LocalDateTime endTime,
@RequestParam(required = false) String promptId,
@RequestParam(required = false) String excludePurpose,
@RequestParam(defaultValue = "0") int page,
@RequestParam(defaultValue = "50") int size
) {
Page<ModelRequestTrace> traces = analyticsService.getTraces(startTime, endTime, promptId, excludePurpose, page, size);
return new RestResponse<>(true, traces);
}
/**
* Get traces by context ID
*/
@GetMapping("/traces/{contextId}")
public @ResponseBody RestResponse<List<ModelRequestTrace>> getTracesByContextId(
@PathVariable String contextId
) {
List<ModelRequestTrace> traces = analyticsService.getTracesByContextId(contextId);
return new RestResponse<>(true, traces);
}
/**
* Get daily metrics for the dashboard
*/
@GetMapping("/metrics/daily")
public @ResponseBody RestResponse<DailyMetricsResponse> getDailyMetrics(
@RequestParam(required = false) @DateTimeFormat(iso = DateTimeFormat.ISO.DATE) LocalDate startDate,
@RequestParam(required = false) @DateTimeFormat(iso = DateTimeFormat.ISO.DATE) LocalDate endDate
) {
DailyMetricsResponse metrics = analyticsService.getDailyMetrics(startDate, endDate);
return new RestResponse<>(true, metrics);
}
/**
* Get available prompt methods for analytics
*/
@GetMapping("/prompt-methods")
public @ResponseBody RestResponse<List<String>> getAvailablePromptMethods() {
List<String> methods = analyticsService.getAvailablePromptMethods();
return new RestResponse<>(true, methods);
}
/**
* Get message tasks by context IDs
*/
@GetMapping("/message-tasks")
public @ResponseBody RestResponse<List<TaskVariables>> getMessageTasksByContextIds(
@RequestParam String contextIds
) {
List<String> ids = List.of(contextIds.split(","));
List<TaskVariables> tasks = analyticsService.getMessageTasksByContextIds(ids);
return new RestResponse<>(true, tasks);
}
/**
* Get metrics for a specific prompt method
*/
@GetMapping("/metrics/prompt")
public @ResponseBody RestResponse<PromptMetricsResponse> getPromptMetrics(
@RequestParam(required = false) @DateTimeFormat(iso = DateTimeFormat.ISO.DATE_TIME) LocalDateTime startTime,
@RequestParam(required = false) @DateTimeFormat(iso = DateTimeFormat.ISO.DATE_TIME) LocalDateTime endTime,
@RequestParam String promptId
) {
PromptMetricsResponse metrics = analyticsService.getPromptMetrics(startTime, endTime, promptId);
return new RestResponse<>(true, metrics);
}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers/controller/AssistantController.java
|
package ai.driftkit.workflow.controllers.controller;
import ai.driftkit.common.domain.chat.ChatMessage;
import ai.driftkit.common.domain.chat.ChatRequest;
import ai.driftkit.common.domain.chat.ChatResponse;
import ai.driftkit.workflow.engine.async.ProgressTracker;
import ai.driftkit.workflow.engine.chat.ChatMessageTask;
import ai.driftkit.workflow.engine.chat.converter.ChatMessageTaskConverter;
import ai.driftkit.workflow.engine.core.WorkflowEngine;
import ai.driftkit.workflow.engine.domain.ChatSession;
import ai.driftkit.workflow.engine.domain.StepMetadata;
import ai.driftkit.workflow.engine.domain.WorkflowDetails;
import ai.driftkit.workflow.engine.domain.WorkflowMetadata;
import ai.driftkit.workflow.engine.schema.AIFunctionSchema;
import ai.driftkit.workflow.engine.spring.dto.PageableResponseWithChat;
import ai.driftkit.workflow.engine.spring.dto.PageableResponseWithChatMessage;
import ai.driftkit.workflow.engine.spring.service.WorkflowService;
import jakarta.servlet.http.HttpServletRequest;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.SerializationUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Sort;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.server.ResponseStatusException;
import javax.validation.Valid;
import java.net.URLDecoder;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.*;
@Slf4j
@RestController
@RequiredArgsConstructor
@RequestMapping("/public/api1.0/ai/assistant/")
@Validated
public class AssistantController {

    private final WorkflowEngine engine;
    private final ProgressTracker progressTracker;
    private final WorkflowService workflowService;

    /**
     * Processes a chat request and returns the workflow response together with
     * task-level views of both the request and the response.
     */
    @PostMapping("/chat")
    public ChatResponseWithTasks chat(
        @Valid @RequestBody ChatRequest request,
        @RequestParam(required = false) String userId
    ) {
        userId = decode(userId);
        log.info("Processing chat request for session: {}, user: {}", request.getChatId(), userId);
        try {
            // Fill in a user ID if the body didn't carry one: prefer the query
            // parameter, then a "userId" property, then "anonymous".
            if (StringUtils.isEmpty(request.getUserId())) {
                if (StringUtils.isNotBlank(userId)) {
                    request.setUserId(userId);
                } else {
                    // NOTE(review): assumes getPropertiesMap() is never null — confirm the ChatRequest contract.
                    userId = request.getPropertiesMap().getOrDefault("userId", "anonymous");
                    request.setUserId(userId);
                }
            }
            workflowService.getOrCreateChatSession(request.getChatId(), userId, request.getMessage());
            ChatResponse response = workflowService.processChatRequest(request);
            List<ChatMessageTask> requestTasks = ChatMessageTaskConverter.convert(request);
            List<ChatMessageTask> responseTasks = ChatMessageTaskConverter.convert(response);
            return new ChatResponseWithTasks(response, requestTasks, responseTasks);
        } catch (Exception e) {
            log.error("Error processing chat request", e);
            throw new ResponseStatusException(
                HttpStatus.INTERNAL_SERVER_ERROR,
                "Error processing chat request: " + e.getMessage(),
                e
            );
        }
    }

    /**
     * Retrieves a single chat response by ID. When a user ID is supplied the
     * response's ownership is checked and a 403 is returned on mismatch.
     */
    @GetMapping("/chat/response/{responseId}")
    public ChatResponseWithTasks getChatResponse(
        @PathVariable String responseId,
        @RequestParam(required = false) String userId
    ) {
        userId = decode(userId);
        log.info("Getting chat response for ID: {}, user: {}", responseId, userId);
        try {
            Optional<ChatResponse> response = workflowService.getChatResponse(responseId);
            if (response.isPresent() && StringUtils.isNotBlank(userId)) {
                ChatResponse chatResponse = response.get();
                String responseUserId = chatResponse.getUserId();
                if (StringUtils.isNotBlank(responseUserId) && !responseUserId.equals(userId)) {
                    log.warn("User {} attempted to access response {} owned by {}",
                        userId, responseId, responseUserId);
                    // FIX: was a bare RuntimeException, which the generic catch below
                    // converted into a 500; surface the access violation as 403 instead,
                    // consistent with verifyUserChatAccess().
                    throw new ResponseStatusException(HttpStatus.FORBIDDEN,
                        "Forbidden for [%s] [%s]".formatted(userId, responseId));
                }
            }
            if (response.isEmpty()) {
                return new ChatResponseWithTasks();
            }
            List<ChatMessageTask> responseTasks = ChatMessageTaskConverter.convert(response.get());
            return new ChatResponseWithTasks(response.get(), null, responseTasks);
        } catch (ResponseStatusException e) {
            // Preserve deliberate status codes (e.g. the 403 above).
            throw e;
        } catch (Exception e) {
            log.error("Error retrieving chat response", e);
            throw new ResponseStatusException(
                HttpStatus.INTERNAL_SERVER_ERROR,
                "Error retrieving chat response: " + e.getMessage(),
                e
            );
        }
    }

    /**
     * Returns the chat history as a flat list of tasks. CONTEXT messages are
     * filtered out unless {@code context=true}; next-step schemas are stripped
     * from cloned responses unless {@code showSchema=true}.
     */
    @GetMapping("/chat/history")
    public List<ChatMessageTask> history(
        HttpServletRequest request,
        @RequestParam String chatId,
        @RequestParam(required = false) String userId,
        @RequestParam(required = false, defaultValue = "0") int page,
        @RequestParam(required = false, defaultValue = "1000") int limit,
        @RequestParam(required = false, defaultValue = "asc") String sort,
        @RequestParam(required = false, defaultValue = "true") boolean showSchema,
        @RequestParam(required = false, defaultValue = "false") Boolean context
    ) {
        userId = decode(userId);
        log.info("Retrieving chat history for session: {}, user: {}, page: {}, limit: {}, sort: {}",
            chatId, userId, page, limit, sort);
        try {
            Pageable pageable = createPageable(page, limit, sort, "timestamp");
            verifyUserChatAccess(chatId, userId);
            Page<ChatMessage> historyPage = workflowService.getChatHistory(chatId, pageable, context, showSchema);
            if (historyPage.isEmpty()) {
                log.info("No history found for chat: {}", chatId);
                return new ArrayList<>();
            }
            List<ChatMessage> content = historyPage.getContent().stream()
                .filter(e -> BooleanUtils.isTrue(context) || e.getType() != ChatMessage.MessageType.CONTEXT)
                .toList();
            if (BooleanUtils.isNotTrue(showSchema)) {
                // Clone before mutating so the persisted/cached messages are untouched.
                content = content.stream()
                    .map(SerializationUtils::clone)
                    .peek(e -> {
                        if (e instanceof ChatResponse response) {
                            // Clear schemas to reduce payload size
                            response.setNextSchema(null);
                        }
                    })
                    .toList();
            }
            return ChatMessageTaskConverter.convertAll(content);
        } catch (ResponseStatusException e) {
            throw e;
        } catch (Exception e) {
            log.error("Error retrieving chat history", e);
            throw new ResponseStatusException(
                HttpStatus.INTERNAL_SERVER_ERROR,
                "Error retrieving chat history: " + e.getMessage(),
                e
            );
        }
    }

    /**
     * Returns a page of raw chat messages (always with schemas, never CONTEXT).
     */
    @GetMapping("/chat/history/pageable")
    public PageableResponseWithChatMessage historyPageable(
        HttpServletRequest request,
        @RequestParam String chatId,
        @RequestParam(required = false) String userId,
        @RequestParam(required = false, defaultValue = "0") int page,
        @RequestParam(required = false, defaultValue = "10") int limit,
        @RequestParam(required = false, defaultValue = "asc") String sort
    ) {
        userId = decode(userId);
        log.info("Retrieving chat history for session: {}, user: {}, page: {}, limit: {}, sort: {}",
            chatId, userId, page, limit, sort);
        try {
            Pageable pageable = createPageable(page, limit, sort, "timestamp");
            verifyUserChatAccess(chatId, userId);
            Page<ChatMessage> historyPage = workflowService.getChatHistory(chatId, pageable, false, true);
            if (historyPage.isEmpty()) {
                log.info("No history found for chat: {}", chatId);
            }
            return new PageableResponseWithChatMessage(request, historyPage);
        } catch (ResponseStatusException e) {
            throw e;
        } catch (Exception e) {
            log.error("Error retrieving chat history", e);
            throw new ResponseStatusException(
                HttpStatus.INTERNAL_SERVER_ERROR,
                "Error retrieving chat history: " + e.getMessage(),
                e
            );
        }
    }

    /**
     * Lists chats for a user, newest-activity first by default.
     * Without a user ID an empty page is returned.
     */
    @GetMapping("/chat/list")
    public PageableResponseWithChat getChats(
        HttpServletRequest request,
        @RequestParam(required = false) String userId,
        @RequestParam(required = false, defaultValue = "0") int page,
        @RequestParam(required = false, defaultValue = "100") int limit,
        @RequestParam(required = false, defaultValue = "desc") String sort
    ) {
        userId = decode(userId);
        log.info("Listing chats for user: {}, page: {}, limit: {}, sort: {}", userId, page, limit, sort);
        try {
            Pageable pageable = createPageable(page, limit, sort, "lastMessageTime");
            Page<ChatSession> chatsPage;
            if (StringUtils.isNotBlank(userId)) {
                chatsPage = workflowService.listChatsForUser(userId, pageable);
            } else {
                chatsPage = Page.empty(pageable);
            }
            return new PageableResponseWithChat(request, chatsPage);
        } catch (Exception e) {
            log.error("Error listing chats", e);
            throw new ResponseStatusException(
                HttpStatus.INTERNAL_SERVER_ERROR,
                "Error listing chats: " + e.getMessage(),
                e
            );
        }
    }

    /**
     * Creates a new chat session for the (possibly anonymous) user.
     */
    @PostMapping("/chat/create")
    public ChatInfo createChat(
        @RequestParam(required = false) String userId,
        @RequestParam(required = false) String name
    ) {
        userId = decode(userId);
        userId = StringUtils.isNotBlank(userId) ? userId : "anonymous";
        log.info("Creating new chat for user: {}", userId);
        try {
            ChatSession chat = workflowService.createChatSession(userId, name);
            return new ChatInfo(
                chat.getChatId(),
                chat.getLastMessageTime(),
                chat.getDescription(),
                chat.getUserId(),
                chat.getName()
            );
        } catch (Exception e) {
            log.error("Error creating new chat", e);
            throw new ResponseStatusException(
                HttpStatus.INTERNAL_SERVER_ERROR,
                "Error creating new chat: " + e.getMessage(),
                e
            );
        }
    }

    /**
     * Archives a chat after verifying the caller owns it.
     * Access violations map to the ResponseStatusException's own status.
     */
    @PostMapping("/chat/{chatId}/archive")
    public ResponseEntity<Void> archiveChat(
        @PathVariable String chatId,
        @RequestParam(required = false) String userId
    ) {
        userId = decode(userId);
        log.info("Archiving chat: {}, user: {}", chatId, userId);
        try {
            verifyUserChatAccess(chatId, userId);
            workflowService.archiveChatSession(chatId);
            return ResponseEntity.ok().build();
        } catch (ResponseStatusException e) {
            return ResponseEntity.status(e.getStatusCode()).build();
        } catch (Exception e) {
            log.error("Error archiving chat", e);
            throw new ResponseStatusException(
                HttpStatus.INTERNAL_SERVER_ERROR,
                "Error archiving chat: " + e.getMessage(),
                e
            );
        }
    }

    /**
     * Aggregates the schemas of every registered workflow (de-duplicated).
     */
    @GetMapping("/schemas")
    public SchemaResponse schemas() {
        log.info("Retrieving schemas for all workflows");
        try {
            Set<AIFunctionSchema> schemas = new HashSet<>();
            Map<String, String> messageIds = new HashMap<>();
            List<WorkflowMetadata> workflows = workflowService.listWorkflows();
            for (WorkflowMetadata workflow : workflows) {
                List<AIFunctionSchema> workflowSchemas = workflowService.getWorkflowSchemas(workflow.id());
                schemas.addAll(workflowSchemas);
            }
            return new SchemaResponse(new ArrayList<>(schemas), messageIds);
        } catch (Exception e) {
            log.error("Error retrieving schemas", e);
            throw new ResponseStatusException(
                HttpStatus.INTERNAL_SERVER_ERROR,
                "Error retrieving schemas: " + e.getMessage(),
                e
            );
        }
    }

    /**
     * Returns the input schema(s) of the first step of a workflow; 404 when the
     * workflow or its steps cannot be found.
     */
    @GetMapping("/workflow/first-schema/{workflowId}")
    public FirstStepSchemaResponse getFirstStepSchema(@PathVariable String workflowId) {
        log.info("Getting first step schema for workflow: {}", workflowId);
        try {
            WorkflowDetails details = workflowService.getWorkflowDetails(workflowId);
            if (details == null) {
                log.warn("Workflow not found with ID: {}", workflowId);
                throw new ResponseStatusException(
                    HttpStatus.NOT_FOUND,
                    "Workflow not found with ID: " + workflowId
                );
            }
            List<StepMetadata> steps = details.steps();
            if (steps.isEmpty()) {
                log.warn("No steps found for workflow: {}", workflowId);
                throw new ResponseStatusException(
                    HttpStatus.NOT_FOUND,
                    "No steps found for workflow: " + workflowId
                );
            }
            StepMetadata firstStep = steps.get(0);
            List<AIFunctionSchema> inputSchemas = firstStep.inputSchema() != null
                ? List.of(firstStep.inputSchema())
                : Collections.emptyList();
            if (inputSchemas.isEmpty()) {
                log.warn("No input schemas found for the first step of workflow: {}", workflowId);
                return new FirstStepSchemaResponse(workflowId, firstStep.id(), Collections.emptyList());
            }
            return new FirstStepSchemaResponse(workflowId, firstStep.id(), inputSchemas);
        } catch (ResponseStatusException e) {
            throw e;
        } catch (Exception e) {
            log.error("Error retrieving first step schema", e);
            throw new ResponseStatusException(
                HttpStatus.INTERNAL_SERVER_ERROR,
                "Error retrieving first step schema: " + e.getMessage(),
                e
            );
        }
    }

    /**
     * URL-decodes the userId query parameter; blank values normalize to null.
     */
    private static String decode(String userId) {
        if (StringUtils.isBlank(userId)) {
            return null;
        }
        // FIX: decode explicitly as UTF-8. URL encoding is UTF-8 by convention;
        // Charset.defaultCharset() made the result depend on the JVM's platform
        // charset and could corrupt non-ASCII user IDs.
        return URLDecoder.decode(userId, StandardCharsets.UTF_8);
    }

    /**
     * Builds a Pageable from raw paging parameters; any sort value other than
     * "asc" (case-insensitive) sorts descending.
     */
    private Pageable createPageable(int page, int limit, String sort, String sortBy) {
        Sort.Direction sortDirection = "asc".equalsIgnoreCase(sort)
            ? Sort.Direction.ASC
            : Sort.Direction.DESC;
        return PageRequest.of(page, limit, Sort.by(sortDirection, sortBy));
    }

    /**
     * Throws 403 when the chat exists and belongs to a different user.
     * No-op when userId is blank or the chat is unknown.
     */
    private void verifyUserChatAccess(String chatId, String userId) {
        if (StringUtils.isNotBlank(userId)) {
            Optional<ChatSession> chatOpt = workflowService.getChatSession(chatId);
            if (chatOpt.isPresent() && !userId.equals(chatOpt.get().getUserId())) {
                log.warn("User {} attempted to access chat {} owned by {}",
                    userId, chatId, chatOpt.get().getUserId());
                throw new ResponseStatusException(HttpStatus.FORBIDDEN, "User not authorized to access this chat");
            }
        }
    }

    /** Lightweight chat summary returned by {@link #createChat}. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class ChatInfo {
        private String chatId;
        private Long lastMessageTime;
        private String lastMessage;
        private String userId;
        private String name;
    }

    /** Aggregated schema listing returned by {@link #schemas}. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class SchemaResponse {
        private List<AIFunctionSchema> schemas;
        private Map<String, String> messageIds;
    }

    /** First-step schema payload returned by {@link #getFirstStepSchema}. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class FirstStepSchemaResponse {
        private String workflowId;
        private String stepId;
        private List<AIFunctionSchema> schemas;
    }

    /** Chat response plus task views of the request and response messages. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class ChatResponseWithTasks {
        private ChatResponse originalResponse;
        private List<ChatMessageTask> request;
        private List<ChatMessageTask> response;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers/controller/AsyncModelRequestController.java
|
package ai.driftkit.workflow.controllers.controller;
import ai.driftkit.common.domain.PromptRequest;
import ai.driftkit.workflow.engine.agent.AgentResponse;
import ai.driftkit.workflow.engine.spring.tracing.domain.AsyncTaskEntity;
import ai.driftkit.workflow.engine.spring.tracing.domain.AsyncTaskEntity.TaskStatus;
import ai.driftkit.workflow.controllers.service.AsyncTaskService;
import ai.driftkit.workflow.engine.spring.dto.ModelRequestDtos.AsyncTaskResponse;
import ai.driftkit.workflow.engine.spring.dto.ModelRequestDtos.TaskRating;
import ai.driftkit.workflow.engine.spring.dto.ModelRequestDtos.TextRequest;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.Optional;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnWebApplication;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
/**
 * REST controller for asynchronous LLM model requests.
 * Returns task IDs immediately and allows checking status/results later.
 * Only activated when AsyncTaskService is available (requires MongoDB).
 */
@Slf4j
@RestController
@RequestMapping("/api/v1/model/async")
@RequiredArgsConstructor
@ConditionalOnWebApplication
@ConditionalOnBean(AsyncTaskService.class)
public class AsyncModelRequestController {

    // FIX: the class Javadoc promises "only activated when AsyncTaskService is
    // available", but the bean was registered unconditionally with an
    // @Autowired(required = false) field — every endpoint would NPE when the
    // service was absent. @ConditionalOnBean (imported but previously unused)
    // now gates registration, so the dependency can be a required final field
    // injected via @RequiredArgsConstructor.
    private final AsyncTaskService asyncTaskService;

    /**
     * Process a prompt request asynchronously - returns task ID immediately
     */
    @PostMapping(value = "/prompt", produces = MediaType.APPLICATION_JSON_VALUE)
    public AsyncTaskResponse processPromptRequestAsync(
            @RequestBody PromptRequest request,
            @RequestHeader(value = "X-User-Id", required = false) String userId) {
        log.debug("Processing async prompt request");
        // Use "anonymous" if no user ID provided
        String effectiveUserId = userId != null ? userId : "anonymous";
        // Create async task and return ID
        String taskId = asyncTaskService.executePromptRequestAsync(request, effectiveUserId);
        return new AsyncTaskResponse(taskId, TaskStatus.PENDING);
    }

    /**
     * Process a text request asynchronously - returns task ID immediately
     */
    @PostMapping(value = "/text", produces = MediaType.APPLICATION_JSON_VALUE)
    public AsyncTaskResponse processTextRequestAsync(
            @RequestBody TextRequest request,
            @RequestHeader(value = "X-User-Id", required = false) String userId) {
        log.debug("Processing async text request");
        // Use "anonymous" if no user ID provided
        String effectiveUserId = userId != null ? userId : "anonymous";
        // Create async task and return ID
        String taskId = asyncTaskService.executeTextRequestAsync(request, effectiveUserId);
        return new AsyncTaskResponse(taskId, TaskStatus.PENDING);
    }

    /**
     * Get task status; 404 when the task is unknown.
     */
    @GetMapping("/task/{taskId}/status")
    public ResponseEntity<AsyncTaskResponse> getTaskStatus(@PathVariable String taskId) {
        log.debug("Getting status for task: {}", taskId);
        return asyncTaskService.getTask(taskId)
            .map(task -> ResponseEntity.ok(new AsyncTaskResponse(task.getTaskId(), task.getStatus())))
            .orElse(ResponseEntity.notFound().build());
    }

    /**
     * Get task result. Maps task state to HTTP status:
     * COMPLETED → 200 with the result, PENDING/RUNNING → 202,
     * unknown task → 404, retrieval failure / unknown state → 500.
     */
    @GetMapping("/task/{taskId}/result")
    public ResponseEntity<?> getTaskResult(@PathVariable String taskId) {
        log.debug("Getting result for task: {}", taskId);
        // First check if task exists
        var taskOpt = asyncTaskService.getTask(taskId);
        if (taskOpt.isEmpty()) {
            return ResponseEntity.notFound().build();
        }
        AsyncTaskEntity task = taskOpt.get();
        // Check task status
        switch (task.getStatus()) {
            case COMPLETED:
                // Get the result
                Optional<AgentResponse<?>> result = asyncTaskService.getTaskResult(taskId);
                if (result.isPresent()) {
                    return ResponseEntity.ok(result.get());
                } else {
                    return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
                        .body(AgentResponse.text("Failed to retrieve result"));
                }
            case FAILED:
                return ResponseEntity.ok(AgentResponse.text(task.getErrorMessage()));
            case CANCELLED:
                return ResponseEntity.ok(AgentResponse.text("Task was cancelled"));
            case PENDING:
            case RUNNING:
                // Task is still in progress
                return ResponseEntity.status(HttpStatus.ACCEPTED)
                    .body(new AsyncTaskResponse(taskId, task.getStatus()));
            default:
                return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
                    .body(AgentResponse.text("Unknown task status"));
        }
    }

    /**
     * Get full task details; 404 when the task is unknown.
     */
    @GetMapping("/task/{taskId}")
    public ResponseEntity<AsyncTaskEntity> getTask(@PathVariable String taskId) {
        log.debug("Getting task details: {}", taskId);
        return asyncTaskService.getTask(taskId)
            .map(ResponseEntity::ok)
            .orElse(ResponseEntity.notFound().build());
    }

    /**
     * Cancel a task; 304 when the task could not be cancelled.
     */
    @DeleteMapping("/task/{taskId}")
    public ResponseEntity<Void> cancelTask(@PathVariable String taskId) {
        log.debug("Cancelling task: {}", taskId);
        boolean cancelled = asyncTaskService.cancelTask(taskId);
        if (cancelled) {
            return ResponseEntity.ok().build();
        } else {
            return ResponseEntity.status(HttpStatus.NOT_MODIFIED).build();
        }
    }

    /**
     * Rate a task result; 404 when the task is unknown.
     */
    @PostMapping("/task/{taskId}/rate")
    public ResponseEntity<AsyncTaskEntity> rateTask(
            @PathVariable String taskId,
            @RequestBody TaskRating rating) {
        log.debug("Rating task: {} with grade: {}", taskId, rating.getGrade());
        Optional<AsyncTaskEntity> taskOpt = asyncTaskService.rateTask(taskId, rating.getGrade(), rating.getComment());
        return taskOpt
            .map(ResponseEntity::ok)
            .orElse(ResponseEntity.notFound().build());
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers/controller/ModelRequestController.java
|
package ai.driftkit.workflow.controllers.controller;
import ai.driftkit.common.domain.PromptRequest;
import ai.driftkit.workflow.engine.agent.AgentResponse;
import ai.driftkit.workflow.controllers.service.AsyncTaskService;
import ai.driftkit.workflow.engine.spring.dto.ModelRequestDtos.TextRequest;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnWebApplication;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.*;
/**
 * REST controller for direct LLM model requests outside of workflows.
 * Delegates all execution logic to AsyncTaskService for consistency.
 * Supports text, image generation, and multimodal requests.
 * Only activated when AsyncTaskService is available (requires MongoDB).
 */
@Slf4j
@RestController
@RequestMapping("/api/v1/model")
@RequiredArgsConstructor
@ConditionalOnWebApplication
@ConditionalOnBean(AsyncTaskService.class)
public class ModelRequestController {

    // FIX: the class Javadoc promises "only activated when AsyncTaskService is
    // available", but the bean was registered unconditionally with an
    // @Autowired(required = false) field — both endpoints would NPE when the
    // service was absent. @ConditionalOnBean (imported but previously unused)
    // now gates registration, so the dependency can be a required final field
    // injected via @RequiredArgsConstructor.
    private final AsyncTaskService asyncTaskService;

    /**
     * Process a prompt request - supports text, image generation, and multimodal.
     */
    @PostMapping(value = "/prompt", produces = MediaType.APPLICATION_JSON_VALUE)
    public AgentResponse<?> processPromptRequest(@RequestBody PromptRequest request) {
        log.debug("Processing prompt request");
        // Delegate all logic to AsyncTaskService
        return asyncTaskService.executePromptRequestSync(request);
    }

    /**
     * Process a direct text request without promptId.
     */
    @PostMapping(value = "/text", produces = MediaType.APPLICATION_JSON_VALUE, consumes = MediaType.APPLICATION_JSON_VALUE)
    public AgentResponse<?> processTextRequest(@RequestBody TextRequest request) {
        log.debug("Processing direct text request");
        // Delegate all logic to AsyncTaskService
        return asyncTaskService.executeTextRequestSync(request);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers/controller/WorkflowAdminController.java
|
package ai.driftkit.workflow.controllers.controller;
import ai.driftkit.workflow.engine.domain.WorkflowMetadata;
import ai.driftkit.workflow.engine.spring.dto.RestResponse;
import ai.driftkit.workflow.engine.spring.service.WorkflowService;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import java.util.List;
import java.util.stream.Collectors;
@Slf4j
@Controller
@RequiredArgsConstructor
@RequestMapping(path = "/data/v1.0/admin/workflows")
public class WorkflowAdminController {

    private final WorkflowService workflowService;

    /**
     * Lists every registered workflow as a lightweight {@link WorkflowInfo}.
     * On any failure the error is logged and an unsuccessful response with a
     * null payload is returned instead of propagating the exception.
     */
    @GetMapping
    public @ResponseBody RestResponse<List<WorkflowInfo>> getWorkflows() {
        log.info("Getting all available workflows");
        try {
            List<WorkflowInfo> infos = workflowService.listWorkflows()
                .stream()
                .map(WorkflowAdminController::toWorkflowInfo)
                .collect(Collectors.toList());
            return new RestResponse<>(true, infos);
        } catch (Exception e) {
            log.error("Error retrieving workflows", e);
            return new RestResponse<>(false, null);
        }
    }

    /** Maps engine metadata to the admin DTO; the id doubles as the name for compatibility. */
    private static WorkflowInfo toWorkflowInfo(WorkflowMetadata metadata) {
        return new WorkflowInfo(metadata.id(), metadata.id(), metadata.description());
    }

    /** Minimal workflow descriptor exposed to the admin UI. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class WorkflowInfo {
        private String id;
        private String name;
        private String description;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers/controller/WorkflowManagementController.java
|
package ai.driftkit.workflow.controllers.controller;
import ai.driftkit.workflow.engine.async.ProgressTracker;
import ai.driftkit.workflow.engine.async.ProgressTracker.Progress;
import ai.driftkit.workflow.engine.core.StepResult.Finish;
import ai.driftkit.workflow.engine.core.WorkflowContext.Keys;
import ai.driftkit.workflow.engine.core.StepResult;
import ai.driftkit.workflow.engine.core.WorkflowEngine;
import ai.driftkit.workflow.engine.domain.WorkflowEvent;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import ai.driftkit.workflow.engine.schema.AIFunctionSchema;
import ai.driftkit.workflow.engine.schema.SchemaUtils;
import ai.driftkit.workflow.engine.spring.service.WorkflowService;
import ai.driftkit.workflow.engine.domain.WorkflowDetails;
import ai.driftkit.workflow.engine.domain.WorkflowMetadata;
import ai.driftkit.workflow.engine.spring.dto.WorkflowDtos.*;
import static ai.driftkit.workflow.engine.spring.dto.WorkflowDtos.*;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;
import java.util.*;
/**
* REST controller for workflow-specific execution and management.
* Chat-related endpoints are in AssistantV3Controller.
*/
@Slf4j
@RestController
@RequestMapping("/api/workflows")
@RequiredArgsConstructor
@Validated
public class WorkflowManagementController {
private final WorkflowEngine engine;
private final ProgressTracker progressTracker;
private final WorkflowService workflowService;
// ========== Workflow-specific endpoints ==========
/**
 * Execute a workflow with the given input.
 *
 * <p>When the request names an input class, the raw properties map is
 * reflectively converted into an instance of that class before execution;
 * otherwise the properties map itself is passed to the engine. Asynchronous
 * executions return 202 with a generated task ID that can be polled via the
 * progress tracker; synchronous executions return 200 with the finished
 * result. Any exception (including ClassNotFoundException from the
 * reflective load) is mapped to a 500 error response.
 *
 * @param workflowId The workflow ID
 * @param request The execution request
 * @param sessionId optional session correlation header (currently only logged)
 * @return The workflow response
 */
@PostMapping("/{workflowId}/execute")
public ResponseEntity<WorkflowResponse> execute(
    @PathVariable String workflowId,
    @RequestBody WorkflowExecutionRequest request,
    @RequestHeader(value = "X-Session-Id", required = false) String sessionId
) {
    try {
        log.debug("Executing workflow: workflowId={}, sessionId={}", workflowId, sessionId);
        // Convert input data using schema utils if needed
        // NOTE(review): Class.forName loads an arbitrary caller-supplied class name —
        // confirm this endpoint is restricted to trusted clients.
        Object input = request.properties();
        if (request.inputClass() != null) {
            Class<?> inputClass = Class.forName(request.inputClass());
            input = SchemaUtils.createInstance(inputClass, request.properties());
        }
        // Execute workflow
        var execution = engine.execute(workflowId, input);
        // Check if async
        if (execution.isAsync()) {
            String taskId = progressTracker.generateTaskId();
            WorkflowEvent event = WorkflowEvent.asyncStarted(taskId, execution.getRunId());
            progressTracker.trackExecution(taskId, event);
            // Return immediate response with task ID
            return ResponseEntity.accepted()
                .body(WorkflowResponse.async(execution.getRunId(), taskId));
        }
        // Get synchronous result (blocks until the execution completes)
        Object result = execution.getResult();
        // Create WorkflowInstance for the response
        WorkflowInstance instance = WorkflowInstance.builder()
            .instanceId(execution.getRunId())
            .workflowId(workflowId)
            .status(WorkflowInstance.WorkflowStatus.COMPLETED)
            .build();
        // Wrap result in a Finish StepResult for consistent response format
        StepResult<?> stepResult = new Finish<>(result);
        return ResponseEntity.ok(WorkflowResponse.from(instance, stepResult));
    } catch (Exception e) {
        log.error("Error executing workflow: workflowId={}", workflowId, e);
        return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
            .body(WorkflowResponse.error(e.getMessage()));
    }
}
/**
* Resume a suspended workflow with user input.
*
* @param runId The workflow run ID
* @param request The resume request with user input
* @return The workflow response
*/
@PostMapping("/{runId}/resume")
public ResponseEntity<WorkflowResponse> resume(
@PathVariable String runId,
@RequestBody WorkflowResumeRequest request
) {
try {
log.debug("Resuming workflow: runId={}", runId);
// Convert user input if schema provided
Object userInput = request.getUserInput();
if (request.inputClass() != null) {
Class<?> inputClass = Class.forName(request.inputClass());
userInput = SchemaUtils.createInstance(inputClass, request.properties());
}
// Resume workflow
var execution = engine.resume(runId, userInput);
// Get result
Object result = execution.getResult();
// Create WorkflowInstance for the response
WorkflowInstance instance = WorkflowInstance.builder()
.instanceId(execution.getRunId())
.workflowId(execution.getWorkflowId())
.status(WorkflowInstance.WorkflowStatus.COMPLETED)
.build();
// Wrap result in a Finish StepResult for consistent response format
StepResult<?> stepResult = new Finish<>(result);
return ResponseEntity.ok(WorkflowResponse.from(instance, stepResult));
} catch (Exception e) {
log.error("Error resuming workflow: runId={}", runId, e);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
.body(WorkflowResponse.error(e.getMessage()));
}
}
/**
* Get the status of a workflow execution.
*
* @param runId The workflow run ID
* @return The workflow status
*/
@GetMapping("/{runId}/status")
public ResponseEntity<WorkflowStatusResponse> getStatus(@PathVariable String runId) {
try {
// Check with progress tracker first for async tasks
var progress = progressTracker.getExecution(runId)
.flatMap(event -> progressTracker.getProgress(event.getAsyncTaskId()));
if (progress.isPresent()) {
return ResponseEntity.ok(WorkflowStatusResponse.fromProgress(progress.get()));
}
// Check workflow state
var state = workflowService.getWorkflowState(runId);
if (state.isPresent()) {
return ResponseEntity.ok(WorkflowStatusResponse.fromState(state.get()));
}
return ResponseEntity.notFound().build();
} catch (Exception e) {
log.error("Error getting workflow status: runId={}", runId, e);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).build();
}
}
/**
* Get the current result of a workflow execution.
* This returns the current state, including intermediate results for async/suspended workflows.
*
* @param runId The workflow run ID
* @return The current workflow result
*/
@GetMapping("/{runId}/current")
public ResponseEntity<WorkflowCurrentResultResponse> getCurrentResult(@PathVariable String runId) {
try {
Optional<WorkflowEvent> currentResult = engine.getCurrentResult(runId);
if (currentResult.isPresent()) {
WorkflowEvent event = currentResult.get();
String message = event.getProperties() != null ?
event.getProperties().get("message") : null;
Map<String, Object> data = event.getProperties() != null ?
new HashMap<>(event.getProperties()) : null;
return ResponseEntity.ok(new WorkflowCurrentResultResponse(
runId,
event.isCompleted() ? WorkflowStatus.COMPLETED : WorkflowStatus.RUNNING,
event.getPercentComplete(),
message,
data,
event.isAsync()
));
}
// Fallback to checking workflow state
var state = workflowService.getWorkflowState(runId);
if (state.isPresent()) {
WorkflowInstance instance = state.get();
Object result = instance.getContext() != null
? instance.getContext().getStepOutputs().get(Keys.FINAL_RESULT)
: null;
return ResponseEntity.ok(new WorkflowCurrentResultResponse(
runId,
mapWorkflowStatus(instance.getStatus()),
instance.getStatus() == WorkflowInstance.WorkflowStatus.COMPLETED ? 100 : 0,
"Workflow " + instance.getStatus().toString().toLowerCase(),
result != null ? Map.of("result", result) : null,
false
));
}
return ResponseEntity.notFound().build();
} catch (Exception e) {
log.error("Error getting current result: runId={}", runId, e);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).build();
}
}
/**
* Cancel an async operation for a workflow.
*
* @param runId The workflow run ID
* @return Success indicator
*/
@PostMapping("/{runId}/cancel")
public ResponseEntity<WorkflowCancelResponse> cancelAsyncOperation(@PathVariable String runId) {
try {
boolean cancelled = engine.cancelAsyncOperation(runId);
if (cancelled) {
return ResponseEntity.ok(new WorkflowCancelResponse(
runId,
true,
"Async operation cancelled successfully"
));
} else {
return ResponseEntity.ok(new WorkflowCancelResponse(
runId,
false,
"No active async operation found for this workflow"
));
}
} catch (Exception e) {
log.error("Error cancelling async operation: runId={}", runId, e);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
.body(new WorkflowCancelResponse(
runId,
false,
"Error cancelling operation: " + e.getMessage()
));
}
}
private WorkflowStatus mapWorkflowStatus(WorkflowInstance.WorkflowStatus status) {
return switch (status) {
case RUNNING -> WorkflowStatus.RUNNING;
case SUSPENDED -> WorkflowStatus.SUSPENDED;
case COMPLETED -> WorkflowStatus.COMPLETED;
case FAILED -> WorkflowStatus.FAILED;
case CANCELLED -> WorkflowStatus.CANCELLED;
};
}
/**
* Get all schemas for a specific workflow.
*
* @param workflowId The workflow ID
* @return List of schemas for all steps
*/
@GetMapping("/{workflowId}/schemas")
public ResponseEntity<List<AIFunctionSchema>> getWorkflowSchemas(@PathVariable String workflowId) {
try {
List<AIFunctionSchema> schemas = workflowService.getWorkflowSchemas(workflowId);
return ResponseEntity.ok(schemas);
} catch (Exception e) {
log.error("Error getting workflow schemas: workflowId={}", workflowId, e);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).build();
}
}
/**
* List all available workflows
*
* @return List of workflow metadata
*/
@GetMapping
public ResponseEntity<List<WorkflowMetadata>> listWorkflows() {
try {
List<WorkflowMetadata> workflows = workflowService.listWorkflows();
return ResponseEntity.ok(workflows);
} catch (Exception e) {
log.error("Error listing workflows", e);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).build();
}
}
/**
* Get detailed information about a workflow
*
* @param workflowId The workflow ID
* @return Workflow details
*/
@GetMapping("/{workflowId}")
public ResponseEntity<WorkflowDetails> getWorkflowDetails(@PathVariable String workflowId) {
try {
WorkflowDetails details = workflowService.getWorkflowDetails(workflowId);
if (details != null) {
return ResponseEntity.ok(details);
}
return ResponseEntity.notFound().build();
} catch (Exception e) {
log.error("Error getting workflow details: workflowId={}", workflowId, e);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).build();
}
}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers/service/AsyncTaskService.java
|
package ai.driftkit.workflow.controllers.service;
import ai.driftkit.common.domain.Language;
import ai.driftkit.common.domain.Prompt;
import ai.driftkit.common.domain.PromptRequest;
import ai.driftkit.common.domain.client.ModelClient;
import ai.driftkit.common.domain.client.ResponseFormat;
import ai.driftkit.common.service.ChatStore;
import ai.driftkit.common.utils.JsonUtils;
import ai.driftkit.context.core.service.PromptService;
import ai.driftkit.context.core.util.PromptUtils;
import ai.driftkit.workflow.engine.agent.AgentResponse;
import ai.driftkit.workflow.engine.agent.LLMAgent;
import ai.driftkit.workflow.engine.agent.RequestTracingProvider;
import ai.driftkit.workflow.engine.core.WorkflowEngine;
import ai.driftkit.workflow.engine.core.WorkflowEngine.WorkflowExecution;
import ai.driftkit.workflow.controllers.controller.ModelRequestController;
import ai.driftkit.workflow.engine.spring.dto.ModelRequestDtos.TextRequest;
import ai.driftkit.workflow.engine.spring.tracing.domain.AsyncTaskEntity;
import ai.driftkit.workflow.engine.spring.tracing.domain.AsyncTaskEntity.TaskStatus;
import ai.driftkit.workflow.engine.spring.tracing.domain.AsyncTaskEntity.TaskType;
import ai.driftkit.workflow.engine.spring.tracing.repository.AsyncTaskRepository;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections4.MapUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.*;
import java.util.HashMap;
import java.util.concurrent.CompletableFuture;
/**
 * Service for managing async task execution.
 * This is the SINGLE source of truth for LLM request execution logic.
 * All controllers should use this service instead of duplicating logic.
 * Only activated when AsyncTaskRepository is available (requires MongoDB).
 *
 * NOTE(review): executePromptAsync/executeTextAsync are annotated @Async but are
 * invoked via {@code this} from methods of the same bean. Spring's @Async is
 * proxy-based, so self-invocation bypasses the proxy and these calls most likely
 * run synchronously on the caller's thread; Spring also only advises public
 * methods by default (these are protected). Confirm and consider injecting the
 * task executor (or a self-reference) instead.
 */
@Slf4j
@Service
@ConditionalOnBean(MongoTemplate.class)
@RequiredArgsConstructor
public class AsyncTaskService {
    // Optional tracing hook; absent when no tracing backend is configured.
    @Autowired(required = false)
    private RequestTracingProvider tracingProvider;
    private final AsyncTaskRepository asyncTaskRepository;
    private final ModelClient modelClient;
    private final PromptService promptService;
    private final ChatStore chatStore;
    private final WorkflowEngine workflowEngine;
    /**
     * Execute prompt request synchronously (for ModelRequestController).
     * Workflow-backed requests are converted into an async task and then
     * polled until completion (up to 5 minutes, see waitForTaskCompletion);
     * the task is attributed to the "system" user.
     */
    public AgentResponse<?> executePromptRequestSync(PromptRequest request) {
        // Check if we should use workflow
        if (StringUtils.isNotBlank(request.getWorkflow())) {
            // For sync calls with workflow, create async task and wait
            String taskId = executePromptRequestAsync(request, "system");
            return waitForTaskCompletion(taskId);
        }
        // Execute directly with LLMAgent
        return executePromptWithAgent(request);
    }
    /**
     * Execute text request synchronously (for ModelRequestController).
     * Same workflow-vs-direct dispatch as executePromptRequestSync.
     */
    public AgentResponse<?> executeTextRequestSync(TextRequest request) {
        // Check if we should use workflow
        if (StringUtils.isNotBlank(request.getWorkflow())) {
            // For sync calls with workflow, create async task and wait
            String taskId = executeTextRequestAsync(request, "system");
            return waitForTaskCompletion(taskId);
        }
        // Execute directly with LLMAgent
        return executeTextWithAgent(request);
    }
    /**
     * Create and execute async prompt request.
     *
     * @param request the prompt request to execute
     * @param userId  owner recorded on the task entity
     * @return the generated task ID for status polling
     */
    public String executePromptRequestAsync(PromptRequest request, String userId) {
        String taskId = UUID.randomUUID().toString();
        // Create task entity
        AsyncTaskEntity task = AsyncTaskEntity.builder()
                .taskId(taskId)
                .userId(userId)
                .chatId(request.getChatId())
                .status(TaskStatus.PENDING)
                .taskType(TaskType.PROMPT_REQUEST)
                .requestBody(toJsonSafe(request))
                .workflowId(request.getWorkflow())
                .variables(request.getVariables())
                .createdAt(System.currentTimeMillis())
                .build();
        // Extract prompt ID if available (only the first prompt is recorded)
        if (CollectionUtils.isNotEmpty(request.getPromptIds())) {
            task.setPromptId(request.getPromptIds().get(0).getPromptId());
        }
        // Save initial task
        asyncTaskRepository.save(task);
        // Execute asynchronously
        // NOTE(review): self-invocation — see class-level note about @Async.
        executePromptAsync(taskId, request);
        return taskId;
    }
    /**
     * Create and execute async text request.
     *
     * @param request the text request to execute
     * @param userId  owner recorded on the task entity
     * @return the generated task ID for status polling
     */
    public String executeTextRequestAsync(TextRequest request, String userId) {
        String taskId = UUID.randomUUID().toString();
        // Create task entity
        AsyncTaskEntity task = AsyncTaskEntity.builder()
                .taskId(taskId)
                .userId(userId)
                .chatId(request.getChatId())
                .status(TaskStatus.PENDING)
                .taskType(TaskType.TEXT_REQUEST)
                .requestBody(toJsonSafe(request))
                .workflowId(request.getWorkflow())
                .variables(request.getVariables())
                .modelId(request.getModelId())
                .temperature(request.getTemperature())
                .createdAt(System.currentTimeMillis())
                .build();
        // Save initial task
        asyncTaskRepository.save(task);
        // Execute asynchronously
        // NOTE(review): self-invocation — see class-level note about @Async.
        executeTextAsync(taskId, request);
        return taskId;
    }
    /**
     * Execute prompt request asynchronously.
     * Marks the task RUNNING, then either hands off to the workflow engine or
     * executes directly; failures are persisted via failTask.
     */
    @Async("taskExecutor")
    protected void executePromptAsync(String taskId, PromptRequest request) {
        // Update status to running
        updateTaskStatus(taskId, TaskStatus.RUNNING);
        try {
            // Check if we should use workflow
            if (StringUtils.isNotBlank(request.getWorkflow())) {
                // Execute workflow asynchronously
                executePromptWithWorkflowAsync(taskId, request);
            } else {
                // Execute with LLMAgent directly
                AgentResponse<?> response = executePromptWithAgent(request);
                completeTask(taskId, response);
            }
        } catch (Exception e) {
            log.error("Error executing async prompt request: " + taskId, e);
            failTask(taskId, e);
        }
    }
    /**
     * Execute text request asynchronously.
     * Mirrors executePromptAsync for TextRequest inputs.
     */
    @Async("taskExecutor")
    protected void executeTextAsync(String taskId, TextRequest request) {
        // Update status to running
        updateTaskStatus(taskId, TaskStatus.RUNNING);
        try {
            // Check if we should use workflow
            if (StringUtils.isNotBlank(request.getWorkflow())) {
                // Execute workflow asynchronously
                executeTextWithWorkflowAsync(taskId, request);
            } else {
                // Execute with LLMAgent directly
                AgentResponse<?> response = executeTextWithAgent(request);
                completeTask(taskId, response);
            }
        } catch (Exception e) {
            log.error("Error executing async text request: " + taskId, e);
            failTask(taskId, e);
        }
    }
    /**
     * Execute prompt request with workflow engine asynchronously.
     * Completion/failure is recorded from the workflow future's callback.
     */
    private void executePromptWithWorkflowAsync(String taskId, PromptRequest request) {
        try {
            // Execute workflow with PromptRequest as input
            WorkflowExecution<?> execution = workflowEngine.execute(request.getWorkflow(), request);
            // Register completion handler
            execution.getFuture().whenComplete((result, error) -> {
                if (error != null) {
                    log.error("Workflow execution failed for task: " + taskId, error);
                    failTask(taskId, new Exception("Workflow execution failed", error));
                } else {
                    // Convert result to AgentResponse
                    AgentResponse<?> response;
                    if (result instanceof AgentResponse) {
                        response = (AgentResponse<?>) result;
                    } else {
                        response = AgentResponse.structured(result);
                    }
                    completeTask(taskId, response);
                }
            });
        } catch (Exception e) {
            log.error("Error starting workflow for task: " + taskId, e);
            failTask(taskId, e);
        }
    }
    /**
     * Execute text request with workflow engine asynchronously.
     * Mirrors executePromptWithWorkflowAsync for TextRequest inputs.
     */
    private void executeTextWithWorkflowAsync(String taskId, TextRequest request) {
        try {
            // Execute workflow with TextRequest as input
            WorkflowExecution<?> execution = workflowEngine.execute(request.getWorkflow(), request);
            // Register completion handler
            execution.getFuture().whenComplete((result, error) -> {
                if (error != null) {
                    log.error("Workflow execution failed for task: " + taskId, error);
                    failTask(taskId, new Exception("Workflow execution failed", error));
                } else {
                    // Convert result to AgentResponse
                    AgentResponse<?> response;
                    if (result instanceof AgentResponse) {
                        response = (AgentResponse<?>) result;
                    } else {
                        response = AgentResponse.structured(result);
                    }
                    completeTask(taskId, response);
                }
            });
        } catch (Exception e) {
            log.error("Error starting workflow for task: " + taskId, e);
            failTask(taskId, e);
        }
    }
    /**
     * Execute prompt request with LLMAgent.
     * This is the SINGLE implementation of prompt execution logic.
     *
     * Only the FIRST entry of promptIds is executed. When savePrompt is set the
     * stored prompt is overwritten with the request's text/temperature — this
     * mutates shared prompt state as a side effect of execution.
     *
     * @throws IllegalArgumentException when promptIds or the first promptId is missing
     */
    private AgentResponse<?> executePromptWithAgent(PromptRequest request) {
        // Validate request
        if (CollectionUtils.isEmpty(request.getPromptIds())) {
            throw new IllegalArgumentException("promptIds must be provided");
        }
        // Get first prompt
        PromptRequest.PromptIdRequest promptIdRequest = request.getPromptIds().get(0);
        String promptId = promptIdRequest.getPromptId();
        if (StringUtils.isBlank(promptId)) {
            throw new IllegalArgumentException("promptId must be provided");
        }
        // Get prompt from service
        Language language = request.getLanguage() != null ? request.getLanguage() : Language.GENERAL;
        Prompt prompt = promptService.getCurrentPromptOrThrow(promptId, language);
        // Handle savePrompt flag
        if (request.isSavePrompt() && StringUtils.isNotBlank(promptIdRequest.getPrompt())) {
            prompt.setMessage(promptIdRequest.getPrompt());
            prompt.setUpdatedTime(System.currentTimeMillis());
            if (promptIdRequest.getTemperature() != null) {
                prompt.setTemperature(promptIdRequest.getTemperature());
            }
            promptService.savePrompt(prompt);
        }
        // Use prompt text from request if provided
        String promptText = StringUtils.isNotBlank(promptIdRequest.getPrompt())
                ? promptIdRequest.getPrompt()
                : prompt.getMessage();
        // Apply variables
        Map<String, Object> variables = request.getVariables();
        if (MapUtils.isNotEmpty(variables)) {
            promptText = PromptUtils.applyVariables(promptText, variables);
        }
        // Determine chatId (a fresh UUID when the request does not carry one)
        String chatId = StringUtils.isNotBlank(request.getChatId()) ? request.getChatId() : UUID.randomUUID().toString();
        // Create LLMAgent
        var agentBuilder = LLMAgent.builder()
                .modelClient(modelClient)
                .name("api-request")
                .agentId(UUID.randomUUID().toString())
                .chatId(chatId)
                .workflowType(request.getWorkflow())
                .promptService(promptService)
                .tracingProvider(tracingProvider)
                .chatStore(chatStore);
        // Set temperature: request override > stored prompt > model default
        if (promptIdRequest.getTemperature() != null) {
            agentBuilder.temperature(promptIdRequest.getTemperature());
        } else if (prompt.getTemperature() != null) {
            agentBuilder.temperature(prompt.getTemperature());
        } else {
            agentBuilder.temperature(modelClient.getTemperature());
        }
        // Set model: request override > stored prompt > model default
        if (StringUtils.isNotBlank(request.getModelId())) {
            agentBuilder.model(request.getModelId());
        } else if (StringUtils.isNotBlank(prompt.getModelId())) {
            agentBuilder.model(prompt.getModelId());
        } else {
            agentBuilder.model(modelClient.getModel());
        }
        // Set system message (with the same variable substitution as the prompt body)
        if (StringUtils.isNotBlank(prompt.getSystemMessage())) {
            String systemMessage = prompt.getSystemMessage();
            if (MapUtils.isNotEmpty(variables)) {
                systemMessage = PromptUtils.applyVariables(systemMessage, variables);
            }
            agentBuilder.systemMessage(systemMessage);
        }
        LLMAgent agent = agentBuilder.build();
        // Get response format
        ResponseFormat responseFormat = request.getResponseFormat();
        // Determine request type: image generation, multimodal, or plain text
        if (responseFormat != null && responseFormat.getType() == ResponseFormat.ResponseType.IMAGE) {
            return agent.executeImageGeneration(promptText, variables);
        }
        // Check for images
        if (CollectionUtils.isNotEmpty(request.getImageBase64())) {
            List<byte[]> imageDataList = new ArrayList<>();
            for (String base64Image : request.getImageBase64()) {
                byte[] imageData = Base64.getDecoder().decode(base64Image);
                imageDataList.add(imageData);
            }
            return agent.executeWithImages(promptText, imageDataList, variables);
        }
        // Regular text request
        return agent.executeText(promptText, variables);
    }
    /**
     * Execute text request with LLMAgent.
     * This is the SINGLE implementation of text execution logic.
     *
     * @throws IllegalArgumentException when the request text is blank
     */
    private AgentResponse<?> executeTextWithAgent(TextRequest request) {
        // Validate request
        if (StringUtils.isBlank(request.getText())) {
            throw new IllegalArgumentException("text must be provided");
        }
        // Determine chatId (a fresh UUID when the request does not carry one)
        String chatId = StringUtils.isNotBlank(request.getChatId()) ? request.getChatId() : UUID.randomUUID().toString();
        // Apply variables to text
        String text = request.getText();
        Map<String, Object> variables = request.getVariables();
        if (MapUtils.isNotEmpty(variables)) {
            text = PromptUtils.applyVariables(text, variables);
        }
        // Create LLMAgent
        var agentBuilder = LLMAgent.builder()
                .modelClient(modelClient)
                .name("text-request")
                .agentId(UUID.randomUUID().toString())
                .chatId(chatId)
                .workflowType(request.getWorkflow())
                .promptService(promptService)
                .tracingProvider(tracingProvider)
                .chatStore(chatStore);
        // Set temperature: request override > model default
        if (request.getTemperature() != null) {
            agentBuilder.temperature(request.getTemperature());
        } else {
            agentBuilder.temperature(modelClient.getTemperature());
        }
        // Set model: request override > model default
        if (StringUtils.isNotBlank(request.getModelId())) {
            agentBuilder.model(request.getModelId());
        } else {
            agentBuilder.model(modelClient.getModel());
        }
        // Set system message (with the same variable substitution as the body text)
        if (StringUtils.isNotBlank(request.getSystemMessage())) {
            String systemMessage = request.getSystemMessage();
            if (MapUtils.isNotEmpty(variables)) {
                systemMessage = PromptUtils.applyVariables(systemMessage, variables);
            }
            agentBuilder.systemMessage(systemMessage);
        }
        LLMAgent agent = agentBuilder.build();
        // Check response format: image generation, multimodal, or plain text
        ResponseFormat responseFormat = request.getResponseFormat();
        if (responseFormat != null && responseFormat.getType() == ResponseFormat.ResponseType.IMAGE) {
            return agent.executeImageGeneration(text, variables);
        }
        // Check for images
        if (CollectionUtils.isNotEmpty(request.getImages())) {
            List<byte[]> imageDataList = new ArrayList<>();
            for (String base64Image : request.getImages()) {
                byte[] imageData = Base64.getDecoder().decode(base64Image);
                imageDataList.add(imageData);
            }
            return agent.executeWithImages(text, imageDataList, variables);
        }
        // Regular text request
        return agent.executeText(text, variables);
    }
    /**
     * Get task status.
     */
    public Optional<AsyncTaskEntity> getTask(String taskId) {
        return asyncTaskRepository.findByTaskId(taskId);
    }
    /**
     * Get task result as AgentResponse.
     * Empty unless the task exists AND has reached COMPLETED status.
     * Deserialization failures yield a placeholder text response, not an error.
     */
    public Optional<AgentResponse<?>> getTaskResult(String taskId) {
        Optional<AsyncTaskEntity> taskOpt = asyncTaskRepository.findByTaskId(taskId);
        if (taskOpt.isEmpty()) {
            return Optional.empty();
        }
        AsyncTaskEntity task = taskOpt.get();
        if (task.getStatus() != TaskStatus.COMPLETED) {
            return Optional.empty();
        }
        try {
            // Deserialize result
            AgentResponse<?> response = JsonUtils.fromJson(task.getResult(), AgentResponse.class);
            return Optional.of(response);
        } catch (Exception e) {
            log.error("Error deserializing task result: " + taskId, e);
            return Optional.of(AgentResponse.text("Failed to deserialize result"));
        }
    }
    /**
     * Cancel task.
     * Only PENDING/RUNNING tasks can be cancelled; note this flips the
     * persisted status but does not interrupt an in-flight execution.
     *
     * @return true if the task was moved to CANCELLED
     */
    public boolean cancelTask(String taskId) {
        Optional<AsyncTaskEntity> taskOpt = asyncTaskRepository.findByTaskId(taskId);
        if (taskOpt.isEmpty()) {
            return false;
        }
        AsyncTaskEntity task = taskOpt.get();
        if (task.getStatus() == TaskStatus.PENDING || task.getStatus() == TaskStatus.RUNNING) {
            task.setStatus(TaskStatus.CANCELLED);
            task.setCompletedAt(System.currentTimeMillis());
            asyncTaskRepository.save(task);
            return true;
        }
        return false;
    }
    /**
     * Wait for task completion (used for sync operations).
     * Busy-polls the repository once per second for up to 5 minutes; every
     * terminal status (and timeout) is reported as a plain-text AgentResponse
     * rather than an exception.
     */
    private AgentResponse<?> waitForTaskCompletion(String taskId) {
        int maxAttempts = 300; // 5 minutes with 1 second intervals
        int attempts = 0;
        while (attempts < maxAttempts) {
            Optional<AsyncTaskEntity> taskOpt = asyncTaskRepository.findByTaskId(taskId);
            if (taskOpt.isEmpty()) {
                return AgentResponse.text("Task not found: " + taskId);
            }
            AsyncTaskEntity task = taskOpt.get();
            if (task.getStatus() == TaskStatus.COMPLETED) {
                return getTaskResult(taskId).orElse(AgentResponse.text("Failed to get result"));
            }
            if (task.getStatus() == TaskStatus.FAILED) {
                return AgentResponse.text(task.getErrorMessage());
            }
            if (task.getStatus() == TaskStatus.CANCELLED) {
                return AgentResponse.text("Task was cancelled");
            }
            try {
                Thread.sleep(1000); // Wait 1 second
            } catch (InterruptedException e) {
                // Re-interrupt so callers up the stack still see the flag.
                Thread.currentThread().interrupt();
                return AgentResponse.text("Interrupted while waiting for task");
            }
            attempts++;
        }
        return AgentResponse.text("Task timeout after 5 minutes");
    }
    /**
     * Update task status.
     * Silently does nothing if the task no longer exists.
     */
    private void updateTaskStatus(String taskId, TaskStatus status) {
        Optional<AsyncTaskEntity> taskOpt = asyncTaskRepository.findByTaskId(taskId);
        if (taskOpt.isPresent()) {
            AsyncTaskEntity task = taskOpt.get();
            task.setStatus(status);
            if (status == TaskStatus.RUNNING) {
                task.setStartedAt(System.currentTimeMillis());
            }
            asyncTaskRepository.save(task);
        }
    }
    /**
     * Complete task with success.
     * Persists the serialized response and, when a start time is present,
     * the wall-clock execution duration.
     */
    private void completeTask(String taskId, AgentResponse<?> response) {
        Optional<AsyncTaskEntity> taskOpt = asyncTaskRepository.findByTaskId(taskId);
        if (taskOpt.isPresent()) {
            AsyncTaskEntity task = taskOpt.get();
            task.setStatus(TaskStatus.COMPLETED);
            task.setCompletedAt(System.currentTimeMillis());
            if (task.getStartedAt() != null) {
                task.setExecutionTimeMs(task.getCompletedAt() - task.getStartedAt());
            }
            task.setResult(toJsonSafe(response));
            asyncTaskRepository.save(task);
        }
    }
    /**
     * Fail task with error.
     * Persists both the message and the full stack trace for diagnostics.
     */
    private void failTask(String taskId, Exception e) {
        Optional<AsyncTaskEntity> taskOpt = asyncTaskRepository.findByTaskId(taskId);
        if (taskOpt.isPresent()) {
            AsyncTaskEntity task = taskOpt.get();
            task.setStatus(TaskStatus.FAILED);
            task.setCompletedAt(System.currentTimeMillis());
            if (task.getStartedAt() != null) {
                task.setExecutionTimeMs(task.getCompletedAt() - task.getStartedAt());
            }
            task.setErrorMessage(e.getMessage());
            task.setErrorStackTrace(getStackTrace(e));
            asyncTaskRepository.save(task);
        }
    }
    /**
     * Rate a task.
     * Stores grade/comment/timestamp into the task's metadata map.
     * Note: a null grade is stored as a null map value.
     *
     * @return the updated entity, or empty when the task does not exist
     */
    public Optional<AsyncTaskEntity> rateTask(String taskId, Integer grade, String comment) {
        Optional<AsyncTaskEntity> taskOpt = asyncTaskRepository.findByTaskId(taskId);
        if (taskOpt.isEmpty()) {
            return Optional.empty();
        }
        AsyncTaskEntity task = taskOpt.get();
        // Store rating in metadata
        if (task.getMetadata() == null) {
            task.setMetadata(new HashMap<>());
        }
        task.getMetadata().put("grade", grade != null ? grade.toString() : null);
        task.getMetadata().put("gradeComment", comment);
        task.getMetadata().put("ratedAt", String.valueOf(System.currentTimeMillis()));
        asyncTaskRepository.save(task);
        return Optional.of(task);
    }
    /**
     * Get stack trace as string.
     */
    private String getStackTrace(Exception e) {
        StringWriter sw = new StringWriter();
        PrintWriter pw = new PrintWriter(sw);
        e.printStackTrace(pw);
        return sw.toString();
    }
    /**
     * Safe JSON conversion that catches JsonProcessingException.
     * Falls back to an empty JSON object so persistence never fails on
     * serialization problems.
     */
    private String toJsonSafe(Object obj) {
        try {
            return JsonUtils.toJson(obj);
        } catch (Exception e) {
            log.error("Error converting object to JSON", e);
            return "{}";
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers
|
java-sources/ai/driftkit/driftkit-workflow-controllers/0.8.1/ai/driftkit/workflow/controllers/service/WorkflowAnalyticsService.java
|
package ai.driftkit.workflow.controllers.service;
import ai.driftkit.common.domain.Language;
import ai.driftkit.common.domain.Prompt;
import ai.driftkit.context.core.service.PromptService;
import ai.driftkit.workflow.engine.spring.dto.AnalyticsDtos.*;
import ai.driftkit.workflow.engine.spring.tracing.domain.AsyncTaskEntity;
import ai.driftkit.workflow.engine.spring.tracing.domain.ModelRequestTrace;
import ai.driftkit.workflow.engine.spring.tracing.repository.AsyncTaskRepository;
import ai.driftkit.workflow.engine.spring.tracing.repository.CoreModelRequestTraceRepository;
import jakarta.annotation.PostConstruct;
import org.apache.commons.lang3.StringUtils;
import org.jetbrains.annotations.NotNull;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.data.domain.*;
import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.data.mongodb.core.aggregation.Aggregation;
import org.springframework.data.mongodb.core.aggregation.AggregationResults;
import org.springframework.data.mongodb.core.query.Criteria;
import org.springframework.data.mongodb.core.query.Query;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import java.text.SimpleDateFormat;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
@Service
@ConditionalOnBean(MongoTemplate.class)
@ConditionalOnProperty(
prefix = "driftkit.workflow.tracing",
name = "enabled",
havingValue = "true",
matchIfMissing = true
)
public class WorkflowAnalyticsService {
    // Formats epoch dates as yyyy-MM-dd for daily metric bucketing.
    // NOTE(review): SimpleDateFormat is not thread-safe; sharing a static
    // instance across threads can corrupt output — consider the thread-safe
    // java.time.format.DateTimeFormatter instead.
    private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd");
    @Autowired
    private CoreModelRequestTraceRepository modelRequestTraceRepository;
    // Optional: only wired when MongoDB async-task persistence is configured.
    @Autowired(required = false)
    private AsyncTaskRepository asyncTaskRepository;
    @Autowired
    private MongoTemplate mongoTemplate;
    @Autowired
    private PromptService promptService;
    // Cache of daily metrics; initialized in init() and refreshed by the scheduler.
    private Map<String, DailyMetricsResponse> dailyMetricsCache;
    @PostConstruct
    public void init() {
        this.dailyMetricsCache = new ConcurrentHashMap<>();
    }
    // Periodically (every 8 minutes) recomputes yesterday..today metrics to keep
    // the cache warm for dashboard queries.
    @Scheduled(fixedRate = 8 * 60000)
    public void dailyMetrics() {
        getDailyMetrics(LocalDate.now().minusDays(1), LocalDate.now());
    }
/**
* Get model request traces within a time range
* @param startTime Start of time range
* @param endTime End of time range
* @param promptId Optional prompt method filter
* @param excludePurpose Optional comma-separated list of purpose keywords to exclude
* @param page Page number
* @param size Page size
* @return Page of traces
*/
public Page<ModelRequestTrace> getTraces(LocalDateTime startTime, LocalDateTime endTime, String promptId, String excludePurpose, int page, int size) {
Pageable pageable = PageRequest.of(page, size, Sort.by("timestamp").descending());
// Default to today if not specified
Criteria criteria = getDatesCriteria(startTime, endTime);
// Add promptId filter if specified
if (StringUtils.isNotBlank(promptId)) {
promptId = getPromptIdByMethod(promptId);
criteria = criteria.and("promptId").is(promptId);
}
// Add purpose exclusion filter if specified
if (StringUtils.isNotBlank(excludePurpose)) {
List<String> purposesToExclude = Arrays.asList(excludePurpose.split(","));
// Create criteria that matches documents where either:
// 1. purpose field doesn't exist, OR
// 2. purpose field exists but doesn't match any of the excluded values
Criteria purposeCriteria = new Criteria().orOperator(
Criteria.where("purpose").exists(false),
Criteria.where("purpose").not().regex(String.join("|", purposesToExclude), "i")
);
criteria = criteria.andOperator(purposeCriteria);
}
List<ModelRequestTrace> traces = getTraces(criteria, pageable);
// Count total for pagination
long total = mongoTemplate.count(
Query.query(criteria),
ModelRequestTrace.class);
return new PageImpl<>(
traces, pageable, total);
}
@NotNull
private List<ModelRequestTrace> getTraces(Criteria criteria, Pageable pageable) {
Aggregation agg = pageable == null ? Aggregation.newAggregation(
Aggregation.match(criteria),
Aggregation.sort(Sort.by("timestamp").descending())
) : Aggregation.newAggregation(
Aggregation.match(criteria),
Aggregation.sort(Sort.by("timestamp").descending()),
Aggregation.skip((long) pageable.getPageNumber() * pageable.getPageSize()),
Aggregation.limit(pageable.getPageSize())
);
AggregationResults<ModelRequestTrace> results = mongoTemplate.aggregate(
agg, "model_request_traces", ModelRequestTrace.class);
List<ModelRequestTrace> traces = results.getMappedResults();
return traces;
}
@NotNull
private static Criteria getDatesCriteria(LocalDateTime startTime, LocalDateTime endTime) {
    // Both bounds default to "now" when absent.
    LocalDateTime effectiveStart = startTime != null ? startTime : LocalDateTime.now();
    LocalDateTime effectiveEnd = endTime != null ? endTime : LocalDateTime.now();
    ZoneId zone = ZoneId.systemDefault();
    // Widen the window: one full day before the start, and through the end of the
    // end day (inclusive, hence the -1 millisecond).
    long fromMillis = effectiveStart.atZone(zone).toInstant().toEpochMilli() - TimeUnit.DAYS.toMillis(1);
    long toMillis = effectiveEnd.plusDays(1).atZone(zone).toInstant().toEpochMilli() - 1;
    // Match traces whose timestamp falls inside the widened window.
    return Criteria.where("timestamp").gte(fromMillis).lte(toMillis);
}
/**
 * Fetches every trace recorded for the given context, ordered by ascending timestamp.
 *
 * @param contextId the context identifier to look up
 * @return traces for the context, oldest first
 */
public List<ModelRequestTrace> getTracesByContextId(String contextId) {
    return modelRequestTraceRepository.findByContextId(contextId).stream()
            .sorted(Comparator.comparing(ModelRequestTrace::getTimestamp))
            .collect(Collectors.toList());
}
/**
 * Lists the distinct prompt method names known to the prompt service, sorted
 * alphabetically. Returns an empty list when no prompt service is configured.
 *
 * @return sorted, de-duplicated prompt method names
 */
public List<String> getAvailablePromptMethods() {
    if (promptService == null) {
        return new ArrayList<>();
    }
    return promptService.getPrompts().stream()
            .map(Prompt::getMethod)
            .sorted()
            .distinct()
            .collect(Collectors.toList());
}
/**
 * Get message tasks by context IDs.
 *
 * <p>Context IDs double as task IDs, so tasks are fetched directly by ID and
 * mapped onto {@code TaskVariables} transfer objects.
 *
 * @param contextIds List of context IDs (same as messageIds)
 * @return List of TaskVariables objects containing task data; empty when the
 *         async task repository is not available
 */
public List<TaskVariables> getMessageTasksByContextIds(List<String> contextIds) {
    if (asyncTaskRepository == null) {
        // If AsyncTaskRepository is not available, return empty list
        return new ArrayList<>();
    }
    List<TaskVariables> result = new ArrayList<>();
    for (AsyncTaskEntity task : asyncTaskRepository.findByTaskIdIn(contextIds)) {
        // A task carries at most one prompt id; expose it as a singleton list.
        List<String> promptIds = task.getPromptId() != null ? List.of(task.getPromptId()) : null;
        result.add(TaskVariables.builder()
                .messageId(task.getTaskId())
                .contextId(task.getTaskId()) // contextId is taskId
                .message(task.getRequestBody())
                .result(task.getResult())
                .modelId(task.getModelId())
                .variables(task.getVariables())
                .createdTime(task.getCreatedAt())
                .responseTime(task.getExecutionTimeMs())
                .promptIds(promptIds)
                .build());
    }
    return result;
}
/**
 * Get daily metrics for the dashboard based on MessageTask and promptIds.
 *
 * <p>Aggregates async-task counts, token usage (traces are joined to tasks via
 * trace.contextId == task.taskId), latency percentiles and success/error rates,
 * grouped by model and by prompt method. Responses are cached per date range
 * for 10 minutes.
 *
 * @param startDate Start date for the metrics range (defaults to today when null)
 * @param endDate End date for the metrics range (defaults to today when null)
 * @return DailyMetricsResponse object containing all metrics
 */
public DailyMetricsResponse getDailyMetrics(LocalDate startDate, LocalDate endDate) {
    if (!promptService.isConfigured()) {
        return new DailyMetricsResponse();
    }
    // Default to today if not specified
    if (startDate == null) {
        startDate = LocalDate.now();
    }
    if (endDate == null) {
        endDate = LocalDate.now();
    }
    // Serve from cache when the cached entry is younger than 10 minutes.
    String dailyCacheKey = startDate + " " + endDate;
    DailyMetricsResponse dailyMetricsResponse = dailyMetricsCache.get(dailyCacheKey);
    if (dailyMetricsResponse != null && System.currentTimeMillis() - dailyMetricsResponse.getTimestamp() < 10 * 60 * 1000) {
        return dailyMetricsResponse;
    }
    // Convert dates to timestamps (whole days in the system default zone; end is inclusive)
    long startTimestamp = startDate.atStartOfDay().atZone(ZoneId.systemDefault()).toInstant().toEpochMilli();
    long endTimestamp = endDate.plusDays(1).atStartOfDay().atZone(ZoneId.systemDefault()).toInstant().toEpochMilli() - 1;
    // Query async tasks if repository is available
    List<AsyncTaskEntity> asyncTasks = new ArrayList<>();
    if (asyncTaskRepository != null) {
        Criteria taskCriteria = Criteria.where("createdAt").gte(startTimestamp).lte(endTimestamp);
        Query taskQuery = Query.query(taskCriteria);
        asyncTasks = mongoTemplate.find(taskQuery, AsyncTaskEntity.class);
    }
    // Query model request traces for token usage data
    // The contextId in trace equals messageId in MessageTask
    Criteria traceCriteria = Criteria.where("timestamp").gte(startTimestamp).lte(endTimestamp);
    Query traceQuery = Query.query(traceCriteria);
    List<ModelRequestTrace> traces = mongoTemplate.find(traceQuery, ModelRequestTrace.class);
    // Create a map of messageId to traces for easy lookup
    Map<String, List<ModelRequestTrace>> tracesByMessageId = traces.stream()
            .collect(Collectors.groupingBy(ModelRequestTrace::getContextId));
    // Get all prompt IDs from tasks to load their corresponding Prompt records
    List<String> allPromptIds = asyncTasks.stream()
            .filter(t -> StringUtils.isNotBlank(t.getPromptId()))
            .map(AsyncTaskEntity::getPromptId)
            .distinct()
            .collect(Collectors.toList());
    // Load all Prompt objects needed for the method field
    List<Prompt> prompts = promptService != null ? promptService.getPromptsByIds(allPromptIds) : new ArrayList<>();
    // Create a map of promptId -> method for conversion (first wins on duplicate ids)
    Map<String, String> promptIdToMethodMap = prompts.stream()
            .collect(Collectors.toMap(Prompt::getId, Prompt::getMethod, (m1, m2) -> m1));
    // Calculate overall metrics
    // NOTE(review): this `metrics` map is populated throughout the method but is
    // never read or returned (the response is built from the builder below) —
    // looks like a leftover from an earlier map-based response; candidate for removal.
    Map<String, Object> metrics = new HashMap<>();
    // --- Overall metrics ---
    // Request counts
    metrics.put("totalTasks", asyncTasks.size());
    // Token usage from traces
    int totalPromptTokens = traces.stream()
            .filter(t -> t.getTrace() != null)
            .mapToInt(t -> t.getTrace().getPromptTokens())
            .sum();
    int totalCompletionTokens = traces.stream()
            .filter(t -> t.getTrace() != null)
            .mapToInt(t -> t.getTrace().getCompletionTokens())
            .sum();
    metrics.put("totalPromptTokens", totalPromptTokens);
    metrics.put("totalCompletionTokens", totalCompletionTokens);
    // --- Latency metrics ---
    // Overall latency calculated from tasks (sorted ascending for percentile lookup)
    List<Long> latencies = asyncTasks.stream()
            .filter(t -> t.getExecutionTimeMs() != null && t.getExecutionTimeMs() > 0)
            .map(AsyncTaskEntity::getExecutionTimeMs)
            .sorted()
            .collect(Collectors.toList());
    metrics.put("latencyPercentiles", calculatePercentiles(latencies));
    // --- Group metrics ---
    // By Model
    Map<String, Long> tasksByModel = asyncTasks.stream()
            .filter(t -> StringUtils.isNotBlank(t.getModelId()))
            .collect(Collectors.groupingBy(AsyncTaskEntity::getModelId, Collectors.counting()));
    metrics.put("tasksByModel", tasksByModel);
    // Extract all promptIds from tasks, map them to method names, and count occurrences
    Map<String, Long> tasksByPromptMethod = new HashMap<>();
    // Iterate through tasks to count by prompt method
    asyncTasks.stream()
            .filter(t -> StringUtils.isNotBlank(t.getPromptId()))
            .forEach(task -> {
                // Convert promptId to method name (if available; falls back to the raw promptId)
                String method = promptIdToMethodMap.getOrDefault(task.getPromptId(), task.getPromptId());
                tasksByPromptMethod.merge(method, 1L, Long::sum);
            });
    metrics.put("tasksByPromptMethod", tasksByPromptMethod);
    // --- Detailed metrics with group breakdowns ---
    // 1. Token usage by prompt method
    Map<String, Map<String, Integer>> tokensByPromptMethod = new HashMap<>();
    tokensByPromptMethod.put("promptTokens", new HashMap<>());
    tokensByPromptMethod.put("completionTokens", new HashMap<>());
    // Process tasks and their associated traces to get token usage by prompt method
    asyncTasks.forEach(task -> {
        if (StringUtils.isNotBlank(task.getPromptId())) {
            // Get traces associated with this task via messageId = contextId
            List<ModelRequestTrace> taskTraces = tracesByMessageId.getOrDefault(task.getTaskId(), Collections.emptyList());
            // Calculate total tokens for this task
            int taskPromptTokens = taskTraces.stream()
                    .filter(t -> t.getTrace() != null)
                    .mapToInt(t -> t.getTrace().getPromptTokens())
                    .sum();
            int taskCompletionTokens = taskTraces.stream()
                    .filter(t -> t.getTrace() != null)
                    .mapToInt(t -> t.getTrace().getCompletionTokens())
                    .sum();
            // Add tokens for this task's prompt method
            if (taskPromptTokens > 0 || taskCompletionTokens > 0) {
                String method = promptIdToMethodMap.getOrDefault(task.getPromptId(), task.getPromptId());
                // Add prompt tokens
                if (taskPromptTokens > 0) {
                    tokensByPromptMethod.get("promptTokens").merge(method, taskPromptTokens, Integer::sum);
                }
                // Add completion tokens
                if (taskCompletionTokens > 0) {
                    tokensByPromptMethod.get("completionTokens").merge(method, taskCompletionTokens, Integer::sum);
                }
            }
        }
    });
    metrics.put("tokensByPromptMethod", tokensByPromptMethod);
    // 2. Token usage by promptMethod+model
    Map<String, Map<String, Integer>> tokensByPromptMethodModel = new HashMap<>();
    tokensByPromptMethodModel.put("promptTokens", new HashMap<>());
    tokensByPromptMethodModel.put("completionTokens", new HashMap<>());
    // Process tasks for token usage by prompt method and model
    asyncTasks.forEach(task -> {
        if (StringUtils.isNotBlank(task.getPromptId()) &&
                StringUtils.isNotBlank(task.getModelId())) {
            // Get traces associated with this task
            List<ModelRequestTrace> taskTraces = tracesByMessageId.getOrDefault(task.getTaskId(), Collections.emptyList());
            // Calculate total tokens for this task
            int taskPromptTokens = taskTraces.stream()
                    .filter(t -> t.getTrace() != null)
                    .mapToInt(t -> t.getTrace().getPromptTokens())
                    .sum();
            int taskCompletionTokens = taskTraces.stream()
                    .filter(t -> t.getTrace() != null)
                    .mapToInt(t -> t.getTrace().getCompletionTokens())
                    .sum();
            // Add tokens for prompt method + model combination
            if (taskPromptTokens > 0 || taskCompletionTokens > 0) {
                // Convert promptId to method name
                String method = promptIdToMethodMap.getOrDefault(task.getPromptId(), task.getPromptId());
                // Composite key "method:modelId"
                String statKey = method + ":" + task.getModelId();
                // Add tokens
                if (taskPromptTokens > 0) {
                    tokensByPromptMethodModel.get("promptTokens").merge(statKey, taskPromptTokens, Integer::sum);
                }
                if (taskCompletionTokens > 0) {
                    tokensByPromptMethodModel.get("completionTokens").merge(statKey, taskCompletionTokens, Integer::sum);
                }
            }
        }
    });
    metrics.put("tokensByPromptMethodModel", tokensByPromptMethodModel);
    // 3. Latency by prompt method
    Map<String, Map<String, Long>> latencyByPromptMethod = new HashMap<>();
    // Group tasks by prompt method
    Map<String, List<AsyncTaskEntity>> tasksByPromptMethodGroup = new HashMap<>();
    asyncTasks.forEach(task -> {
        if (StringUtils.isNotBlank(task.getPromptId())) {
            // Convert promptId to method
            String method = promptIdToMethodMap.getOrDefault(task.getPromptId(), task.getPromptId());
            // Add task to the method's list
            if (!tasksByPromptMethodGroup.containsKey(method)) {
                tasksByPromptMethodGroup.put(method, new ArrayList<>());
            }
            tasksByPromptMethodGroup.get(method).add(task);
        }
    });
    // Calculate latency percentiles for each prompt method
    tasksByPromptMethodGroup.forEach((method, methodTasks) -> {
        List<Long> methodLatencies = methodTasks.stream()
                .filter(t -> t.getExecutionTimeMs() != null && t.getExecutionTimeMs() > 0)
                .map(AsyncTaskEntity::getExecutionTimeMs)
                .sorted()
                .collect(Collectors.toList());
        Map<String, Long> percentiles = calculatePercentiles(methodLatencies);
        if (!percentiles.isEmpty()) {
            latencyByPromptMethod.put(method, percentiles);
        }
    });
    metrics.put("latencyByPromptMethod", latencyByPromptMethod);
    // 3.1. Success/Error counts by prompt method, grouped by contextId
    Map<String, Long> successByPromptMethod = new HashMap<>();
    Map<String, Long> errorsByPromptMethod = new HashMap<>();
    // First group traces by contextId to count each context as a single unit
    Map<String, List<ModelRequestTrace>> allTracesByContextId = traces.stream()
            .filter(t -> StringUtils.isNotBlank(t.getContextId()))
            .collect(Collectors.groupingBy(ModelRequestTrace::getContextId));
    // Group context-based traces by prompt method
    Map<String, Map<String, List<ModelRequestTrace>>> contextsByPromptMethod = new HashMap<>();
    // For each context, create a mapping of prompt methods to traces
    allTracesByContextId.forEach((contextId, contextTraces) -> {
        contextTraces.forEach(trace -> {
            if (trace.getPromptId() != null && trace.getTrace() != null) {
                // Convert promptId to method
                String method = promptIdToMethodMap.getOrDefault(trace.getPromptId(), trace.getPromptId());
                // Initialize nested map if needed
                if (!contextsByPromptMethod.containsKey(method)) {
                    contextsByPromptMethod.put(method, new HashMap<>());
                }
                // Add this trace to the mapping for this method and contextId
                if (!contextsByPromptMethod.get(method).containsKey(contextId)) {
                    contextsByPromptMethod.get(method).put(contextId, new ArrayList<>());
                }
                contextsByPromptMethod.get(method).get(contextId).add(trace);
            }
        });
    });
    // Count success/error for each prompt method based on contexts
    contextsByPromptMethod.forEach((method, contextMap) -> {
        long methodSuccessCount = 0;
        long methodErrorCount = 0;
        // For each context, check if any trace has an error
        for (List<ModelRequestTrace> contextTraces : contextMap.values()) {
            boolean hasError = contextTraces.stream()
                    .anyMatch(t -> t.getTrace() != null && t.getTrace().isHasError());
            if (hasError) {
                methodErrorCount++;
            } else {
                methodSuccessCount++;
            }
        }
        successByPromptMethod.put(method, methodSuccessCount);
        errorsByPromptMethod.put(method, methodErrorCount);
    });
    // Calculate success rate for each prompt method
    Map<String, Double> successRateByPromptMethod = new HashMap<>();
    for (String method : contextsByPromptMethod.keySet()) {
        long methodSuccess = successByPromptMethod.getOrDefault(method, 0L);
        long methodError = errorsByPromptMethod.getOrDefault(method, 0L);
        long total = methodSuccess + methodError;
        double rate = total > 0 ? (double) methodSuccess / total : 0.0;
        successRateByPromptMethod.put(method, rate);
    }
    // Add maps to metrics
    metrics.put("successByPromptMethod", successByPromptMethod);
    metrics.put("errorsByPromptMethod", errorsByPromptMethod);
    metrics.put("successRateByPromptMethod", successRateByPromptMethod);
    // Calculate overall success/error metrics based on contexts
    long successCount = 0;
    long errorCount = 0;
    for (List<ModelRequestTrace> contextTraces : allTracesByContextId.values()) {
        boolean hasError = contextTraces.stream()
                .anyMatch(t -> t.getTrace() != null && t.getTrace().isHasError());
        if (hasError) {
            errorCount++;
        } else {
            successCount++;
        }
    }
    metrics.put("successCount", successCount);
    metrics.put("errorCount", errorCount);
    // Add success rate for better metrics
    double successRate = (successCount + errorCount) > 0 ?
            (double) successCount / (successCount + errorCount) : 0;
    metrics.put("successRate", successRate);
    // 4. Latency by promptMethod+model
    Map<String, Map<String, Long>> latencyByPromptMethodModel = new HashMap<>();
    Map<String, List<AsyncTaskEntity>> tasksByPromptMethodModelGroup = new HashMap<>();
    // Process tasks to get latency by prompt method and model
    asyncTasks.forEach(task -> {
        if (StringUtils.isNotBlank(task.getPromptId()) &&
                StringUtils.isNotBlank(task.getModelId()) &&
                task.getExecutionTimeMs() != null && task.getExecutionTimeMs() > 0) {
            // Convert promptId to method
            String method = promptIdToMethodMap.getOrDefault(task.getPromptId(), task.getPromptId());
            String statKey = method + ":" + task.getModelId();
            // Add task to the method+model combination
            if (!tasksByPromptMethodModelGroup.containsKey(statKey)) {
                tasksByPromptMethodModelGroup.put(statKey, new ArrayList<>());
            }
            tasksByPromptMethodModelGroup.get(statKey).add(task);
        }
    });
    // Calculate latency percentiles for each prompt method+model combination
    tasksByPromptMethodModelGroup.forEach((statKey, methodModelTasks) -> {
        List<Long> methodModelLatencies = methodModelTasks.stream()
                .filter(t -> t.getExecutionTimeMs() != null && t.getExecutionTimeMs() > 0)
                .map(AsyncTaskEntity::getExecutionTimeMs)
                .sorted()
                .collect(Collectors.toList());
        Map<String, Long> percentiles = calculatePercentiles(methodModelLatencies);
        if (!percentiles.isEmpty()) {
            latencyByPromptMethodModel.put(statKey, percentiles);
        }
    });
    // Create TokensByCategory objects
    TokensByCategory tokensByCategoryPromptMethod = TokensByCategory.builder()
            .promptTokens(tokensByPromptMethod.get("promptTokens"))
            .completionTokens(tokensByPromptMethod.get("completionTokens"))
            .build();
    TokensByCategory tokensByCategoryPromptMethodModel = TokensByCategory.builder()
            .promptTokens(tokensByPromptMethodModel.get("promptTokens"))
            .completionTokens(tokensByPromptMethodModel.get("completionTokens"))
            .build();
    // Convert latency percentiles maps to proper objects
    Map<String, LatencyPercentiles> latencyPercentilesByPromptMethod = new HashMap<>();
    latencyByPromptMethod.forEach((method, percentileMap) -> {
        latencyPercentilesByPromptMethod.put(method, LatencyPercentiles.fromMap(percentileMap));
    });
    Map<String, LatencyPercentiles> latencyPercentilesByPromptMethodModel = new HashMap<>();
    latencyByPromptMethodModel.forEach((method, percentileMap) -> {
        latencyPercentilesByPromptMethodModel.put(method, LatencyPercentiles.fromMap(percentileMap));
    });
    // Build and return a properly structured object
    DailyMetricsResponse response = DailyMetricsResponse.builder()
            .totalTasks(asyncTasks.size())
            .totalPromptTokens(totalPromptTokens)
            .totalCompletionTokens(totalCompletionTokens)
            .latencyPercentiles(LatencyPercentiles.fromMap(calculatePercentiles(latencies)))
            .tasksByModel(tasksByModel)
            .tasksByPromptMethod(tasksByPromptMethod)
            .tokensByPromptMethod(tokensByCategoryPromptMethod)
            .tokensByPromptMethodModel(tokensByCategoryPromptMethodModel)
            .latencyByPromptMethod(latencyPercentilesByPromptMethod)
            .successByPromptMethod(successByPromptMethod)
            .errorsByPromptMethod(errorsByPromptMethod)
            .successRateByPromptMethod(successRateByPromptMethod)
            .successCount(successCount)
            .errorCount(errorCount)
            .successRate(successRate)
            .latencyByPromptMethodModel(latencyPercentilesByPromptMethodModel)
            .timestamp(System.currentTimeMillis())
            .build();
    // Cache keyed by the date range so repeated dashboard refreshes are cheap.
    this.dailyMetricsCache.put(dailyCacheKey, response);
    return response;
}
/**
 * Get metrics for a specific prompt method.
 *
 * <p>Computes token usage, latency percentiles, per-model breakdowns and
 * success/error counts (one unit per unique contextId) for traces in the
 * given time window, optionally restricted to a single prompt.
 *
 * @param startTime Start of time range (defaulted by {@link #getDatesCriteria})
 * @param endTime End of time range (defaulted by {@link #getDatesCriteria})
 * @param promptId The prompt method to get metrics for (blank = all prompts)
 * @return PromptMetricsResponse containing all metrics
 */
public PromptMetricsResponse getPromptMetrics(LocalDateTime startTime, LocalDateTime endTime, String promptId) {
    // Default to the last 24 hours if not specified
    Criteria criteria = getDatesCriteria(startTime, endTime);
    // Only add promptId filter if specified (the value may be a method name; resolve it)
    if (StringUtils.isNotBlank(promptId)) {
        promptId = getPromptIdByMethod(promptId);
        criteria = criteria.and("promptId").is(promptId);
    }
    // null pageable = fetch all matching traces, unpaginated
    List<ModelRequestTrace> traces = getTraces(criteria, null);
    // Basic counts
    long totalTraces = traces.size();
    // Token usage
    int totalPromptTokens = traces.stream()
            .filter(t -> t.getTrace() != null)
            .mapToInt(t -> t.getTrace().getPromptTokens())
            .sum();
    int totalCompletionTokens = traces.stream()
            .filter(t -> t.getTrace() != null)
            .mapToInt(t -> t.getTrace().getCompletionTokens())
            .sum();
    // Latency metrics (ascending order for percentile lookup)
    // NOTE(review): getExecutionTimeMs() is not null-checked here (unlike the task-based
    // latency paths); a null execution time would make sorted() throw — confirm traces
    // always carry a non-null execution time.
    List<Long> latencies = traces.stream()
            .filter(t -> t.getTrace() != null)
            .map(t -> t.getTrace().getExecutionTimeMs())
            .sorted()
            .collect(Collectors.toList());
    // By Model breakdown
    Map<String, Long> tracesByModel = traces.stream()
            .filter(t -> StringUtils.isNotBlank(t.getModelId()))
            .collect(Collectors.groupingBy(ModelRequestTrace::getModelId, Collectors.counting()));
    // Token usage by model
    Map<String, Integer> promptTokensByModel = new HashMap<>();
    Map<String, Integer> completionTokensByModel = new HashMap<>();
    traces.stream()
            .filter(t -> t.getModelId() != null && !t.getModelId().isEmpty() && t.getTrace() != null)
            .forEach(t -> {
                String model = t.getModelId();
                // Add prompt tokens
                promptTokensByModel.merge(model, t.getTrace().getPromptTokens(), Integer::sum);
                // Add completion tokens
                completionTokensByModel.merge(model, t.getTrace().getCompletionTokens(), Integer::sum);
            });
    // Create TokensByCategory for models
    TokensByCategory tokensByModel = TokensByCategory.builder()
            .promptTokens(promptTokensByModel)
            .completionTokens(completionTokensByModel)
            .build();
    // Latency by model
    Map<String, LatencyPercentiles> latencyPercentilesByModel = new HashMap<>();
    // Group traces by model
    Map<String, List<ModelRequestTrace>> tracesByModelGroup = traces.stream()
            .filter(t -> t.getModelId() != null && !t.getModelId().isEmpty() && t.getTrace() != null)
            .collect(Collectors.groupingBy(ModelRequestTrace::getModelId));
    // Calculate percentiles for each model
    tracesByModelGroup.forEach((model, modelTraces) -> {
        List<Long> modelLatencies = modelTraces.stream()
                .map(t -> t.getTrace().getExecutionTimeMs())
                .sorted()
                .collect(Collectors.toList());
        Map<String, Long> percentiles = calculatePercentiles(modelLatencies);
        if (!percentiles.isEmpty()) {
            latencyPercentilesByModel.put(model, LatencyPercentiles.fromMap(percentiles));
        }
    });
    // Group traces by contextId (each context is counted once for success/error)
    Map<String, List<ModelRequestTrace>> tracesByContextId = traces.stream()
            .filter(t -> StringUtils.isNotBlank(t.getContextId()))
            .collect(Collectors.groupingBy(ModelRequestTrace::getContextId));
    // Total unique contexts
    long uniqueContexts = tracesByContextId.size();
    // Count contexts with errors vs. successful contexts
    long errorCount = 0;
    long successCount = 0;
    for (List<ModelRequestTrace> contextTraces : tracesByContextId.values()) {
        // Check if any trace in this context has an error
        boolean hasError = contextTraces.stream()
                .anyMatch(t -> t.getTrace() != null && t.getTrace().isHasError());
        if (hasError) {
            errorCount++;
        } else {
            successCount++;
        }
    }
    // Success rate calculation
    double successRate = uniqueContexts == 0 ? 0 : (double) successCount / uniqueContexts;
    // Daily counts (useful for graph visualization)
    Map<String, Long> dailyCounts = traces.stream()
            .collect(Collectors.groupingBy(
                    t -> DATE_FORMAT.format(new Date(t.getTimestamp())),
                    Collectors.counting()));
    // Build and return the response object
    // NOTE(review): totalTraces (raw trace count) is computed but the response's
    // totalTraces field is populated with uniqueContexts — confirm this is intentional.
    return PromptMetricsResponse.builder()
            .totalTraces(uniqueContexts)
            .totalPromptTokens(totalPromptTokens)
            .totalCompletionTokens(totalCompletionTokens)
            .totalTokens(totalPromptTokens + totalCompletionTokens)
            .latencyPercentiles(LatencyPercentiles.fromMap(calculatePercentiles(latencies)))
            .tracesByModel(tracesByModel)
            .tokensByModel(tokensByModel)
            .latencyByModel(latencyPercentilesByModel)
            .successCount(successCount)
            .errorCount(errorCount)
            .successRate(successRate)
            .dailyCounts(dailyCounts)
            .build();
}
/**
 * Resolves a prompt method name to the current prompt's id.
 * Returns the input unchanged when no prompt service is configured
 * or no current prompt exists for the method.
 */
private String getPromptIdByMethod(String promptId) {
    if (promptService == null) {
        return promptId;
    }
    return promptService.getCurrentPrompt(promptId, Language.GENERAL)
            .map(Prompt::getId)
            .orElse(promptId);
}
/**
 * Calculate p25/p50/p75/p90 from an ascending-sorted list of values.
 *
 * <p>Index formula is {@code max(0, (int)(size * p) - 1)}, matching the
 * service's historical percentile definition; an empty input yields an
 * empty map.
 */
private Map<String, Long> calculatePercentiles(List<Long> values) {
    Map<String, Long> result = new HashMap<>();
    if (values.isEmpty()) {
        return result;
    }
    int size = values.size();
    // Table-driven: label and quantile pairs kept in lockstep.
    String[] labels = {"p25", "p50", "p75", "p90"};
    double[] quantiles = {0.25, 0.50, 0.75, 0.90};
    for (int i = 0; i < labels.length; i++) {
        int index = Math.max(0, (int) (size * quantiles[i]) - 1);
        result.put(labels[i], values.get(index));
    }
    return result;
}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/Agent.java
|
package ai.driftkit.workflow.engine.agent;
import java.util.List;
import java.util.Map;
/**
* Base interface for all agents in the simplified DriftKit agent system.
* Agents are simplified wrappers around complex DriftKit workflows that provide
* easy-to-use interfaces for common AI operations.
*/
public interface Agent {
    /**
     * Execute the agent with a simple text input.
     *
     * @param input The text input to process
     * @return The agent's response as a string
     */
    String execute(String input);

    /**
     * Execute the agent with text and a single image input.
     *
     * @param text The text input to process
     * @param imageData Raw image data as byte array (encoding/format expected by
     *                  the implementation — confirm with concrete agents)
     * @return The agent's response as a string
     */
    String execute(String text, byte[] imageData);

    /**
     * Execute the agent with text and multiple images.
     *
     * @param text The text input to process
     * @param imageDataList List of raw image data as byte arrays
     * @return The agent's response as a string
     */
    String execute(String text, List<byte[]> imageDataList);

    /**
     * Execute the agent with input and context variables.
     *
     * @param input The text input to process
     * @param variables Context variables for template processing
     * @return The agent's response as a string
     */
    String execute(String input, Map<String, Object> variables);

    /**
     * Get the agent's name/identifier.
     *
     * @return The agent's name
     */
    String getName();

    /**
     * Get the agent's description.
     *
     * @return The agent's description
     */
    String getDescription();
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/AgentResponse.java
|
package ai.driftkit.workflow.engine.agent;
import ai.driftkit.common.domain.client.ModelImageResponse.ModelContentMessage.ModelContentElement;
import ai.driftkit.common.tools.ToolCall;
import lombok.Builder;
import lombok.Data;
import org.apache.commons.collections4.CollectionUtils;
import java.util.List;
/**
 * Unified response wrapper for LLMAgent operations.
 * Supports text, images, tool calls, and structured data.
 *
 * <p>Instances are created through the static factory methods below; each
 * factory sets {@link ResponseType} to record which payload the response
 * carries, and the {@code has*()} helpers report which fields are populated.
 */
@Data
@Builder
public class AgentResponse<T> {
    // Response content — at most one of these is the "primary" payload.
    private final String text;
    private final List<ModelContentElement.ImageData> images;
    private final T structuredData;
    private final List<ToolCall> toolCalls;
    private final List<ToolExecutionResult> toolResults;

    // Response type discriminator, set by the factory methods.
    private final ResponseType type;

    /** Discriminates which payload a response carries. */
    public enum ResponseType {
        TEXT,
        IMAGES,
        STRUCTURED_DATA,
        TOOL_CALLS,
        TOOL_RESULTS,
        MULTIMODAL
    }

    // Convenience methods

    /** @return true when a text payload is present. */
    public boolean hasText() {
        return text != null;
    }

    /** @return true when at least one image is present. */
    public boolean hasImages() {
        return CollectionUtils.isNotEmpty(images);
    }

    /** @return true when a structured payload is present. */
    public boolean hasStructuredData() {
        return structuredData != null;
    }

    /** @return true when at least one tool call is present. */
    public boolean hasToolCalls() {
        return CollectionUtils.isNotEmpty(toolCalls);
    }

    /** @return true when at least one tool execution result is present. */
    public boolean hasToolResults() {
        return CollectionUtils.isNotEmpty(toolResults);
    }

    // Factory methods

    /** Creates a plain-text response. */
    public static AgentResponse<String> text(String text) {
        return AgentResponse.<String>builder()
                .text(text)
                .type(ResponseType.TEXT)
                .build();
    }

    /** Creates a response carrying a single image. */
    public static AgentResponse<ModelContentElement.ImageData> image(ModelContentElement.ImageData image) {
        return AgentResponse.<ModelContentElement.ImageData>builder()
                .images(List.of(image))
                .type(ResponseType.IMAGES)
                .build();
    }

    /** Creates a response carrying multiple images. */
    public static AgentResponse<List<ModelContentElement.ImageData>> images(List<ModelContentElement.ImageData> images) {
        return AgentResponse.<List<ModelContentElement.ImageData>>builder()
                .images(images)
                .type(ResponseType.IMAGES)
                .build();
    }

    /** Creates a response carrying typed structured data. */
    public static <T> AgentResponse<T> structured(T data) {
        return AgentResponse.<T>builder()
                .structuredData(data)
                .type(ResponseType.STRUCTURED_DATA)
                .build();
    }

    /** Creates a response carrying requested tool calls. */
    public static AgentResponse<List<ToolCall>> toolCalls(List<ToolCall> toolCalls) {
        return AgentResponse.<List<ToolCall>>builder()
                .toolCalls(toolCalls)
                .type(ResponseType.TOOL_CALLS)
                .build();
    }

    /** Creates a response carrying tool execution results. */
    public static AgentResponse<List<ToolExecutionResult>> toolResults(List<ToolExecutionResult> results) {
        return AgentResponse.<List<ToolExecutionResult>>builder()
                .toolResults(results)
                .type(ResponseType.TOOL_RESULTS)
                .build();
    }

    /** Creates a response carrying both text and images. */
    public static AgentResponse<String> multimodal(String text, List<ModelContentElement.ImageData> images) {
        return AgentResponse.<String>builder()
                .text(text)
                .images(images)
                .type(ResponseType.MULTIMODAL)
                .build();
    }

    // Legacy support methods - to maintain backward compatibility

    /**
     * Legacy factory: routes strings to {@link #text(String)} and everything
     * else to {@link #structured(Object)}.
     */
    @SuppressWarnings("unchecked") // safe: when data instanceof String, T is String at the call site
    public static <T> AgentResponse<T> success(T data) {
        if (data instanceof String) {
            return (AgentResponse<T>) text((String) data);
        }
        return structured(data);
    }

    /** Legacy factory: wraps an error message as a text response. */
    public static AgentResponse<String> error(String errorMessage) {
        return text("Error: " + errorMessage);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/EvaluationResult.java
|
package ai.driftkit.workflow.engine.agent;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * POJO for evaluation result from evaluator agent.
 * This ensures type-safe parsing of evaluation responses.
 *
 * <p>Typically deserialized from the evaluator LLM's structured output;
 * Lombok generates the constructors, accessors and builder.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class EvaluationResult {
    /**
     * The status of the evaluation.
     * LoopStatus is a project enum — presumably it drives whether the
     * evaluation loop continues or terminates; confirm against its definition.
     */
    private LoopStatus status;

    /**
     * Optional feedback message for revision.
     */
    private String feedback;

    /**
     * Optional reason for the evaluation result.
     */
    private String reason;
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/LLMAgent.java
|
package ai.driftkit.workflow.engine.agent;
import ai.driftkit.common.domain.Language;
import ai.driftkit.common.domain.chat.ChatMessage;
import ai.driftkit.common.domain.chat.ChatMessage.MessageType;
import ai.driftkit.common.domain.client.*;
import ai.driftkit.common.domain.client.ModelImageRequest;
import ai.driftkit.common.domain.client.ModelImageResponse;
import ai.driftkit.common.domain.client.ModelImageResponse.ModelContentMessage;
import ai.driftkit.common.domain.client.ModelImageResponse.ModelContentMessage.ModelContentElement;
import ai.driftkit.common.domain.client.ModelImageResponse.ModelMessage;
import ai.driftkit.common.domain.client.ModelTextRequest;
import ai.driftkit.common.domain.client.ModelTextResponse.ResponseMessage;
import ai.driftkit.common.domain.streaming.StreamingResponse;
import ai.driftkit.common.domain.streaming.StreamingCallback;
import java.util.concurrent.CompletableFuture;
import ai.driftkit.common.service.ChatStore;
import ai.driftkit.common.tools.ToolCall;
import ai.driftkit.common.tools.ToolInfo;
import ai.driftkit.common.tools.ToolRegistry;
import ai.driftkit.common.utils.JsonUtils;
import ai.driftkit.common.utils.AIUtils;
import ai.driftkit.common.domain.Prompt;
import ai.driftkit.context.core.registry.PromptServiceRegistry;
import ai.driftkit.context.core.service.PromptService;
import ai.driftkit.context.core.util.PromptUtils;
import lombok.Builder;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import java.util.*;
import java.util.stream.Collectors;
/**
* A simplified LLM agent that wraps the complex DriftKit ModelClient interface
* with an easy-to-use builder pattern and unified execute() methods.
*
* Features:
* - Unified execution interface with typed responses
* - Tool/function calling with automatic execution
* - Structured output extraction
* - Multi-modal support (text + images)
* - Conversation memory management
* - Prompt template support
*/
@Slf4j
@Data
public class LLMAgent implements Agent {
// Model configuration: client plus per-agent generation settings.
private final ModelClient modelClient;
private final String name;
private final String description;
private final String systemMessage;
private final Double temperature;
private final Integer maxTokens;
private final String model;
private final String imageModel;
// Unique agent identifier (generated in the constructor when not supplied)
private final String agentId;
// Core components
private final ChatStore chatStore;
private final String chatId;
private final PromptService promptService;
private final ToolRegistry toolRegistry;
// Tracing support
private final RequestTracingProvider tracingProvider;
// Workflow context fields for tracing
private final String workflowId;
private final String workflowType;
private final String workflowStep;
// Enable automatic tool execution
private final boolean autoExecuteTools;
// Default temperature for structured extraction (low value for deterministic parsing)
private static final double STRUCTURED_EXTRACTION_TEMPERATURE = 0.1;
/**
 * Constructs an agent with the full set of collaborators and settings.
 *
 * <p>Defaulting behavior: when {@code agentId} is null a fresh id is generated;
 * when {@code chatId} is null the agent id is reused as the chat id.
 * Intended to be called via {@link #builder()}, hence {@code protected}.
 */
protected LLMAgent(ModelClient modelClient, String name, String description, String systemMessage,
                   Double temperature, Integer maxTokens, String model, String imageModel, String agentId,
                   ChatStore chatStore, String chatId, PromptService promptService, ToolRegistry toolRegistry,
                   RequestTracingProvider tracingProvider, String workflowId, String workflowType,
                   String workflowStep, boolean autoExecuteTools) {
    this.modelClient = modelClient;
    this.name = name;
    this.description = description;
    this.systemMessage = systemMessage;
    this.temperature = temperature;
    this.maxTokens = maxTokens;
    this.model = model;
    this.imageModel = imageModel;
    this.agentId = agentId != null ? agentId : AIUtils.generateId();
    this.chatStore = chatStore;
    this.chatId = chatId != null ? chatId : agentId; // Use agentId as chatId if not provided
    this.promptService = promptService;
    this.toolRegistry = toolRegistry;
    this.tracingProvider = tracingProvider;
    this.workflowId = workflowId;
    this.workflowType = workflowType;
    this.workflowStep = workflowStep;
    this.autoExecuteTools = autoExecuteTools;
}
/**
* Create a new LLMAgent builder.
*
* @return A new LLMAgent builder
*/
public static CustomLLMAgentBuilder builder() {
return new CustomLLMAgentBuilder();
}
/**
* Execute with simple text input
*/
public AgentResponse<String> executeText(String message) {
return executeText(message, Collections.emptyMap());
}
/**
* Execute with text and context variables
*/
public AgentResponse<String> executeText(String message, Map<String, Object> variables) {
try {
// Process message with variables
String processedMessage = processMessageWithVariables(message, variables);
// Add user message to memory
addUserMessage(processedMessage);
// Build and execute request
ModelTextRequest request = buildChatRequest();
ModelTextResponse response = modelClient.textToText(request);
// Trace if provider is available
RequestTracingProvider provider = getTracingProvider();
if (provider != null) {
String contextType = buildContextType("TEXT");
RequestTracingProvider.RequestContext traceContext = RequestTracingProvider.RequestContext.builder()
.contextId(agentId)
.contextType(contextType)
.variables(variables)
.chatId(chatId)
.workflowId(workflowId)
.workflowType(workflowType)
.workflowStep(workflowStep)
.build();
provider.traceTextRequest(request, response, traceContext);
}
// Extract response
return extractResponse(response);
} catch (Exception e) {
log.error("Error in executeText", e);
throw new RuntimeException("Failed to execute text", e);
}
}
/**
* Execute with tools - returns tool calls for manual execution
*/
public AgentResponse<List<ToolCall>> executeForToolCalls(String message) {
return executeForToolCalls(message, Collections.emptyMap());
}
/**
* Execute with tools - returns tool calls for manual execution with variables
*/
public AgentResponse<List<ToolCall>> executeForToolCalls(String message, Map<String, Object> variables) {
try {
// Process message with variables
String processedMessage = processMessageWithVariables(message, variables);
// Add user message to memory
addUserMessage(processedMessage);
// Build and execute request with tools
ModelTextRequest request = buildChatRequestWithTools();
ModelTextResponse response = modelClient.textToText(request);
// Trace if provider is available
RequestTracingProvider provider = getTracingProvider();
if (provider != null) {
String contextType = buildContextType("TOOL_CALLS");
RequestTracingProvider.RequestContext traceContext = RequestTracingProvider.RequestContext.builder()
.contextId(agentId)
.contextType(contextType)
.variables(variables)
.chatId(chatId)
.workflowId(workflowId)
.workflowType(workflowType)
.workflowStep(workflowStep)
.build();
provider.traceTextRequest(request, response, traceContext);
}
// Extract tool calls
List<ToolCall> toolCalls = extractToolCalls(response);
return AgentResponse.toolCalls(toolCalls);
} catch (Exception e) {
log.error("Error getting tool calls", e);
throw new RuntimeException("Failed to get tool calls", e);
}
}
/**
* Execute with tools and automatic execution - returns typed results
*/
public AgentResponse<List<ToolExecutionResult>> executeWithTools(String message) {
return executeWithTools(message, Collections.emptyMap());
}
/**
* Execute with tools and automatic execution - returns typed results with variables
*/
public AgentResponse<List<ToolExecutionResult>> executeWithTools(String message, Map<String, Object> variables) {
try {
// Process message with variables
String processedMessage = processMessageWithVariables(message, variables);
// Add user message to memory
addUserMessage(processedMessage);
// Build and execute request with tools
ModelTextRequest request = buildChatRequestWithTools();
ModelTextResponse response = modelClient.textToText(request);
// Trace if provider is available
RequestTracingProvider provider = getTracingProvider();
if (provider != null) {
String contextType = buildContextType("TOOLS_EXEC");
RequestTracingProvider.RequestContext traceContext = RequestTracingProvider.RequestContext.builder()
.contextId(agentId)
.contextType(contextType)
.variables(variables)
.chatId(chatId)
.workflowId(workflowId)
.workflowType(workflowType)
.workflowStep(workflowStep)
.build();
provider.traceTextRequest(request, response, traceContext);
}
// Check for tool calls
if (hasToolCalls(response)) {
// Execute tools and get typed results
List<ToolExecutionResult> results = executeToolsAndGetResults(response);
// Get final response from model
ModelTextRequest followUpRequest = buildChatRequest();
ModelTextResponse finalResponse = modelClient.textToText(followUpRequest);
// Trace follow-up request if provider is available
if (provider != null) {
String contextType = buildContextType("TOOLS_FOLLOWUP");
RequestTracingProvider.RequestContext traceContext = RequestTracingProvider.RequestContext.builder()
.contextId(agentId)
.contextType(contextType)
.variables(variables)
.build();
provider.traceTextRequest(followUpRequest, finalResponse, traceContext);
}
// Add final response to memory
String finalText = extractResponseText(finalResponse);
addAssistantMessage(finalText);
return AgentResponse.toolResults(results);
}
// No tool calls, return regular response
return AgentResponse.toolResults(Collections.emptyList());
} catch (Exception e) {
log.error("Error in executeWithTools", e);
throw new RuntimeException("Failed to execute with tools", e);
}
}
/**
* Execute with structured output extraction
*/
public <T> AgentResponse<T> executeStructured(String userMessage, Class<T> targetClass) {
try {
// Create response format for structured output
ResponseFormat responseFormat = ResponseFormat.jsonSchema(targetClass);
// Build messages
List<ModelContentMessage> messages = buildBaseMessages();
messages.add(ModelContentMessage.create(Role.user, userMessage));
// Build request with structured output
ModelTextRequest request = ModelTextRequest.builder()
.model(getEffectiveModel())
.temperature(getTemperature(null))
.messages(messages)
.responseFormat(responseFormat)
.build();
// Execute request
ModelTextResponse response = modelClient.textToText(request);
// Trace if provider is available
RequestTracingProvider provider = getTracingProvider();
if (provider != null) {
String contextType = buildContextType("STRUCTURED");
RequestTracingProvider.RequestContext traceContext = RequestTracingProvider.RequestContext.builder()
.contextId(agentId)
.contextType(contextType)
.chatId(chatId)
.workflowId(workflowId)
.workflowType(workflowType)
.workflowStep(workflowStep)
.build();
provider.traceTextRequest(request, response, traceContext);
}
// Extract and parse response
String jsonResponse = extractResponseText(response);
T result = JsonUtils.fromJson(jsonResponse, targetClass);
return AgentResponse.structured(result);
} catch (Exception e) {
log.error("Error extracting structured data", e);
throw new RuntimeException("Failed to extract structured data", e);
}
}
/**
* Execute structured extraction using prompt template by ID
*/
public <T> AgentResponse<T> executeStructuredWithPrompt(String promptId, Map<String, Object> variables, Class<T> targetClass) {
return executeStructuredWithPrompt(promptId, variables, targetClass, Language.GENERAL);
}
/**
* Execute structured extraction using prompt template by ID with language
*/
public <T> AgentResponse<T> executeStructuredWithPrompt(String promptId, Map<String, Object> variables,
Class<T> targetClass, Language language) {
try {
// Use injected PromptService or fall back to registry
PromptService effectivePromptService = promptService != null ?
promptService : PromptServiceRegistry.getInstance();
if (effectivePromptService == null) {
throw new IllegalStateException("PromptService not configured. " +
"Please ensure PromptService is available in your application context " +
"or register one via PromptServiceRegistry.register()");
}
// Get prompt by ID
List<Prompt> promptOpt = effectivePromptService.getPromptsByMethods(List.of(promptId));
if (CollectionUtils.isEmpty(promptOpt)) {
throw new IllegalArgumentException("Prompt not found: " + promptId);
}
Prompt prompt = promptOpt.getFirst();
// Apply variables to prompt
String processedMessage = PromptUtils.applyVariables(prompt.getMessage(), variables);
// Create response format for structured output
ResponseFormat responseFormat = ResponseFormat.jsonSchema(targetClass);
// Build messages with prompt system message if available
List<ModelContentMessage> messages = new ArrayList<>();
String promptSystemMessage = prompt.getSystemMessage();
if (StringUtils.isNotBlank(promptSystemMessage)) {
promptSystemMessage = PromptUtils.applyVariables(promptSystemMessage, variables);
messages.add(ModelContentMessage.create(Role.system, promptSystemMessage));
} else if (StringUtils.isNotBlank(systemMessage)) {
messages.add(ModelContentMessage.create(Role.system, systemMessage));
}
// Add conversation history if needed
messages.addAll(convertMemoryToMessages());
// Add user message
messages.add(ModelContentMessage.create(Role.user, processedMessage));
// Build request
ModelTextRequest request = ModelTextRequest.builder()
.model(getEffectiveModel())
.temperature(getTemperature(prompt))
.messages(messages)
.responseFormat(responseFormat)
.build();
// Execute request
ModelTextResponse response = modelClient.textToText(request);
// Trace if provider is available
RequestTracingProvider provider = getTracingProvider();
if (provider != null) {
String contextType = buildContextType("STRUCTURED_PROMPT");
RequestTracingProvider.RequestContext traceContext = RequestTracingProvider.RequestContext.builder()
.contextId(agentId)
.contextType(contextType)
.promptId(promptId)
.variables(variables)
.chatId(chatId)
.workflowId(workflowId)
.workflowType(workflowType)
.workflowStep(workflowStep)
.build();
provider.traceTextRequest(request, response, traceContext);
}
// Extract and parse response
String jsonResponse = extractResponseText(response);
// Add logging to see the actual JSON response
log.info("Raw JSON response from AI: {}", jsonResponse);
T result = JsonUtils.fromJson(jsonResponse, targetClass);
return AgentResponse.structured(result);
} catch (Exception e) {
log.error("Error in executeStructuredWithPrompt", e);
throw new RuntimeException("Failed to execute structured with prompt", e);
}
}
private double getTemperature(Prompt prompt) {
if (temperature != null) {
return temperature;
}
if (prompt != null && prompt.getTemperature() != null) {
return prompt.getTemperature();
}
return modelClient.getTemperature();
}
/**
* Execute using prompt template by ID
*/
public AgentResponse<String> executeWithPrompt(String promptId, Map<String, Object> variables) {
return executeWithPrompt(promptId, variables, Language.GENERAL);
}
/**
* Execute using prompt template by ID with language
*/
public AgentResponse<String> executeWithPrompt(String promptId, Map<String, Object> variables, Language language) {
try {
// Use injected PromptService or fall back to registry
PromptService effectivePromptService = promptService != null ?
promptService : PromptServiceRegistry.getInstance();
if (effectivePromptService == null) {
throw new IllegalStateException("PromptService not configured. " +
"Please ensure PromptService is available in your application context " +
"or register one via PromptServiceRegistry.register()");
}
// Get prompt by ID
List<Prompt> promptOpt = effectivePromptService.getPromptsByMethods(List.of(promptId));
if (CollectionUtils.isEmpty(promptOpt)) {
throw new IllegalArgumentException("Prompt not found: " + promptId);
}
Prompt prompt = promptOpt.getFirst();
// Apply variables to prompt
String processedMessage = PromptUtils.applyVariables(prompt.getMessage(), variables);
// Use system message from prompt if available
String promptSystemMessage = prompt.getSystemMessage();
if (StringUtils.isNotBlank(promptSystemMessage)) {
promptSystemMessage = PromptUtils.applyVariables(promptSystemMessage, variables);
}
// Add messages to memory
addUserMessage(processedMessage);
// Build messages with prompt system message
List<ModelContentMessage> messages = new ArrayList<>();
if (StringUtils.isNotBlank(promptSystemMessage)) {
messages.add(ModelContentMessage.create(Role.system, promptSystemMessage));
} else if (StringUtils.isNotBlank(systemMessage)) {
messages.add(ModelContentMessage.create(Role.system, systemMessage));
}
// Add conversation history
messages.addAll(convertMemoryToMessages());
// Build request
ModelTextRequest request = ModelTextRequest.builder()
.model(getEffectiveModel())
.temperature(getTemperature(prompt))
.messages(messages)
.build();
// Execute request
ModelTextResponse response = modelClient.textToText(request);
// Trace if provider is available
RequestTracingProvider provider = getTracingProvider();
if (provider != null) {
String contextType = buildContextType("PROMPT");
RequestTracingProvider.RequestContext traceContext = RequestTracingProvider.RequestContext.builder()
.contextId(agentId)
.contextType(contextType)
.promptId(promptId)
.variables(variables)
.chatId(chatId)
.workflowId(workflowId)
.workflowType(workflowType)
.workflowStep(workflowStep)
.build();
provider.traceTextRequest(request, response, traceContext);
}
// Extract response
return extractResponse(response);
} catch (Exception e) {
log.error("Error in executeWithPrompt", e);
throw new RuntimeException("Failed to execute with prompt", e);
}
}
/**
* Execute image generation using the agent's imageModel field
*/
public AgentResponse<ModelContentElement.ImageData> executeImageGeneration(String prompt) {
return executeImageGeneration(prompt, null);
}
/**
* Execute image generation with variables
*/
public AgentResponse<ModelContentElement.ImageData> executeImageGeneration(String prompt, Map<String, Object> variables) {
try {
// Process prompt with variables if provided
String processedPrompt = prompt;
if (variables != null && !variables.isEmpty()) {
processedPrompt = processMessageWithVariables(prompt, variables);
}
// Build image request using agent's imageModel field
ModelImageRequest request = ModelImageRequest.builder()
.prompt(processedPrompt)
.model(imageModel) // Use the agent's imageModel field!
.build();
// Execute request
ModelImageResponse response = modelClient.textToImage(request);
// Trace if provider is available
RequestTracingProvider provider = getTracingProvider();
if (provider != null) {
String contextType = buildContextType("IMAGE_GEN");
RequestTracingProvider.RequestContext traceContext = RequestTracingProvider.RequestContext.builder()
.contextId(agentId)
.contextType(contextType)
.variables(variables)
.chatId(chatId)
.workflowId(workflowId)
.workflowType(workflowType)
.workflowStep(workflowStep)
.build();
provider.traceImageRequest(request, response, traceContext);
}
// Extract first image
if (response != null && response.getBytes() != null && !response.getBytes().isEmpty()) {
ModelContentElement.ImageData imageData = response.getBytes().get(0);
return AgentResponse.image(imageData);
}
throw new RuntimeException("No image generated");
} catch (Exception e) {
log.error("Error generating image", e);
throw new RuntimeException("Failed to generate image", e);
}
}
/**
* Execute with images
*/
public AgentResponse<String> executeWithImages(String text, byte[] imageData) {
return executeWithImages(text, Collections.singletonList(imageData));
}
/**
* Execute with images and variables
*/
public AgentResponse<String> executeWithImages(String text, byte[] imageData, Map<String, Object> variables) {
return executeWithImages(text, Collections.singletonList(imageData), variables);
}
/**
* Execute with multiple images
*/
public AgentResponse<String> executeWithImages(String text, List<byte[]> imageDataList) {
return executeWithImages(text, imageDataList, null);
}
/**
* Execute with multiple images and variables
*/
public AgentResponse<String> executeWithImages(String text, List<byte[]> imageDataList, Map<String, Object> variables) {
try {
// Process text with variables if provided
String processedText = text;
if (variables != null && !variables.isEmpty()) {
processedText = processMessageWithVariables(text, variables);
}
// Convert byte arrays to image data objects
List<ModelContentElement.ImageData> imageDataObjects = imageDataList.stream()
.map(bytes -> new ModelContentElement.ImageData(bytes, "image/jpeg"))
.collect(Collectors.toList());
// Build multimodal content
List<ModelContentElement> content = buildMultimodalContent(processedText, imageDataObjects);
// Create multimodal message
ModelContentMessage userMessage = ModelContentMessage.builder()
.role(Role.user)
.content(content)
.build();
// Add to memory
addUserMessage(processedText); // Add processed text version to memory
// Build messages with system and multimodal content
List<ModelContentMessage> messages = buildBaseMessages();
messages.add(userMessage);
// Build request
ModelTextRequest request = ModelTextRequest.builder()
.model(getEffectiveModel())
.temperature(getTemperature(null))
.messages(messages)
.build();
// Execute request
ModelTextResponse response = modelClient.imageToText(request);
// Trace if provider is available
RequestTracingProvider provider = getTracingProvider();
if (provider != null) {
String contextType = buildContextType("IMAGE");
RequestTracingProvider.RequestContext traceContext = RequestTracingProvider.RequestContext.builder()
.contextId(agentId)
.contextType(contextType)
.variables(variables)
.chatId(chatId)
.workflowId(workflowId)
.workflowType(workflowType)
.workflowStep(workflowStep)
.build();
provider.traceImageToTextRequest(request, response, traceContext);
}
// Extract response
return extractResponse(response);
} catch (Exception e) {
log.error("Error executing with images", e);
throw new RuntimeException("Failed to execute with images", e);
}
}
/**
* Execute a single tool call manually
*/
public ToolExecutionResult executeToolCall(ToolCall toolCall) {
try {
Object result = toolRegistry.executeToolCall(toolCall);
return ToolExecutionResult.success(toolCall.getFunction().getName(), result);
} catch (Exception e) {
log.error("Error executing tool: {}", toolCall.getFunction().getName(), e);
return ToolExecutionResult.failure(toolCall.getFunction().getName(), e.getMessage());
}
}
/**
* Register a tool function using instance method
*/
public LLMAgent registerTool(String methodName, Object instance) {
toolRegistry.registerInstanceMethod(instance, methodName);
return this;
}
/**
* Register a tool function with description
*/
public LLMAgent registerTool(String methodName, Object instance, String description) {
toolRegistry.registerInstanceMethod(instance, methodName, description);
return this;
}
/**
* Register a static method as a tool
*/
public LLMAgent registerStaticTool(String methodName, Class<?> clazz) {
toolRegistry.registerStaticMethod(clazz, methodName);
return this;
}
/**
* Register all annotated tools from an instance
*/
public LLMAgent registerTools(Object instance) {
toolRegistry.registerClass(instance);
return this;
}
/**
* Register all static annotated tools from a class
*/
public LLMAgent registerStaticTools(Class<?> clazz) {
toolRegistry.registerStaticClass(clazz);
return this;
}
/**
* Clear conversation history
*/
public void clearHistory() {
if (chatStore != null) {
chatStore.deleteAll(chatId);
}
}
/**
* Get conversation history
*/
public List<ChatMessage> getConversationHistory() {
if (chatStore != null) {
return chatStore.getAll(chatId);
}
return Collections.emptyList();
}
@Override
public String getName() {
return name;
}
@Override
public String getDescription() {
return description;
}
@Override
public String execute(String input) {
return executeText(input).getText();
}
@Override
public String execute(String text, byte[] imageData) {
return executeWithImages(text, imageData).getText();
}
@Override
public String execute(String text, List<byte[]> imageDataList) {
return executeWithImages(text, imageDataList).getText();
}
@Override
public String execute(String input, Map<String, Object> variables) {
return executeText(input, variables).getText();
}
/**
* Execute with streaming and required callback
* @return CompletableFuture with the complete response text
*/
public CompletableFuture<String> executeStreaming(String input, StreamingCallback<String> callback) {
return executeStreaming(input, null, callback);
}
/**
* Execute with streaming, variables and required callback
* @return CompletableFuture with the complete response text
*/
public CompletableFuture<String> executeStreaming(String input, Map<String, Object> variables, StreamingCallback<String> callback) {
if (callback == null) {
throw new IllegalArgumentException("Callback is required for streaming execution");
}
CompletableFuture<String> future = new CompletableFuture<>();
try {
// Process message with variables
String processedMessage = processMessageWithVariables(input, variables);
// Add user message to memory
addUserMessage(processedMessage);
// Build chat request
ModelTextRequest request;
if (chatStore == null) {
// If no chat store, manually add the user message to the request
List<ModelContentMessage> messages = buildBaseMessages();
messages.add(ModelContentMessage.create(Role.user, processedMessage));
request = ModelTextRequest.builder()
.model(getEffectiveModel())
.temperature(getTemperature(null))
.messages(messages)
.build();
} else {
request = buildChatRequest();
}
// Get streaming response from model client
StreamingResponse<String> streamingResponse = modelClient.streamTextToText(request);
// Create wrapper for memory and tracing
final StringBuilder fullResponse = new StringBuilder();
// Subscribe immediately with the provided callback
streamingResponse.subscribe(new StreamingCallback<String>() {
@Override
public void onNext(String item) {
fullResponse.append(item);
callback.onNext(item);
}
@Override
public void onError(Throwable error) {
log.error("Streaming error in LLMAgent", error);
callback.onError(error);
future.completeExceptionally(error);
}
@Override
public void onComplete() {
// Add complete response to memory
String finalResponse = fullResponse.toString();
if (finalResponse.length() > 0) {
addAssistantMessage(finalResponse);
}
// Trace if provider is available
RequestTracingProvider provider = getTracingProvider();
if (provider != null) {
String contextType = buildContextType("STREAM");
RequestTracingProvider.RequestContext traceContext = RequestTracingProvider.RequestContext.builder()
.contextId(agentId)
.contextType(contextType)
.variables(variables)
.chatId(chatId)
.workflowId(workflowId)
.workflowType(workflowType)
.workflowStep(workflowStep)
.build();
// Create a synthetic response for tracing
ModelTextResponse syntheticResponse = ModelTextResponse.builder()
.choices(Collections.singletonList(
ResponseMessage.builder()
.message(ModelMessage.builder()
.role(Role.assistant)
.content(finalResponse)
.build())
.build()
))
.build();
provider.traceTextRequest(request, syntheticResponse, traceContext);
}
callback.onComplete();
future.complete(finalResponse);
}
});
} catch (Exception e) {
log.error("Error in executeStreaming", e);
callback.onError(new RuntimeException("Failed to execute streaming", e));
future.completeExceptionally(new RuntimeException("Failed to execute streaming", e));
}
return future;
}
// Private helper methods
private String processMessageWithVariables(String message, Map<String, Object> variables) {
if (variables != null && !variables.isEmpty()) {
return PromptUtils.applyVariables(message, variables);
}
return message;
}
private void addUserMessage(String content) {
if (chatStore != null && StringUtils.isBlank(workflowId)) {
chatStore.add(chatId, content, MessageType.USER);
}
}
private void addAssistantMessage(String content) {
if (chatStore != null && StringUtils.isBlank(workflowId)) {
chatStore.add(chatId, content, MessageType.AI);
}
}
private ModelTextRequest buildChatRequest() {
List<ModelContentMessage> messages = buildBaseMessages();
messages.addAll(convertMemoryToMessages());
return ModelTextRequest.builder()
.model(getEffectiveModel())
.temperature(getTemperature(null))
.messages(messages)
.build();
}
private ModelTextRequest buildChatRequestWithTools() {
List<ModelContentMessage> messages = buildBaseMessages();
messages.addAll(convertMemoryToMessages());
ModelClient.Tool[] tools = toolRegistry.getTools();
return ModelTextRequest.builder()
.model(getEffectiveModel())
.temperature(getTemperature(null))
.messages(messages)
.tools(tools.length > 0 ? Arrays.asList(tools) : null)
.build();
}
private List<ModelContentMessage> buildBaseMessages() {
List<ModelContentMessage> messages = new ArrayList<>();
// Add system message if present
if (StringUtils.isNotBlank(systemMessage)) {
messages.add(ModelContentMessage.create(Role.system, systemMessage));
}
return messages;
}
private List<ModelContentMessage> convertMemoryToMessages() {
if (chatStore == null) {
return Collections.emptyList();
}
// Get messages with default token limit
List<ChatMessage> messages = chatStore.getRecent(chatId);
return messages.stream()
.map(this::convertMessageToModelMessage)
.collect(Collectors.toList());
}
private ModelContentMessage convertMessageToModelMessage(ChatMessage message) {
Role role = switch (message.getType()) {
case USER -> Role.user;
case AI -> Role.assistant;
case SYSTEM -> Role.system;
default -> Role.user;
};
// Get message content from properties
String content = message.getPropertiesMap().get(ChatMessage.PROPERTY_MESSAGE);
if (content == null) {
// Fallback to JSON representation of all properties
try {
content = JsonUtils.toJson(message.getPropertiesMap());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
return ModelContentMessage.create(role, content);
}
private boolean hasToolCalls(ModelTextResponse response) {
return response != null &&
CollectionUtils.isNotEmpty(response.getChoices()) &&
response.getChoices().get(0).getMessage() != null &&
CollectionUtils.isNotEmpty(response.getChoices().get(0).getMessage().getToolCalls());
}
private List<ToolCall> extractToolCalls(ModelTextResponse response) {
if (!hasToolCalls(response)) {
return Collections.emptyList();
}
return response.getChoices().get(0).getMessage().getToolCalls();
}
private AgentResponse<String> extractResponse(ModelTextResponse response) {
String responseText = extractResponseText(response);
addAssistantMessage(responseText);
// Check if response contains images
List<ModelContentElement.ImageData> images = extractImages(response);
if (CollectionUtils.isNotEmpty(images)) {
return AgentResponse.multimodal(responseText, images);
}
return AgentResponse.text(responseText);
}
private List<ModelContentElement.ImageData> extractImages(ModelTextResponse response) {
// For now, text-to-text responses don't contain images
// This is a placeholder for future multimodal responses
return Collections.emptyList();
}
private List<ToolExecutionResult> executeToolsAndGetResults(ModelTextResponse response) {
List<ToolCall> toolCalls = extractToolCalls(response);
List<ToolExecutionResult> results = new ArrayList<>();
// Add assistant message with tool calls to memory
String assistantContent = extractResponseText(response);
if (StringUtils.isNotBlank(assistantContent)) {
addAssistantMessage(assistantContent);
}
// Execute each tool call
for (ToolCall toolCall : toolCalls) {
ToolExecutionResult result = executeToolCall(toolCall);
results.add(result);
// Add tool result to memory as user message
String resultStr = result.isSuccess() ?
String.format("[Tool: %s]\nResult: %s", result.getToolName(), convertResultToString(result.getResult())) :
String.format("[Tool: %s]\nError: %s", result.getToolName(), result.getError());
addUserMessage(resultStr);
}
return results;
}
private String convertResultToString(Object result) {
if (result == null) {
return "null";
}
if (result instanceof String) {
return (String) result;
}
// For complex objects, serialize as JSON
try {
return JsonUtils.toJson(result);
} catch (Exception e) {
return result.toString();
}
}
private List<ModelContentElement> buildMultimodalContent(String text,
List<ModelContentElement.ImageData> imageDataList) {
List<ModelContentElement> content = new ArrayList<>();
// Add text content if present
if (StringUtils.isNotBlank(text)) {
content.add(ModelContentElement.builder()
.type(ModelTextRequest.MessageType.text)
.text(text)
.build());
}
// Add image content
if (CollectionUtils.isNotEmpty(imageDataList)) {
for (ModelContentElement.ImageData imageData : imageDataList) {
content.add(ModelContentElement.builder()
.type(ModelTextRequest.MessageType.image)
.image(imageData)
.build());
}
}
return content;
}
private String extractResponseText(ModelTextResponse response) {
if (response == null || CollectionUtils.isEmpty(response.getChoices())) {
return "";
}
ResponseMessage choice = response.getChoices().get(0);
if (choice.getMessage() == null) {
return "";
}
return choice.getMessage().getContent();
}
/**
* Get effective model (from agent config or client default).
*/
private String getEffectiveModel() {
return StringUtils.isNotBlank(model) ? model : modelClient.getModel();
}
/**
* Get effective max tokens (from agent config or client default).
*/
private Integer getEffectiveMaxTokens() {
return maxTokens != null ? maxTokens : modelClient.getMaxTokens();
}
/**
* Get tracing provider - first check injected provider, then registry
*/
private RequestTracingProvider getTracingProvider() {
if (tracingProvider != null) {
return tracingProvider;
}
return RequestTracingRegistry.getInstance();
}
/**
* Build context type based on agent name/description and operation type
*/
private String buildContextType(String operationType) {
if (StringUtils.isNotBlank(name)) {
return String.format("%s_%s", name.toUpperCase().replace(" ", "_"), operationType);
} else if (StringUtils.isNotBlank(description)) {
// Use first few words of description
String[] words = description.split("\\s+");
String shortDesc = words[0].toUpperCase();
return String.format("%s_%s", shortDesc, operationType);
} else {
// Default to agent ID and operation
return String.format("AGENT_%s_%s", agentId, operationType);
}
}
    /**
     * Hand-written builder for {@code LLMAgent}.
     * <p>
     * Beyond plain field wiring, {@link #build()} applies defaults: it
     * auto-discovers a {@code PromptService} from the registry when none was
     * set, and guarantees a non-null {@code ToolRegistry} (registering any
     * tools queued via {@link #addTool(ToolInfo)}).
     */
    public static class CustomLLMAgentBuilder {
        private ModelClient modelClient;
        private String name;
        private String description;
        private String systemMessage;
        private Double temperature;
        private Integer maxTokens;
        private String model;
        private String imageModel;
        private String agentId;
        private ChatStore chatStore;
        private String chatId;
        private PromptService promptService;
        private ToolRegistry toolRegistry;
        private RequestTracingProvider tracingProvider;
        private String workflowId;
        private String workflowType;
        private String workflowStep;
        private boolean autoExecuteTools = true;
        // Tools queued via addTool(); registered into toolRegistry at build time.
        private List<ToolInfo> pendingTools = new ArrayList<>();
        public CustomLLMAgentBuilder() {
            // Set defaults
            this.autoExecuteTools = true;
        }
        public CustomLLMAgentBuilder modelClient(ModelClient modelClient) {
            this.modelClient = modelClient;
            return this;
        }
        public CustomLLMAgentBuilder name(String name) {
            this.name = name;
            return this;
        }
        public CustomLLMAgentBuilder description(String description) {
            this.description = description;
            return this;
        }
        public CustomLLMAgentBuilder systemMessage(String systemMessage) {
            this.systemMessage = systemMessage;
            return this;
        }
        public CustomLLMAgentBuilder temperature(Double temperature) {
            this.temperature = temperature;
            return this;
        }
        public CustomLLMAgentBuilder maxTokens(Integer maxTokens) {
            this.maxTokens = maxTokens;
            return this;
        }
        public CustomLLMAgentBuilder model(String model) {
            this.model = model;
            return this;
        }
        public CustomLLMAgentBuilder imageModel(String imageModel) {
            this.imageModel = imageModel;
            return this;
        }
        public CustomLLMAgentBuilder agentId(String agentId) {
            this.agentId = agentId;
            return this;
        }
        public CustomLLMAgentBuilder chatStore(ChatStore chatStore) {
            this.chatStore = chatStore;
            return this;
        }
        public CustomLLMAgentBuilder chatId(String chatId) {
            this.chatId = chatId;
            return this;
        }
        public CustomLLMAgentBuilder promptService(PromptService promptService) {
            this.promptService = promptService;
            return this;
        }
        public CustomLLMAgentBuilder toolRegistry(ToolRegistry toolRegistry) {
            this.toolRegistry = toolRegistry;
            return this;
        }
        public CustomLLMAgentBuilder tracingProvider(RequestTracingProvider tracingProvider) {
            this.tracingProvider = tracingProvider;
            return this;
        }
        public CustomLLMAgentBuilder autoExecuteTools(boolean autoExecuteTools) {
            this.autoExecuteTools = autoExecuteTools;
            return this;
        }
        public CustomLLMAgentBuilder workflowId(String workflowId) {
            this.workflowId = workflowId;
            return this;
        }
        public CustomLLMAgentBuilder workflowType(String workflowType) {
            this.workflowType = workflowType;
            return this;
        }
        public CustomLLMAgentBuilder workflowStep(String workflowStep) {
            this.workflowStep = workflowStep;
            return this;
        }
        /**
         * Add a tool to the agent. Queued tools are registered into the
         * tool registry when {@link #build()} runs.
         */
        public CustomLLMAgentBuilder addTool(ToolInfo toolInfo) {
            pendingTools.add(toolInfo);
            return this;
        }
        /**
         * Builds the agent, applying defaults for promptService and
         * toolRegistry when they were not explicitly configured.
         */
        public LLMAgent build() {
            // Auto-discover PromptService if not explicitly set
            if (promptService == null) {
                promptService = PromptServiceRegistry.getInstance();
                if (promptService != null) {
                    log.debug("Auto-discovered PromptService from registry: {}",
                        promptService.getClass().getSimpleName());
                }
            }
            // Create tool registry if tools were added
            if (!pendingTools.isEmpty()) {
                if (toolRegistry == null) {
                    toolRegistry = new ToolRegistry();
                }
                for (ToolInfo toolInfo : pendingTools) {
                    toolRegistry.registerTool(toolInfo);
                }
            } else if (toolRegistry == null) {
                // Set default empty registry so the agent never sees a null registry
                toolRegistry = new ToolRegistry();
            }
            return new LLMAgent(modelClient, name, description, systemMessage,
                temperature, maxTokens, model, imageModel, agentId,
                chatStore, chatId, promptService, toolRegistry,
                tracingProvider, workflowId, workflowType, workflowStep,
                autoExecuteTools);
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/LoopAgent.java
|
package ai.driftkit.workflow.engine.agent;
import ai.driftkit.common.domain.client.ResponseFormat;
import ai.driftkit.common.domain.streaming.StreamingResponse;
import ai.driftkit.common.domain.streaming.BasicStreamingResponse;
import ai.driftkit.common.utils.JsonUtils;
import ai.driftkit.context.core.util.DefaultPromptLoader;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Agent that executes a worker agent in a loop until a stop condition is met.
 * The evaluator agent determines whether to continue or stop the loop.
 * <p>
 * Note: the single-shot multimodal {@code execute} overloads delegate straight
 * to the worker and bypass the evaluation loop.
 */
@Slf4j
@Builder
@Getter
@AllArgsConstructor
public class LoopAgent implements Agent {
    private final Agent worker;
    private final Agent evaluator;
    // Loop terminates as soon as the evaluator reports this status.
    private final LoopStatus stopCondition;
    @Builder.Default
    private final String name = "LoopAgent";
    @Builder.Default
    private final String description = "Agent that executes work in a loop until condition is met";
    // Safety cap; the loop gives up and returns the last result once reached.
    @Builder.Default
    private final int maxIterations = 10;
    @Override
    public String execute(String input) {
        return runLoop(input, null);
    }
    @Override
    public String execute(String text, byte[] imageData) {
        // Image input goes straight to the worker — no evaluation loop.
        return worker.execute(text, imageData);
    }
    @Override
    public String execute(String text, List<byte[]> imageDataList) {
        // Image input goes straight to the worker — no evaluation loop.
        return worker.execute(text, imageDataList);
    }
    @Override
    public String execute(String input, Map<String, Object> variables) {
        return runLoop(input, variables);
    }
    /**
     * Execute the loop with worker and evaluator agents.
     *
     * @param input     initial worker input
     * @param variables optional template variables forwarded to both agents (may be null)
     * @return the worker result that satisfied the stop condition, or the last
     *         result if {@code maxIterations} is exhausted
     */
    private String runLoop(String input, Map<String, Object> variables) {
        String currentResult = input;
        int iteration = 0;
        while (iteration < maxIterations) {
            iteration++;
            log.debug("LoopAgent '{}' - iteration {}/{}", getName(), iteration, maxIterations);
            try {
                // Execute worker agent
                String workerResult;
                if (variables != null) {
                    workerResult = worker.execute(currentResult, variables);
                } else {
                    workerResult = worker.execute(currentResult);
                }
                // Evaluate the result
                EvaluationResult evaluationResult;
                // If evaluator is an LLMAgent, use structured output
                if (evaluator instanceof LLMAgent) {
                    LLMAgent llmEvaluator = (LLMAgent) evaluator;
                    String evaluationInput = buildStructuredEvaluationInput(currentResult, workerResult);
                    AgentResponse<EvaluationResult> response = llmEvaluator.executeStructured(
                        evaluationInput,
                        EvaluationResult.class
                    );
                    evaluationResult = response.getStructuredData();
                    // Guard: a malformed structured response must not NPE the loop;
                    // treat missing status as CONTINUE so the loop can make progress.
                    if (evaluationResult == null || evaluationResult.getStatus() == null) {
                        log.warn("LoopAgent '{}' - structured evaluation returned no status, defaulting to CONTINUE", getName());
                        evaluationResult = EvaluationResult.builder()
                            .status(LoopStatus.CONTINUE)
                            .build();
                    }
                } else {
                    // Fallback to traditional JSON parsing approach
                    String evaluationInput = buildEvaluationInput(currentResult, workerResult);
                    String evaluationResponse;
                    if (variables != null) {
                        evaluationResponse = evaluator.execute(evaluationInput, variables);
                    } else {
                        evaluationResponse = evaluator.execute(evaluationInput);
                    }
                    evaluationResult = parseEvaluationResult(evaluationResponse);
                }
                log.debug("LoopAgent '{}' - evaluation status: {}", getName(), evaluationResult.getStatus());
                // Check stop condition
                if (evaluationResult.getStatus() == stopCondition) {
                    log.debug("LoopAgent '{}' - stop condition met after {} iterations", getName(), iteration);
                    return workerResult;
                }
                // Handle different statuses
                switch (evaluationResult.getStatus()) {
                    case REVISE:
                        // Feed the result plus evaluator feedback back into the worker
                        currentResult = buildRevisionInput(workerResult, evaluationResult.getFeedback());
                        break;
                    case RETRY:
                        // Keep the same input for retry
                        break;
                    case FAILED:
                        throw new RuntimeException("Evaluator indicated failure: " + evaluationResult.getReason());
                    case CONTINUE:
                    default:
                        currentResult = workerResult;
                        break;
                }
            } catch (Exception e) {
                log.error("Error in LoopAgent '{}' iteration {}", getName(), iteration, e);
                throw new RuntimeException("LoopAgent execution failed at iteration " + iteration, e);
            }
        }
        log.warn("LoopAgent '{}' reached maximum iterations ({})", getName(), maxIterations);
        return currentResult;
    }
    /**
     * Build input for the evaluator agent with structured output.
     * Uses DefaultPromptLoader to get prompt from resources or PromptService.
     */
    private String buildStructuredEvaluationInput(String originalInput, String workerResult) {
        Map<String, Object> variables = new HashMap<>();
        variables.put("originalRequest", originalInput);
        variables.put("generatedResult", workerResult);
        return DefaultPromptLoader.loadPrompt("loop.agent.structured.evaluation", variables);
    }
    /**
     * Build input for the evaluator agent (legacy JSON format).
     * Uses DefaultPromptLoader to get prompt from resources or PromptService.
     */
    private String buildEvaluationInput(String originalInput, String workerResult) {
        Map<String, Object> variables = new HashMap<>();
        variables.put("originalRequest", originalInput);
        variables.put("generatedResult", workerResult);
        return DefaultPromptLoader.loadPrompt("loop.agent.json.evaluation", variables);
    }
    /**
     * Build input for revision based on evaluator feedback.
     * Uses DefaultPromptLoader to get prompt from resources or PromptService.
     */
    private String buildRevisionInput(String workerResult, String feedback) {
        Map<String, Object> variables = new HashMap<>();
        variables.put("previousResult", workerResult);
        if (StringUtils.isNotBlank(feedback)) {
            variables.put("feedback", feedback);
            return DefaultPromptLoader.loadPrompt("loop.agent.revision", variables);
        } else {
            return DefaultPromptLoader.loadPrompt("loop.agent.revision.no_feedback", variables);
        }
    }
    /**
     * Parse the evaluation response as JSON to get a typed result; falls back
     * to scanning the text for LoopStatus enum names when parsing fails.
     */
    private EvaluationResult parseEvaluationResult(String evaluationResponse) {
        try {
            // Try to parse as JSON first
            EvaluationResult result = JsonUtils.safeParse(evaluationResponse, EvaluationResult.class);
            if (result != null && result.getStatus() != null) {
                return result;
            }
        } catch (Exception e) {
            log.debug("Failed to parse evaluation response as JSON, falling back to enum analysis", e);
        }
        // Fallback to enum name analysis if JSON parsing fails
        return fallbackEnumAnalysis(evaluationResponse);
    }
    /**
     * Fallback method to analyze text response using enum names if JSON parsing fails.
     * Returns CONTINUE when no enum name is present in the response.
     */
    private EvaluationResult fallbackEnumAnalysis(String response) {
        if (StringUtils.isBlank(response)) {
            return EvaluationResult.builder()
                .status(LoopStatus.CONTINUE)
                .build();
        }
        String upperResponse = response.toUpperCase();
        // Check for each enum value by name; declaration order decides ties
        for (LoopStatus status : LoopStatus.values()) {
            if (upperResponse.contains(status.name())) {
                return EvaluationResult.builder()
                    .status(status)
                    .feedback(response)
                    .build();
            }
        }
        // Default to CONTINUE if no enum match found
        return EvaluationResult.builder()
            .status(LoopStatus.CONTINUE)
            .feedback(response)
            .build();
    }
    // Loop agents don't support streaming - need to evaluate complete results at each iteration
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/LoopStatus.java
|
package ai.driftkit.workflow.engine.agent;
/**
 * Status enumeration for loop execution results.
 * <p>
 * Note: declaration order matters for textual fallback matching in
 * {@code LoopAgent.fallbackEnumAnalysis}, which scans responses for these
 * names in order — do not reorder casually.
 */
public enum LoopStatus {
    /**
     * Loop should continue with the current result.
     */
    CONTINUE,
    /**
     * Loop should stop - condition has been met.
     */
    COMPLETE,
    /**
     * Loop should continue but with revision based on feedback.
     */
    REVISE,
    /**
     * Loop should retry the current iteration.
     */
    RETRY,
    /**
     * Loop failed due to an error.
     */
    FAILED
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/NoOpRequestTracingProvider.java
|
package ai.driftkit.workflow.engine.agent;
import ai.driftkit.common.domain.client.ModelTextRequest;
import ai.driftkit.common.domain.client.ModelTextResponse;
import ai.driftkit.common.domain.client.ModelImageRequest;
import ai.driftkit.common.domain.client.ModelImageResponse;
import lombok.extern.slf4j.Slf4j;
/**
 * No-operation implementation of RequestTracingProvider.
 * Used as a fallback when tracing is disabled or MongoDB is not available.
 * <p>
 * All trace methods only emit a debug log line; requests and responses are
 * never stored.
 */
@Slf4j
public class NoOpRequestTracingProvider implements RequestTracingProvider {
    /** Discards the text request/response; logs at debug level only. */
    @Override
    public void traceTextRequest(ModelTextRequest request, ModelTextResponse response, RequestContext context) {
        log.debug("Text request tracing disabled - NoOp provider active");
    }
    /** Discards the image request/response; logs at debug level only. */
    @Override
    public void traceImageRequest(ModelImageRequest request, ModelImageResponse response, RequestContext context) {
        log.debug("Image request tracing disabled - NoOp provider active");
    }
    /** Discards the image-to-text request/response; logs at debug level only. */
    @Override
    public void traceImageToTextRequest(ModelTextRequest request, ModelTextResponse response, RequestContext context) {
        log.debug("Image-to-text request tracing disabled - NoOp provider active");
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/RequestTracingProvider.java
|
package ai.driftkit.workflow.engine.agent;
import ai.driftkit.common.domain.client.ModelTextRequest;
import ai.driftkit.common.domain.client.ModelTextResponse;
import ai.driftkit.common.domain.client.ModelImageRequest;
import ai.driftkit.common.domain.client.ModelImageResponse;
import lombok.Builder;
import lombok.Data;
import java.util.Map;
/**
 * Interface for providing request tracing capabilities to LLMAgent.
 * Implementations can provide tracing via Spring (ModelRequestService),
 * REST API, or other mechanisms.
 */
public interface RequestTracingProvider {
    /**
     * Trace a text-to-text request and response
     *
     * @param request  the model request that was sent
     * @param response the model response that was received
     * @param context  metadata identifying the originating agent/workflow
     */
    void traceTextRequest(ModelTextRequest request, ModelTextResponse response, RequestContext context);
    /**
     * Trace a text-to-image request and response
     *
     * @param request  the image-generation request that was sent
     * @param response the image response that was received
     * @param context  metadata identifying the originating agent/workflow
     */
    void traceImageRequest(ModelImageRequest request, ModelImageResponse response, RequestContext context);
    /**
     * Trace an image-to-text request and response
     *
     * @param request  the model request (with image content) that was sent
     * @param response the model response that was received
     * @param context  metadata identifying the originating agent/workflow
     */
    void traceImageToTextRequest(ModelTextRequest request, ModelTextResponse response, RequestContext context);
    /**
     * Context information for request tracing.
     * Immutable value carrier built via its Lombok builder.
     */
    @Data
    @Builder
    class RequestContext {
        // Identifier of the traced interaction (e.g. chat or request id)
        private final String contextId;
        // Category string built from agent name/description and operation type
        private final String contextType;
        private final String promptId;
        // Template variables used when rendering the prompt, if any
        private final Map<String, Object> variables;
        private final String workflowId;
        private final String workflowType;
        private final String workflowStep;
        private final String chatId;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/RequestTracingRegistry.java
|
package ai.driftkit.workflow.engine.agent;
import lombok.extern.slf4j.Slf4j;
/**
 * Global holder for the active {@link RequestTracingProvider}.
 * Spring-based implementations typically register themselves here at startup;
 * the field is volatile so registration is visible across threads.
 */
@Slf4j
public class RequestTracingRegistry {
    private static volatile RequestTracingProvider instance;

    /**
     * Registers the given provider as the active one, replacing any previous
     * registration (typically called by Spring components).
     */
    public static void register(RequestTracingProvider provider) {
        instance = provider;
        log.info("Registered RequestTracingProvider: {}", provider.getClass().getSimpleName());
    }

    /**
     * Returns the currently registered provider, or null when none is set.
     */
    public static RequestTracingProvider getInstance() {
        return instance;
    }

    /**
     * Reports whether a tracing provider has been registered.
     */
    public static boolean isAvailable() {
        return getInstance() != null;
    }

    /**
     * Clears the current registration (useful for testing).
     */
    public static void unregister() {
        RequestTracingProvider previous = instance;
        instance = null;
        if (previous == null) {
            return;
        }
        log.info("Unregistered RequestTracingProvider: {}", previous.getClass().getSimpleName());
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/SequentialAgent.java
|
package ai.driftkit.workflow.engine.agent;
import ai.driftkit.common.domain.streaming.StreamingResponse;
import ai.driftkit.common.domain.streaming.BasicStreamingResponse;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.Singular;
import lombok.extern.slf4j.Slf4j;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
 * Agent that executes a sequence of agents one after another.
 * The output of each agent becomes the input for the next agent.
 * <p>
 * For multimodal input only the first agent receives the image data; all
 * subsequent agents operate on the text output of their predecessor.
 */
@Slf4j
@Builder
@Getter
@AllArgsConstructor
public class SequentialAgent implements Agent {
    @Singular
    private final List<Agent> agents;
    @Builder.Default
    private final String name = "SequentialAgent";
    @Builder.Default
    private final String description = "Agent that executes multiple agents in sequence";
    @Override
    public String execute(String input) {
        return runSequence(input, null);
    }
    @Override
    public String execute(String text, byte[] imageData) {
        if (agents.isEmpty()) {
            return text;
        }
        // For multimodal input, only the first agent can handle images;
        // subsequent agents work with text output
        String result = agents.get(0).execute(text, imageData);
        return runRemaining(result);
    }
    @Override
    public String execute(String text, List<byte[]> imageDataList) {
        if (agents.isEmpty()) {
            return text;
        }
        // For multimodal input, only the first agent can handle images;
        // subsequent agents work with text output
        String result = agents.get(0).execute(text, imageDataList);
        return runRemaining(result);
    }
    @Override
    public String execute(String input, Map<String, Object> variables) {
        return runSequence(input, variables);
    }
    /**
     * Runs agents 2..n on the given text result.
     * Shared tail of the two multimodal execute overloads.
     *
     * @param result output of the first agent
     * @return output of the last agent in the chain
     */
    private String runRemaining(String result) {
        for (int i = 1; i < agents.size(); i++) {
            Agent agent = agents.get(i);
            log.debug("SequentialAgent '{}' - executing step {}/{}: {}",
                getName(), i + 1, agents.size(), agent.getName());
            result = agent.execute(result);
        }
        return result;
    }
    /**
     * Execute the full sequence of agents, threading each output into the
     * next agent's input.
     *
     * @param input     initial input for the first agent
     * @param variables optional template variables forwarded to each agent (may be null)
     * @return output of the last agent, or the input unchanged when there are no agents
     * @throws RuntimeException wrapping the cause when any step fails
     */
    private String runSequence(String input, Map<String, Object> variables) {
        if (agents.isEmpty()) {
            log.warn("SequentialAgent '{}' has no agents to execute", getName());
            return input;
        }
        String result = input;
        for (int i = 0; i < agents.size(); i++) {
            Agent agent = agents.get(i);
            log.debug("SequentialAgent '{}' - executing step {}/{}: {}",
                getName(), i + 1, agents.size(), agent.getName());
            try {
                if (variables != null) {
                    result = agent.execute(result, variables);
                } else {
                    result = agent.execute(result);
                }
                log.debug("SequentialAgent '{}' - step {} completed", getName(), i + 1);
            } catch (Exception e) {
                log.error("SequentialAgent '{}' - step {} failed: {}",
                    getName(), i + 1, agent.getName(), e);
                throw new RuntimeException(
                    String.format("SequentialAgent step %d failed: %s", i + 1, agent.getName()), e);
            }
        }
        log.debug("SequentialAgent '{}' completed all {} steps", getName(), agents.size());
        return result;
    }
    // Sequential agents don't support streaming - each agent needs complete output of previous one
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/ToolExecutionResult.java
|
package ai.driftkit.workflow.engine.agent;
import lombok.Builder;
import lombok.Data;
/**
 * Result of tool execution with typed data.
 * Immutable value object; create instances via the {@link #success} and
 * {@link #failure} factories or the Lombok builder.
 */
@Data
@Builder
public class ToolExecutionResult {
    private final String toolName;
    private final Object result;
    private final Class<?> resultType;
    private final boolean success;
    private final String error;

    /**
     * Returns the result cast to the requested type.
     *
     * @throws ClassCastException when the stored result is not an instance of {@code type}
     */
    public <T> T getTypedResult(Class<T> type) {
        if (result == null) {
            return null;
        }
        if (!type.isInstance(result)) {
            throw new ClassCastException("Cannot cast result of type " + result.getClass().getName() + " to " + type.getName());
        }
        return type.cast(result);
    }

    /**
     * Legacy unchecked accessor kept for backward compatibility.
     */
    @SuppressWarnings("unchecked")
    public <T> T getTypedResult() {
        return (T) result;
    }

    /** Creates a successful result; resultType is Void.class for a null result. */
    public static ToolExecutionResult success(String toolName, Object result) {
        Class<?> type = (result == null) ? Void.class : result.getClass();
        return ToolExecutionResult.builder()
            .toolName(toolName)
            .result(result)
            .resultType(type)
            .success(true)
            .build();
    }

    /** Creates a failed result carrying only the error message. */
    public static ToolExecutionResult failure(String toolName, String error) {
        return ToolExecutionResult.builder()
            .toolName(toolName)
            .success(false)
            .error(error)
            .build();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/tool/AgentAsTool.java
|
package ai.driftkit.workflow.engine.agent.tool;
import ai.driftkit.common.domain.client.ModelClient;
import ai.driftkit.common.tools.ToolInfo;
import ai.driftkit.workflow.engine.agent.Agent;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Wrapper that allows any Agent to be used as a Tool.
 * This enables composition of agents where one agent can call another agent as a tool.
 * <p>
 * The tool exposes a single string parameter named {@code input}; executing it
 * forwards that string to the wrapped agent and returns the agent's output.
 */
@Slf4j
@RequiredArgsConstructor
public class AgentAsTool implements Tool<SimpleToolArguments> {
    private final Agent agent;
    private final String name;
    private final String description;
    /**
     * Create a new AgentAsTool wrapper as ToolInfo.
     *
     * @param name The name of the tool
     * @param description The description of what the tool does
     * @param agent The agent to wrap as a tool
     * @return A new ToolInfo instance representing the agent as a tool
     */
    public static ToolInfo create(String name, String description, Agent agent) {
        AgentAsTool agentAsTool = new AgentAsTool(agent, name, description);
        // Create ToolInfo from the AgentAsTool instance.
        // method is null because execution goes through the Tool instance,
        // not via reflection on a Java method.
        return ToolInfo.builder()
            .functionName(name)
            .description(description)
            .parameterNames(Arrays.asList("arguments"))
            .parameterTypes(Arrays.asList(SimpleToolArguments.class))
            .returnType(String.class)
            .method(null) // No method for Tool<?> objects
            .instance(agentAsTool) // Store the AgentAsTool instance
            .isStatic(false)
            .toolDefinition(createToolDefinition(agentAsTool))
            .build();
    }
    /**
     * Creates a ModelClient.Tool definition from an AgentAsTool instance
     */
    private static ModelClient.Tool createToolDefinition(AgentAsTool agentAsTool) {
        return ModelClient.Tool.builder()
            .type(ModelClient.ResponseFormatType.function)
            .function(ModelClient.ToolFunction.builder()
                .name(agentAsTool.getName())
                .description(agentAsTool.getDescription())
                .parameters(convertToFunctionParameters(agentAsTool.getParametersSchema()))
                .build())
            .build();
    }
    /**
     * Converts ToolParameterSchema to FunctionParameters.
     * Only type and description are carried over for each property;
     * nested/enum/array details of PropertySchema are not mapped here.
     */
    private static ModelClient.ToolFunction.FunctionParameters convertToFunctionParameters(ToolParameterSchema schema) {
        Map<String, ModelClient.Property> properties = new HashMap<>();
        // Convert each property in the schema
        if (schema.getProperties() != null) {
            for (Map.Entry<String, ToolParameterSchema.PropertySchema> entry : schema.getProperties().entrySet()) {
                ToolParameterSchema.PropertySchema propSchema = entry.getValue();
                ModelClient.Property property = new ModelClient.Property();
                property.setType(ModelClient.ResponseFormatType.fromType(propSchema.getType()));
                property.setDescription(propSchema.getDescription());
                properties.put(entry.getKey(), property);
            }
        }
        ModelClient.ToolFunction.FunctionParameters params = new ModelClient.ToolFunction.FunctionParameters();
        params.setType(ModelClient.ResponseFormatType.Object);
        params.setProperties(properties);
        params.setRequired(schema.getRequired());
        return params;
    }
    @Override
    public String getName() {
        return name;
    }
    @Override
    public String getDescription() {
        return description;
    }
    /**
     * Schema with a single required string property "input".
     */
    @Override
    public ToolParameterSchema getParametersSchema() {
        // Create schema POJO
        ToolParameterSchema.PropertySchema inputProperty = ToolParameterSchema.PropertySchema.builder()
            .type("string")
            .description("The input text to process")
            .build();
        return ToolParameterSchema.builder()
            .type("object")
            .properties(Map.of("input", inputProperty))
            .required(List.of("input"))
            .build();
    }
    @Override
    public Class<SimpleToolArguments> getArgumentType() {
        return SimpleToolArguments.class;
    }
    /**
     * Forwards the "input" argument to the wrapped agent and returns its output.
     */
    @Override
    public String execute(SimpleToolArguments arguments) throws Exception {
        // Execute the wrapped agent
        String result = agent.execute(arguments.getInput());
        log.debug("AgentAsTool '{}' executed successfully", name);
        return result;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/tool/SimpleToolArguments.java
|
package ai.driftkit.workflow.engine.agent.tool;
import lombok.Data;
import lombok.EqualsAndHashCode;
/**
 * Simple tool arguments with a single input field.
 * Used for AgentAsTool and other simple tools.
 * <p>
 * callSuper=true so equality also covers the raw arguments map inherited
 * from {@link ToolArguments}.
 */
@Data
@EqualsAndHashCode(callSuper = true)
public class SimpleToolArguments extends ToolArguments {
    // The single text input forwarded to the wrapped agent/tool
    private String input;
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/tool/Tool.java
|
package ai.driftkit.workflow.engine.agent.tool;
/**
 * Interface for tools that can be used by agents.
 * Tools provide functionality that agents can call to perform specific tasks.
 *
 * @param <T> the typed argument class this tool accepts; the framework parses
 *            JSON arguments into this type before calling {@link #execute}
 */
public interface Tool<T extends ToolArguments> {
    /**
     * Get the name of the tool.
     *
     * @return The tool's name
     */
    String getName();
    /**
     * Get the description of what the tool does.
     *
     * @return The tool's description
     */
    String getDescription();
    /**
     * Get the parameter schema for the tool.
     *
     * @return The parameters schema as POJO
     */
    ToolParameterSchema getParametersSchema();
    /**
     * Get the argument type class for this tool.
     * Used by the framework to parse JSON arguments into typed objects.
     *
     * @return The argument type class
     */
    Class<T> getArgumentType();
    /**
     * Execute the tool with the given arguments.
     *
     * @param arguments The parsed arguments as a typed object
     * @return The result of the tool execution
     * @throws Exception if tool execution fails
     */
    String execute(T arguments) throws Exception;
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/tool/ToolArguments.java
|
package ai.driftkit.workflow.engine.agent.tool;
import lombok.Data;
import java.util.Map;
/**
 * Base class for tool arguments.
 * Specific tools should extend this class or use the generic version,
 * reading values from the raw arguments map via the accessors below.
 */
@Data
public class ToolArguments {
    /**
     * Raw arguments as a map for flexible tools.
     */
    private Map<String, Object> arguments;

    /**
     * Looks up a single argument value.
     *
     * @param key The argument key
     * @return The argument value, or null when absent or no map is set
     */
    public Object get(String key) {
        if (arguments == null) {
            return null;
        }
        return arguments.get(key);
    }

    /**
     * Looks up a single argument value rendered as a string.
     *
     * @param key The argument key
     * @return The value's toString(), or null when the value is absent
     */
    public String getString(String key) {
        Object value = get(key);
        if (value == null) {
            return null;
        }
        return value.toString();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent
|
java-sources/ai/driftkit/driftkit-workflow-engine-agents/0.8.1/ai/driftkit/workflow/engine/agent/tool/ToolParameterSchema.java
|
package ai.driftkit.workflow.engine.agent.tool;
import lombok.Builder;
import lombok.Data;
import java.util.List;
import java.util.Map;
/**
 * POJO for tool parameter schema definition.
 * This class represents the JSON schema for tool parameters
 * (type / properties / required at the top level).
 */
@Data
@Builder
public class ToolParameterSchema {
    // JSON-schema type of the root, typically "object"
    private String type;
    // Named properties keyed by parameter name
    private Map<String, PropertySchema> properties;
    // Names of properties that callers must supply
    private List<String> required;
    /**
     * Schema for a single property; supports nested objects and arrays.
     */
    @Data
    @Builder
    public static class PropertySchema {
        private String type;
        private String description;
        // Allowed values when the property is an enum
        private List<String> enumValues;
        private PropertySchema items; // For array types
        private Map<String, PropertySchema> properties; // For object types
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/analyzer/MethodAnalyzer.java
|
package ai.driftkit.workflow.engine.analyzer;
import ai.driftkit.workflow.engine.core.StepResult;
import ai.driftkit.workflow.engine.core.WorkflowContext;
import lombok.experimental.UtilityClass;
import lombok.extern.slf4j.Slf4j;
import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.WildcardType;
import java.util.Arrays;
import java.util.concurrent.CompletableFuture;
/**
* Analyzes method signatures and return types for workflow steps.
* Extracts type information and validates method signatures.
*/
@Slf4j
@UtilityClass
public class MethodAnalyzer {
/**
* Validates that a step method has a valid signature.
*
* @param method The method to validate
* @throws IllegalArgumentException if the method signature is invalid
*/
public static void validateStepMethod(Method method) {
Class<?> returnType = method.getReturnType();
// Check return type
if (!StepResult.class.isAssignableFrom(returnType) &&
!CompletableFuture.class.isAssignableFrom(returnType)) {
throw new IllegalArgumentException(
"Step method must return StepResult or CompletableFuture<StepResult>: " +
method.getName() + " returns " + returnType.getName()
);
}
// Validate CompletableFuture generic type
if (CompletableFuture.class.isAssignableFrom(returnType)) {
Type genericReturnType = method.getGenericReturnType();
if (!(genericReturnType instanceof ParameterizedType pt)) {
throw new IllegalArgumentException(
"CompletableFuture must be parameterized: " + method.getName()
);
}
Type[] actualTypeArguments = pt.getActualTypeArguments();
if (actualTypeArguments.length == 0) {
throw new IllegalArgumentException(
"CompletableFuture must have type parameter: " + method.getName()
);
}
Type actualType = actualTypeArguments[0];
if (!isStepResultType(actualType)) {
throw new IllegalArgumentException(
"CompletableFuture must contain StepResult type: " + method.getName() +
" contains " + actualType.getTypeName()
);
}
}
// Validate parameter count
Class<?>[] paramTypes = method.getParameterTypes();
if (paramTypes.length > 2) {
throw new IllegalArgumentException(
"Step method can have at most 2 parameters (input and context): " +
method.getName() + " has " + paramTypes.length
);
}
// Validate parameter types
int contextParams = 0;
int inputParams = 0;
for (Class<?> paramType : paramTypes) {
if (WorkflowContext.class.isAssignableFrom(paramType)) {
contextParams++;
} else {
inputParams++;
}
}
if (contextParams > 1) {
throw new IllegalArgumentException(
"Step method can have at most one WorkflowContext parameter: " + method.getName()
);
}
if (inputParams > 1) {
throw new IllegalArgumentException(
"Step method can have at most one input parameter: " + method.getName()
);
}
}
    /**
     * Analyzes method parameters to populate StepInfo.
     * <p>
     * WorkflowContext-typed parameters mark the step as context-requiring;
     * the single remaining parameter (if any) becomes the input, defaulting
     * to Object when the method takes no input at all.
     *
     * @param stepInfo The StepInfo to populate
     */
    public static void analyzeMethodParameters(StepInfo stepInfo) {
        Method method = stepInfo.getMethod();
        Class<?>[] paramTypes = method.getParameterTypes();
        Type[] genericParamTypes = method.getGenericParameterTypes();
        // Reset before re-analysis so stale values never survive
        stepInfo.setRequiresContext(false);
        stepInfo.setInputType(null);
        // Process parameters in order, recording the index of each role
        for (int i = 0; i < paramTypes.length; i++) {
            Class<?> paramType = paramTypes[i];
            if (WorkflowContext.class.isAssignableFrom(paramType)) {
                stepInfo.setRequiresContext(true);
                stepInfo.setContextParameterIndex(i);
            } else {
                // This is the input parameter
                stepInfo.setInputType(paramType);
                stepInfo.setInputParameterIndex(i);
                // Store generic type information if available (e.g. List<Foo>)
                if (genericParamTypes[i] instanceof ParameterizedType pt) {
                    stepInfo.setGenericInputType(pt);
                }
            }
        }
        // If no input type found, default to Object
        if (stepInfo.getInputType() == null) {
            stepInfo.setInputType(Object.class);
            log.debug("Step {} has no input parameter, defaulting to Object type", stepInfo.getId());
        }
    }
    /**
     * Analyzes the return type to determine possible next steps.
     * <p>
     * Unwraps CompletableFuture, extracts the T from StepResult&lt;T&gt;, and:
     * for a sealed T records every permitted subclass as a branch target;
     * for a concrete non-void T records it as the continue type.
     *
     * @param stepInfo The StepInfo to populate with return type information
     */
    public static void analyzeReturnType(StepInfo stepInfo) {
        Method method = stepInfo.getMethod();
        Type returnType = method.getGenericReturnType();
        // Handle CompletableFuture unwrapping: work with its type argument instead
        if (returnType instanceof ParameterizedType pt &&
            CompletableFuture.class.isAssignableFrom(method.getReturnType())) {
            returnType = pt.getActualTypeArguments()[0];
        }
        // Extract the inner type from StepResult<T>; note `pt` here is a new
        // pattern variable, definitely assigned in the fall-through path
        if (!(returnType instanceof ParameterizedType pt) || !isStepResultType(pt.getRawType())) {
            // Raw StepResult without type parameter
            log.warn("Step {} returns raw StepResult without type parameter. " +
                "Consider adding type parameter for better type safety.", stepInfo.getId());
            stepInfo.setPossibleContinueType(Object.class);
            return;
        }
        Type[] typeArgs = pt.getActualTypeArguments();
        if (typeArgs.length == 0) {
            return;
        }
        Type innerType = typeArgs[0];
        // Handle wildcards: use the upper bound (e.g. ? extends Foo -> Foo)
        if (innerType instanceof WildcardType wt) {
            Type[] upperBounds = wt.getUpperBounds();
            if (upperBounds.length > 0) {
                innerType = upperBounds[0];
            }
        }
        // Check if it's a sealed interface or class
        if (innerType instanceof Class<?> clazz) {
            if (clazz.isSealed()) {
                // Get all permitted subclasses — each is a possible branch target
                Class<?>[] permitted = clazz.getPermittedSubclasses();
                stepInfo.getPossibleBranchTypes().addAll(Arrays.asList(permitted));
                log.debug("Step {} can branch to {} types via sealed interface {}",
                    stepInfo.getId(), permitted.length, clazz.getSimpleName());
            } else if (!clazz.equals(Void.class) && !clazz.equals(void.class)) {
                // Single concrete type for Continue
                stepInfo.setPossibleContinueType(clazz);
                log.debug("Step {} continues with type {}",
                    stepInfo.getId(), clazz.getSimpleName());
            }
        }
        // Store the complete return type info
        stepInfo.setReturnTypeInfo(new StepInfo.ReturnTypeInfo(pt, innerType));
    }
/**
* Checks if a type represents StepResult.
*/
private static boolean isStepResultType(Type type) {
if (type instanceof Class<?> clazz) {
return StepResult.class.isAssignableFrom(clazz);
}
if (type instanceof ParameterizedType pt) {
return isStepResultType(pt.getRawType());
}
return false;
}
/**
* Extracts the inner type from StepResult<T> or CompletableFuture<StepResult<T>>.
*
* @param type The generic type to extract from
* @return The inner type class, or Object.class if it cannot be determined
*/
public static Class<?> extractStepResultType(Type type) {
if (!(type instanceof ParameterizedType paramType)) {
return Object.class;
}
Type rawType = paramType.getRawType();
// Handle CompletableFuture<StepResult<T>>
if (rawType instanceof Class<?> clazz && CompletableFuture.class.isAssignableFrom(clazz)) {
Type[] typeArgs = paramType.getActualTypeArguments();
if (typeArgs.length > 0 && typeArgs[0] instanceof ParameterizedType innerType) {
return extractStepResultType(innerType);
}
}
// Handle StepResult<T>
if (rawType instanceof Class<?> clazz && StepResult.class.isAssignableFrom(clazz)) {
Type[] typeArgs = paramType.getActualTypeArguments();
if (typeArgs.length > 0 && typeArgs[0] instanceof Class<?> resultClass) {
return resultClass;
}
}
return Object.class;
}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/analyzer/StepInfo.java
|
package ai.driftkit.workflow.engine.analyzer;
import ai.driftkit.workflow.engine.annotations.OnInvocationsLimit;
import ai.driftkit.workflow.engine.annotations.RetryPolicy;
import lombok.Builder;
import lombok.Data;
import java.lang.reflect.Method;
import java.lang.reflect.Type;
import java.util.HashSet;
import java.util.Set;
/**
 * Information about a workflow step extracted from annotations.
 * This is an intermediate representation used during graph construction.
 * Lombok generates getters/setters ({@code @Data}) and a builder
 * ({@code @Builder}); final fields are only settable through the builder,
 * while the mutable fields are populated later by the analyzers.
 */
@Data
@Builder
public class StepInfo {
    // Unique step identifier (annotation value/id or method name).
    private final String id;
    // The reflected step method this descriptor was built from.
    private final Method method;
    // Workflow instance the method is invoked on.
    private final Object instance;
    // True when this step is the workflow entry point.
    @Builder.Default
    private final boolean isInitial = false;
    private final String description;
    // Execution order and control
    @Builder.Default
    private final int index = 0;
    // Timeout for step execution in milliseconds; -1 means no timeout.
    @Builder.Default
    private final long timeoutMs = -1;
    // Type information (filled by MethodAnalyzer.analyzeMethodParameters)
    private Class<?> inputType;
    private Type genericInputType;
    // Index of the input parameter in the method signature; -1 if absent.
    private int inputParameterIndex = -1;
    // Flow control fields from annotation
    private final Class<?>[] nextClasses;
    private final String[] nextSteps;
    private final String condition;
    private final String onTrue;
    private final String onFalse;
    // Context requirements (filled by MethodAnalyzer.analyzeMethodParameters)
    private boolean requiresContext;
    // Index of the WorkflowContext parameter; -1 if the method takes none.
    private int contextParameterIndex = -1;
    // Return type analysis (filled by MethodAnalyzer.analyzeReturnType)
    private Class<?> possibleContinueType;
    @Builder.Default
    private final Set<Class<?>> possibleBranchTypes = new HashSet<>();
    private ReturnTypeInfo returnTypeInfo;
    // Retry configuration
    private RetryPolicy retryPolicy;
    @Builder.Default
    private int invocationLimit = 100;
    @Builder.Default
    private OnInvocationsLimit onInvocationsLimit = OnInvocationsLimit.ERROR;
    /**
     * Holds detailed return type information.
     *
     * @param rawType   the full parameterized return type (e.g. StepResult&lt;T&gt;)
     * @param innerType the extracted payload type T
     */
    public record ReturnTypeInfo(
        Type rawType,
        Type innerType
    ) {}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/analyzer/TypeMatcher.java
|
package ai.driftkit.workflow.engine.analyzer;
import lombok.experimental.UtilityClass;
import lombok.extern.slf4j.Slf4j;
import java.util.*;
/**
 * Utility class for type matching and compatibility checks.
 * Centralizes the logic that decides whether the output of one workflow step
 * can be fed into the input of another, including primitive/wrapper handling
 * and common-superclass resolution.
 */
@Slf4j
@UtilityClass
public class TypeMatcher {
    // Lookup table from primitive types to their boxed counterparts.
    private static final Map<Class<?>, Class<?>> PRIMITIVE_TO_WRAPPER = Map.of(
        boolean.class, Boolean.class,
        byte.class, Byte.class,
        char.class, Character.class,
        short.class, Short.class,
        int.class, Integer.class,
        long.class, Long.class,
        float.class, Float.class,
        double.class, Double.class
    );
    /**
     * Checks if a source type is compatible with a target type.
     * Handles inheritance, primitive/wrapper compatibility, and null cases.
     *
     * @param sourceType The type being provided
     * @param targetType The type expected
     * @return true if the types are compatible
     */
    public static boolean isTypeCompatible(Class<?> sourceType, Class<?> targetType) {
        // A null or Object target accepts anything, even an unknown source.
        if (targetType == null || targetType == Object.class) {
            return true;
        }
        // An unknown source cannot satisfy a concrete target.
        if (sourceType == null) {
            return false;
        }
        // Plain subtype relationship.
        if (targetType.isAssignableFrom(sourceType)) {
            return true;
        }
        // Fall back to boxed comparison when a primitive is involved.
        boolean primitiveInvolved = targetType.isPrimitive() || sourceType.isPrimitive();
        return primitiveInvolved && isCompatiblePrimitive(sourceType, targetType);
    }
    /**
     * Checks compatibility between primitive types and their wrappers by
     * boxing both sides and comparing the wrapper classes.
     */
    private static boolean isCompatiblePrimitive(Class<?> source, Class<?> target) {
        Class<?> boxedSource = PRIMITIVE_TO_WRAPPER.getOrDefault(source, source);
        Class<?> boxedTarget = PRIMITIVE_TO_WRAPPER.getOrDefault(target, target);
        return boxedTarget.isAssignableFrom(boxedSource);
    }
    /**
     * Finds the most specific common superclass of a set of classes.
     * Used for determining workflow output type when multiple Finish types exist.
     *
     * @param classes Set of classes to find common superclass for
     * @return The most specific common superclass
     */
    public static Class<?> findCommonSuperclass(Set<Class<?>> classes) {
        // Fold the pairwise resolver over the whole set; empty -> Object.
        return classes.stream()
            .reduce(TypeMatcher::findCommonSuperclass)
            .orElse(Object.class);
    }
    /**
     * Finds the most specific common superclass of two classes.
     */
    private static Class<?> findCommonSuperclass(Class<?> first, Class<?> second) {
        if (first.isAssignableFrom(second)) {
            return first;
        }
        if (second.isAssignableFrom(first)) {
            return second;
        }
        // Climb the first type's superclass chain until one covers the second.
        Class<?> ancestor = first.getSuperclass();
        while (ancestor != null && !ancestor.isAssignableFrom(second)) {
            ancestor = ancestor.getSuperclass();
        }
        return ancestor == null ? Object.class : ancestor;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/analyzer/TypeUtils.java
|
package ai.driftkit.workflow.engine.analyzer;
import ai.driftkit.common.domain.chat.ChatRequest;
import ai.driftkit.workflow.engine.core.StepResult;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
import ai.driftkit.workflow.engine.schema.SchemaUtils;
import lombok.extern.slf4j.Slf4j;
import java.lang.reflect.Type;
import java.util.HashMap;
import java.util.Map;
/**
 * Utility class for type analysis and conversion operations.
 * Contains static methods for type checking and ChatRequest conversions.
 */
@Slf4j
public final class TypeUtils {
    private TypeUtils() {
        // Utility class - no instances.
    }
    /**
     * Extracts the result type from a StepResult generic type.
     * Delegates to MethodAnalyzer for the actual extraction.
     *
     * @param type The generic type to extract from
     * @return The extracted type class
     */
    public static Class<?> extractStepResultType(Type type) {
        return MethodAnalyzer.extractStepResultType(type);
    }
    /**
     * Checks if a source type is compatible with a target type.
     * Delegates to TypeMatcher for the actual compatibility check.
     *
     * @param sourceType The source type (output from previous step)
     * @param targetType The target type (input of next step)
     * @return true if types are compatible
     */
    public static boolean isTypeCompatible(Class<?> sourceType, Class<?> targetType) {
        return TypeMatcher.isTypeCompatible(sourceType, targetType);
    }
    /**
     * Checks if a type represents StepResult.Finish.
     *
     * @param rawType The raw type to check
     * @return true if the type is StepResult.Finish
     */
    public static boolean isFinishType(Type rawType) {
        return rawType instanceof Class<?> clazz
            && StepResult.Finish.class.isAssignableFrom(clazz);
    }
    /**
     * Converts input to the appropriate type for workflow execution.
     * Handles ChatRequest conversion based on schema name or workflow's expected input type.
     *
     * @param input The input to convert
     * @param graph The workflow graph
     * @param workflowId The workflow ID for logging
     * @param <T> The expected type
     * @return The converted input or the original if no conversion needed
     */
    @SuppressWarnings("unchecked")
    public static <T> T convertInputForWorkflow(T input, WorkflowGraph<?, ?> graph, String workflowId) {
        // Only ChatRequest inputs are eligible for conversion.
        if (!(input instanceof ChatRequest chatRequest)) {
            return input;
        }
        // Schema-based conversion takes precedence when a schema name is present.
        String schemaName = chatRequest.getRequestSchemaName();
        if (schemaName != null) {
            Class<?> schemaClass = SchemaUtils.getSchemaClass(schemaName);
            if (schemaClass != null) {
                Object converted = convertChatRequestToClass(chatRequest, schemaClass);
                if (converted != null) {
                    log.debug("Converted ChatRequest to {} using schema name: {}",
                            schemaClass.getSimpleName(), schemaName);
                    return (T) converted;
                }
            }
            log.warn("Schema not found in registry: {}, using ChatRequest as-is", schemaName);
            return input;
        }
        // No schema name: fall back to the initial step's declared input type.
        StepNode entryNode = graph.nodes().get(graph.initialStepId());
        if (entryNode == null || entryNode.executor() == null) {
            return input;
        }
        Class<?> expectedType = entryNode.executor().getInputType();
        boolean noConversionNeeded = expectedType == null
            || expectedType == void.class
            || ChatRequest.class.isAssignableFrom(expectedType);
        if (noConversionNeeded) {
            return input;
        }
        Object converted = convertChatRequestToClass(chatRequest, expectedType);
        if (converted == null) {
            return input;
        }
        log.debug("Converted ChatRequest to {} for workflow {}",
                expectedType.getSimpleName(), workflowId);
        return (T) converted;
    }
    /**
     * Resolves the expected input type from a ChatRequest.
     * Used for resume operations where we need to determine the actual type.
     *
     * @param chatRequest The chat request
     * @param defaultType The default type if no schema is found
     * @return The resolved input type class
     */
    public static Class<?> resolveInputType(ChatRequest chatRequest, Class<?> defaultType) {
        String schemaName = chatRequest.getRequestSchemaName();
        if (schemaName == null) {
            return defaultType;
        }
        // Look up the registered class behind the schema name, if any.
        Class<?> resolved = SchemaUtils.getSchemaClass(schemaName);
        if (resolved == null) {
            return defaultType;
        }
        log.debug("ChatRequest with schema {}, resolved to class {}",
                schemaName, resolved.getName());
        return resolved;
    }
    /**
     * Converts ChatRequest to a specific class type.
     *
     * @param chatRequest The chat request to convert
     * @param targetClass The target class to convert to
     * @return The converted object or null if conversion fails
     */
    public static Object convertChatRequestToClass(ChatRequest chatRequest, Class<?> targetClass) {
        try {
            return SchemaUtils.createInstance(targetClass, chatRequest.getPropertiesMap());
        } catch (Exception e) {
            // Conversion failures are non-fatal; the caller falls back to the raw request.
            log.warn("Failed to convert ChatRequest to {}: {}",
                    targetClass.getSimpleName(), e.getMessage());
            return null;
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/annotations/AsyncStep.java
|
package ai.driftkit.workflow.engine.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.util.Map;
/**
 * Marks a method as an asynchronous workflow step handler.
 * Async steps handle asynchronous operations such as long-running tasks,
 * external API calls, or background processing.
 *
 * <p>The async step is identified by its value, which must match the taskId
 * used in StepResult.Async returned by a regular step.</p>
 *
 * <p>Async step methods must return a StepResult type and have signatures like:</p>
 * <ul>
 *   <li>{@code StepResult<?> handleAsyncTask(Map<String, Object> taskArgs, WorkflowContext context, AsyncProgressReporter progress)}</li>
 * </ul>
 *
 * <p>Example usage:</p>
 * <pre>{@code
 * @Step("process-order")
 * public StepResult<OrderTask> processOrder(OrderRequest request) {
 *     // Prepare async task
 *     Map<String, Object> taskArgs = Map.of("orderId", request.getOrderId());
 *     WorkflowEvent immediateEvent = WorkflowEvent.builder()
 *         .properties(Map.of("status", "PROCESSING"))
 *         .build();
 *
 *     return new StepResult.Async<>(
 *         "process-order-async",  // taskId matching @AsyncStep value
 *         taskArgs,
 *         immediateEvent
 *     );
 * }
 *
 * @AsyncStep("process-order-async")
 * public StepResult<OrderResult> processOrderAsync(Map<String, Object> taskArgs,
 *                                                  WorkflowContext context,
 *                                                  AsyncProgressReporter progress) {
 *     // Handle async processing with progress updates
 *     String orderId = (String) taskArgs.get("orderId");
 *
 *     progress.updateProgress(10, "Validating order");
 *     validateOrder(orderId);
 *
 *     progress.updateProgress(50, "Processing payment");
 *     processPayment(orderId);
 *
 *     progress.updateProgress(90, "Finalizing order");
 *     OrderResult result = orderService.finalizeOrder(orderId);
 *
 *     return new StepResult.Continue<>(result);
 * }
 * }</pre>
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface AsyncStep {
    /**
     * The unique identifier for this async step.
     * This must match the taskId used in StepResult.Async.
     * Note: this element has no default, so every usage must supply it.
     *
     * @return The async step ID
     */
    String value();
    /**
     * Human-readable description of what this async step handles.
     * Empty by default.
     *
     * @return The step description
     */
    String description() default "";
    /**
     * The expected input class for this async step.
     * Used for type validation and routing async results.
     * Defaults to {@code Map.class}, matching the raw task-args signature
     * shown in the class-level example.
     *
     * @return The input class type
     */
    Class<?> inputClass() default Map.class;
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/annotations/InitialStep.java
|
package ai.driftkit.workflow.engine.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a method as the initial step of a workflow.
 * Each workflow must have exactly one method annotated with @InitialStep.
 * This method will be the entry point when the workflow execution starts.
 *
 * <p>The initial step method can have the following signatures:</p>
 * <ul>
 *   <li>{@code StepResult<?> methodName(InputType input)}</li>
 *   <li>{@code StepResult<?> methodName(InputType input, WorkflowContext context)}</li>
 *   <li>{@code CompletableFuture<StepResult<?>> methodName(InputType input)} (async)</li>
 * </ul>
 *
 * <p>Example usage:</p>
 * <pre>{@code
 * @InitialStep
 * public StepResult<UserData> validateInput(RegistrationRequest request) {
 *     // validation logic
 *     return new Continue<>(userData);
 * }
 * }</pre>
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface InitialStep {
    /**
     * Optional description of what this initial step does.
     * If not provided, a default description will be generated.
     * Empty by default.
     *
     * @return The step description
     */
    String description() default "";
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/annotations/OnInvocationsLimit.java
|
package ai.driftkit.workflow.engine.annotations;
/**
 * Defines the behavior when a step reaches its invocation limit.
 * This is used to control what happens when a step has been executed
 * too many times within a single workflow execution.
 * See {@code Step#invocationLimit()} for where the limit itself is declared.
 */
public enum OnInvocationsLimit {
    /**
     * Throw an error when the invocation limit is reached.
     * This will cause the workflow to fail with an exception.
     * This is the default behavior declared on {@code @Step}.
     */
    ERROR,
    /**
     * Stop the workflow gracefully when the limit is reached.
     * The workflow will complete with the last successful result.
     */
    STOP,
    /**
     * Continue execution despite reaching the limit.
     * The step will continue to execute, but a warning will be logged.
     * Use with caution as this may lead to infinite loops.
     */
    CONTINUE
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/annotations/RetryPolicy.java
|
package ai.driftkit.workflow.engine.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Defines retry behavior for workflow steps.
 * This annotation can be used within the @Step annotation to configure
 * how failed step executions should be retried.
 * Note the {@code @Target(ANNOTATION_TYPE)}: it can only appear as a member
 * of another annotation (e.g. {@code @Step(retryPolicy = ...)}), not directly
 * on a method.
 *
 * <p>Example usage:</p>
 * <pre>{@code
 * @Step(
 *     retryPolicy = @RetryPolicy(
 *         maxAttempts = 3,
 *         delay = 1000,
 *         backoffMultiplier = 2.0,
 *         maxDelay = 10000
 *     )
 * )
 * public StepResult<Result> processStep(Input input) {
 *     // Step logic that might fail
 * }
 * }</pre>
 */
@Target({ElementType.ANNOTATION_TYPE})
@Retention(RetentionPolicy.RUNTIME)
public @interface RetryPolicy {
    /**
     * Maximum number of retry attempts.
     * Default is 3 attempts (initial + 2 retries).
     * Set to 1 to disable retries.
     *
     * @return Maximum retry attempts
     */
    int maxAttempts() default 3;
    /**
     * Initial delay between retries in milliseconds.
     * Default is 1000ms (1 second).
     *
     * @return Initial delay in milliseconds
     */
    long delay() default 1000;
    /**
     * Multiplier for exponential backoff.
     * Each retry delay will be multiplied by this factor.
     * Default is 1.0 (no backoff, constant delay).
     * Set to 2.0 for exponential backoff (1s, 2s, 4s, 8s...).
     *
     * @return Backoff multiplier
     */
    double backoffMultiplier() default 1.0;
    /**
     * Maximum delay between retries in milliseconds.
     * Prevents exponential backoff from growing indefinitely.
     * Default is 60000ms (1 minute).
     *
     * @return Maximum delay in milliseconds
     */
    long maxDelay() default 60000;
    /**
     * Jitter factor for randomizing retry delays.
     * Value between 0.0 and 1.0, where 0.0 means no jitter
     * and 1.0 means up to 100% randomization.
     * Default is 0.1 (10% jitter).
     * NOTE(review): the range is not enforced by this annotation itself;
     * presumably the retry executor validates or clamps it - confirm.
     *
     * @return Jitter factor
     */
    double jitterFactor() default 0.1;
    /**
     * Exception types that should trigger a retry.
     * If empty, all exceptions will trigger retry.
     *
     * @return Array of retryable exception types
     */
    Class<? extends Throwable>[] retryOn() default {};
    /**
     * Exception types that should abort retry attempts.
     * These exceptions will not trigger retries even if they
     * are subclasses of exceptions in retryOn.
     *
     * @return Array of non-retryable exception types
     */
    Class<? extends Throwable>[] abortOn() default {};
    /**
     * Whether to retry on StepResult.Fail results.
     * Default is false (only exceptions trigger retry).
     *
     * @return True to retry on fail results
     */
    boolean retryOnFailResult() default false;
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/annotations/Step.java
|
package ai.driftkit.workflow.engine.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a method as a workflow step.
 * Methods annotated with @Step will be included in the workflow graph
 * and can be targets of transitions from other steps.
 *
 * <p>The step ID is determined in the following order:</p>
 * <ol>
 *   <li>Explicit value provided in the annotation</li>
 *   <li>Explicit id provided in the annotation</li>
 *   <li>Method name (if both value and id are empty)</li>
 * </ol>
 *
 * <p>Step methods must return a StepResult type and can have various signatures:</p>
 * <ul>
 *   <li>{@code StepResult<?> methodName(InputType input)}</li>
 *   <li>{@code StepResult<?> methodName(InputType input, WorkflowContext context)}</li>
 *   <li>{@code StepResult<?> methodName(WorkflowContext context)} (for steps that only need context)</li>
 * </ul>
 *
 * <p>Example usage:</p>
 * <pre>{@code
 * @Step  // ID will be "processPayment"
 * public StepResult<Receipt> processPayment(PaymentDetails details) {
 *     // process payment
 *     return new Continue<>(receipt);
 * }
 *
 * @Step(value = "notify-user")
 * public StepResult<Void> sendNotification(Receipt receipt, WorkflowContext context) {
 *     String userId = context.getStepResult("validateUser", String.class);
 *     // send notification
 *     return new Finish<>(null);
 * }
 *
 * @Step(condition = "#result.success", onTrue = "processSuccess", onFalse = "handleError")
 * public StepResult<CheckResult> checkPayment(PaymentDetails details) {
 *     // check payment
 *     return new Branch<>(checkResult);
 * }
 * }</pre>
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface Step {
    /**
     * The unique identifier for this step.
     * Takes precedence over {@link #id()} when both are specified, per the
     * resolution order documented on this annotation. If both are empty,
     * the method name is used.
     *
     * @return The step ID (optional)
     */
    String value() default "";
    /**
     * Alternative ID field for compatibility.
     * If both value and id are specified, value takes precedence.
     *
     * @return The step ID (optional)
     */
    String id() default "";
    /**
     * Human-readable description of what this step does.
     * If not provided, a default description will be generated.
     *
     * @return The step description
     */
    String description() default "";
    /**
     * Execution order index for this step.
     * Steps with lower indices are registered first in the workflow.
     *
     * @return The execution order index
     */
    int index() default 0;
    /**
     * The expected input class for this step.
     * Used for type validation and schema generation.
     *
     * @return The input class type
     */
    Class<?> inputClass() default void.class;
    /**
     * Possible next step IDs this step can transition to.
     * If empty, the workflow engine will determine next steps based on type matching.
     *
     * @return Array of possible next step IDs
     */
    String[] nextSteps() default {};
    /**
     * The possible input classes for the next steps.
     * Used for type-based routing and schema generation.
     *
     * @return Array of possible next step input classes
     */
    Class<?>[] nextClasses() default {};
    /**
     * Spring Expression Language (SpEL) condition to evaluate for branching.
     * The condition has access to the step result and workflow context.
     *
     * @return The condition expression
     */
    String condition() default "";
    /**
     * Step ID to execute if the condition evaluates to true.
     * Only used when condition is specified.
     *
     * @return The step ID for true branch
     */
    String onTrue() default "";
    /**
     * Step ID to execute if the condition evaluates to false.
     * Only used when condition is specified.
     *
     * @return The step ID for false branch
     */
    String onFalse() default "";
    /**
     * Timeout in milliseconds for async step execution.
     * Only applicable when async is true. -1 means no timeout.
     *
     * @return The timeout in milliseconds
     */
    long timeoutMs() default -1;
    /**
     * Retry policy for this step.
     * Defines how the step should be retried on failure.
     *
     * @return The retry policy configuration
     */
    RetryPolicy retryPolicy() default @RetryPolicy();
    /**
     * Maximum number of times this step can be invoked within a single workflow execution.
     * This helps prevent infinite loops. Default is 100.
     * Set to -1 for unlimited invocations.
     *
     * @return The maximum invocation count
     */
    int invocationLimit() default 100;
    /**
     * Behavior when the invocation limit is reached.
     * Default is ERROR which will fail the workflow.
     *
     * @return The behavior on reaching invocation limit
     */
    OnInvocationsLimit onInvocationsLimit() default OnInvocationsLimit.ERROR;
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/annotations/Workflow.java
|
package ai.driftkit.workflow.engine.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a class as a workflow definition.
 * Classes annotated with @Workflow will be automatically discovered and registered
 * by the workflow engine during application startup.
 *
 * <p>Example usage:</p>
 * <pre>{@code
 * @Workflow(id = "user-onboarding", version = "2.0")
 * public class UserOnboardingWorkflow {
 *     // workflow implementation
 * }
 * }</pre>
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
public @interface Workflow {
    /**
     * Unique identifier for this workflow.
     * This ID is used to reference the workflow when starting execution.
     * Note: this element has no default, so every usage must supply it.
     *
     * @return The workflow ID
     */
    String id();
    /**
     * Version of the workflow definition.
     * Useful for managing workflow evolution and backward compatibility.
     *
     * @return The workflow version (default: "1.0")
     */
    String version() default "1.0";
    /**
     * Human-readable description of what this workflow does.
     * Empty by default.
     *
     * @return The workflow description
     */
    String description() default "";
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/async/InMemoryProgressTracker.java
|
package ai.driftkit.workflow.engine.async;
import ai.driftkit.workflow.engine.async.ProgressTracker.Progress.ProgressStatus;
import ai.driftkit.workflow.engine.domain.WorkflowEvent;
import lombok.extern.slf4j.Slf4j;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;
/**
 * In-memory implementation of ProgressTracker for development and testing.
 * For production, consider using Redis or database-backed implementation.
 * State lives in two ConcurrentHashMaps keyed by task ID; per-key updates go
 * through {@code compute} and are therefore atomic per key. The WorkflowEvent
 * objects stored in {@code executions} are mutated outside those compute
 * blocks, so an event update can interleave with a concurrent progress update
 * for the same task - acceptable for a dev/test tracker.
 */
@Slf4j
public class InMemoryProgressTracker implements ProgressTracker {
    // taskId -> most recent WorkflowEvent for that execution.
    private final Map<String, WorkflowEvent> executions = new ConcurrentHashMap<>();
    // taskId -> structured progress record (percent, message, status, timestamps).
    private final Map<String, Progress> progressMap = new ConcurrentHashMap<>();
    @Override
    public String generateTaskId() {
        // Random UUIDs are unique enough for an in-memory tracker.
        return UUID.randomUUID().toString();
    }
    @Override
    public void trackExecution(String taskId, WorkflowEvent event) {
        // Registers the event and initializes progress in the "started" state.
        executions.put(taskId, event);
        progressMap.put(taskId, Progress.started(taskId));
        log.debug("Tracking execution: taskId={}, event={}", taskId, event);
    }
    @Override
    public void updateExecutionStatus(String taskId, WorkflowEvent event) {
        // Replaces the stored event wholesale; progress is untouched.
        executions.put(taskId, event);
        log.debug("Updated execution status: taskId={}, event={}", taskId, event);
    }
    @Override
    public void updateProgress(String taskId, int percentComplete, String message) {
        // Atomically create-or-update the progress record for this task.
        progressMap.compute(taskId, (id, existing) -> {
            if (existing == null) {
                // No prior record: 100% immediately counts as COMPLETED.
                return new Progress(
                    taskId,
                    percentComplete,
                    message,
                    percentComplete == 100 ? ProgressStatus.COMPLETED : ProgressStatus.IN_PROGRESS,
                    System.currentTimeMillis(),
                    null
                );
            }
            return existing.withUpdate(percentComplete, message);
        });
        // Update the workflow event as well (outside the compute - not atomic
        // with the progress update above).
        WorkflowEvent event = executions.get(taskId);
        if (event != null) {
            event.updateProgress(percentComplete, message);
        }
        log.debug("Updated progress: taskId={}, percent={}, message={}", taskId, percentComplete, message);
    }
    @Override
    public Optional<WorkflowEvent> getExecution(String taskId) {
        return Optional.ofNullable(executions.get(taskId));
    }
    @Override
    public void removeExecution(String taskId) {
        // Drops both the event and the progress record for the task.
        executions.remove(taskId);
        progressMap.remove(taskId);
        log.debug("Removed execution: taskId={}", taskId);
    }
    @Override
    public <T> CompletableFuture<T> executeAsync(
            String taskId,
            WorkflowEvent initialEvent,
            Supplier<T> task) {
        trackExecution(taskId, initialEvent);
        // Runs on CompletableFuture's default executor (the common ForkJoinPool).
        return CompletableFuture.supplyAsync(() -> {
            try {
                log.debug("Starting async execution: taskId={}", taskId);
                T result = task.get();
                onComplete(taskId, result);
                return result;
            } catch (Exception e) {
                log.error("Async execution failed: taskId={}", taskId, e);
                onError(taskId, e);
                // Re-throw so the returned future completes exceptionally.
                throw new RuntimeException("Async execution failed", e);
            }
        });
    }
    @Override
    public void onComplete(String taskId, Object result) {
        // Mark progress COMPLETED, creating a terminal record if none exists.
        progressMap.compute(taskId, (id, existing) -> {
            if (existing == null) {
                return new Progress(taskId, 100, "Completed",
                    Progress.ProgressStatus.COMPLETED, System.currentTimeMillis(), System.currentTimeMillis());
            }
            return existing.completed();
        });
        // Update workflow event (string form of the result only).
        WorkflowEvent event = executions.get(taskId);
        if (event != null) {
            event.setCompleted(true);
            event.setPercentComplete(100);
            if (result != null) {
                event.addProperty("result", result.toString());
            }
        }
        log.debug("Task completed: taskId={}", taskId);
    }
    @Override
    public void onError(String taskId, Throwable error) {
        // Mark progress FAILED, creating a terminal record if none exists.
        progressMap.compute(taskId, (id, existing) -> {
            if (existing == null) {
                return new Progress(taskId, 0, error.getMessage(),
                    Progress.ProgressStatus.FAILED, System.currentTimeMillis(), System.currentTimeMillis());
            }
            return existing.failed(error.getMessage());
        });
        // Update workflow event with the error message.
        WorkflowEvent event = executions.get(taskId);
        if (event != null) {
            event.setError(error.getMessage());
            event.setCompleted(true);
        }
        log.error("Task failed: taskId={}", taskId, error);
    }
    @Override
    public Optional<Progress> getProgress(String taskId) {
        return Optional.ofNullable(progressMap.get(taskId));
    }
    @Override
    public boolean isCancelled(String taskId) {
        Progress progress = progressMap.get(taskId);
        return progress != null && progress.status() == Progress.ProgressStatus.CANCELLED;
    }
    @Override
    public boolean cancelTask(String taskId) {
        // compute returns the post-update value; despite the name, "existing"
        // holds the record AFTER the cancellation attempt.
        Progress existing = progressMap.compute(taskId, (id, progress) -> {
            if (progress == null ||
                progress.status() == Progress.ProgressStatus.COMPLETED ||
                progress.status() == Progress.ProgressStatus.FAILED) {
                return progress; // Can't cancel if not exists or already finished
            }
            return new Progress(taskId, progress.percentComplete(), "Cancelled",
                Progress.ProgressStatus.CANCELLED, progress.startTime(), System.currentTimeMillis());
        });
        boolean cancelled = existing != null && existing.status() == Progress.ProgressStatus.CANCELLED;
        if (cancelled) {
            // Update workflow event (repeated cancellation re-applies this;
            // the operation is idempotent in effect but logs each time).
            WorkflowEvent event = executions.get(taskId);
            if (event != null) {
                event.setError("Task cancelled");
                event.setCompleted(true);
            }
            log.info("Task cancelled: taskId={}", taskId);
        }
        return cancelled;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/async/ProgressTracker.java
|
package ai.driftkit.workflow.engine.async;
import ai.driftkit.workflow.engine.domain.WorkflowEvent;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;
/**
 * Service interface for tracking asynchronous workflow execution and progress.
 * Ported from AsyncResponseTracker in driftkit-chat-assistant-framework
 */
public interface ProgressTracker {

    /**
     * Generates a unique identifier for a task/response.
     *
     * @return a unique task ID
     */
    String generateTaskId();

    /**
     * Starts tracking a workflow execution for asynchronous processing.
     *
     * @param taskId the task ID
     * @param event  the workflow event to track
     */
    void trackExecution(String taskId, WorkflowEvent event);

    /**
     * Replaces the tracked state of an execution.
     *
     * @param taskId the task ID
     * @param event  the updated event
     */
    void updateExecutionStatus(String taskId, WorkflowEvent event);

    /**
     * Records progress for a task.
     *
     * @param taskId          the task ID
     * @param percentComplete progress percentage (0-100)
     * @param message         human-readable progress message
     */
    void updateProgress(String taskId, int percentComplete, String message);

    /**
     * Looks up a tracked execution.
     *
     * @param taskId the task ID
     * @return the workflow event, if one is tracked under this ID
     */
    Optional<WorkflowEvent> getExecution(String taskId);

    /**
     * Stops tracking an execution.
     *
     * @param taskId the task ID to remove
     */
    void removeExecution(String taskId);

    /**
     * Runs a task asynchronously while tracking its progress.
     *
     * @param <T>          the result type
     * @param taskId       the task ID used for tracking
     * @param initialEvent the event returned to callers immediately
     * @param task         the work to execute
     * @return a future that completes when the task finishes
     */
    <T> CompletableFuture<T> executeAsync(
        String taskId,
        WorkflowEvent initialEvent,
        Supplier<T> task);

    /**
     * Marks a task as successfully completed.
     *
     * @param taskId the task ID
     * @param result the completion result
     */
    void onComplete(String taskId, Object result);

    /**
     * Marks a task as failed.
     *
     * @param taskId the task ID
     * @param error  the failure cause
     */
    void onError(String taskId, Throwable error);

    /**
     * Returns the current progress of a task, if known.
     *
     * @param taskId the task ID
     * @return progress information
     */
    Optional<Progress> getProgress(String taskId);

    /**
     * Reports whether a task has been cancelled.
     *
     * @param taskId the task ID
     * @return true if the task has been cancelled
     */
    boolean isCancelled(String taskId);

    /**
     * Requests cancellation of a task.
     *
     * @param taskId the task ID to cancel
     * @return true if the task was successfully cancelled
     */
    boolean cancelTask(String taskId);

    /**
     * Creates a progress reporter bound to a single task.
     * This replaces the separate AsyncProgressReporter interface.
     *
     * @param taskId the task ID
     * @return a reporter that delegates to this tracker
     */
    default TaskProgressReporter createReporter(String taskId) {
        final ProgressTracker tracker = this;
        return new TaskProgressReporter() {
            @Override
            public void updateProgress(int percentComplete, String message) {
                tracker.updateProgress(taskId, percentComplete, message);
            }

            @Override
            public void updateProgress(int percentComplete) {
                tracker.updateProgress(taskId, percentComplete,
                    "Processing... " + percentComplete + "%");
            }

            @Override
            public void updateMessage(String message) {
                // Keep the last known percentage (0 when nothing was recorded yet).
                int percent = tracker.getProgress(taskId)
                    .map(Progress::percentComplete)
                    .orElse(0);
                tracker.updateProgress(taskId, percent, message);
            }

            @Override
            public boolean isCancelled() {
                return tracker.isCancelled(taskId);
            }
        };
    }

    /**
     * Immutable snapshot of a task's progress.
     *
     * @param taskId          the task this snapshot belongs to
     * @param percentComplete progress percentage (0-100)
     * @param message         last reported status message
     * @param status          lifecycle status of the task
     * @param startTime       epoch millis when tracking started
     * @param endTime         epoch millis when the task finished, or null while running
     */
    record Progress(
        String taskId,
        int percentComplete,
        String message,
        ProgressStatus status,
        long startTime,
        Long endTime
    ) {
        /** Lifecycle states a tracked task can be in. */
        public enum ProgressStatus {
            PENDING,
            IN_PROGRESS,
            COMPLETED,
            FAILED,
            CANCELLED
        }

        /** Creates an IN_PROGRESS snapshot stamped with the current time. */
        public static Progress started(String taskId) {
            return new Progress(taskId, 0, "Started", ProgressStatus.IN_PROGRESS, System.currentTimeMillis(), null);
        }

        /** Returns a copy with new percentage/message; status and timestamps are kept. */
        public Progress withUpdate(int percentComplete, String message) {
            return new Progress(taskId, percentComplete, message, status, startTime, endTime);
        }

        /** Returns a COMPLETED copy at 100%, stamped with the current time. */
        public Progress completed() {
            return new Progress(taskId, 100, "Completed", ProgressStatus.COMPLETED, startTime, System.currentTimeMillis());
        }

        /** Returns a FAILED copy carrying the error text; percentage is preserved. */
        public Progress failed(String error) {
            return new Progress(taskId, percentComplete, error, ProgressStatus.FAILED, startTime, System.currentTimeMillis());
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/async/TaskProgressReporter.java
|
package ai.driftkit.workflow.engine.async;
/**
 * Unified interface for reporting task progress.
 * This combines the functionality of the former AsyncProgressReporter.
 */
public interface TaskProgressReporter {

    /**
     * Reports both the completion percentage and a status message.
     *
     * @param percentComplete progress percentage (0-100)
     * @param message         status message describing the current operation
     */
    void updateProgress(int percentComplete, String message);

    /**
     * Reports only the completion percentage.
     *
     * @param percentComplete progress percentage (0-100)
     */
    void updateProgress(int percentComplete);

    /**
     * Reports only a status message, leaving the percentage to the implementation.
     *
     * @param message status message
     */
    void updateMessage(String message);

    /**
     * Indicates whether the tracked operation has been cancelled.
     * Long-running handlers should poll this and stop early when it returns true.
     *
     * @return true if the operation has been cancelled
     */
    boolean isCancelled();
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/builder/FluentApiAsyncStepMetadata.java
|
package ai.driftkit.workflow.engine.builder;
import ai.driftkit.workflow.engine.annotations.AsyncStep;
import ai.driftkit.workflow.engine.async.TaskProgressReporter;
import ai.driftkit.workflow.engine.core.StepResult;
import ai.driftkit.workflow.engine.core.WorkflowAnalyzer.AsyncStepMetadata;
import ai.driftkit.workflow.engine.core.WorkflowContext;
import java.lang.reflect.Method;
import java.util.Map;
/**
 * Custom AsyncStepMetadata for FluentAPI that stores the TriFunction handler directly.
 * This allows async handlers to be registered without a workflowInstance.
 */
public class FluentApiAsyncStepMetadata extends AsyncStepMetadata {

    /** Handler invoked with (taskArgs, context, progress); never null. */
    private final WorkflowBuilder.TriFunction<Map<String, Object>, WorkflowContext, TaskProgressReporter, StepResult<?>> handler;

    /**
     * Creates metadata wrapping a directly-registered async handler.
     *
     * @param method     the async step method this metadata describes
     * @param annotation the AsyncStep annotation instance
     * @param handler    the function that processes the async task; must not be null
     * @throws IllegalArgumentException if handler is null
     */
    public FluentApiAsyncStepMetadata(Method method, AsyncStep annotation,
        WorkflowBuilder.TriFunction<Map<String, Object>, WorkflowContext, TaskProgressReporter, StepResult<?>> handler) {
        super(method, null, annotation); // null instance - we don't need it
        if (handler == null) {
            // Fail fast: a null handler would otherwise only surface as an NPE
            // much later, when invoke() is called during workflow execution.
            throw new IllegalArgumentException("Async handler cannot be null");
        }
        this.handler = handler;
    }

    /**
     * Gets the handler function that will process the async task.
     */
    public WorkflowBuilder.TriFunction<Map<String, Object>, WorkflowContext, TaskProgressReporter, StepResult<?>> getHandler() {
        return handler;
    }

    /**
     * Invokes the handler directly without needing an instance.
     *
     * @param taskArgs arguments passed to the async task
     * @param context  the current workflow context
     * @param progress reporter for incremental progress updates
     * @return the step result produced by the handler
     */
    public StepResult<?> invoke(Map<String, Object> taskArgs, WorkflowContext context, TaskProgressReporter progress) {
        return handler.apply(taskArgs, context, progress);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/builder/InternalRoutingMarker.java
|
package ai.driftkit.workflow.engine.builder;
/**
 * Marker interface for internal routing objects used by fluent API workflows.
 * Implementations carry branching decisions only; the engine must not hand
 * them to subsequent workflow steps as input data.
 */
public interface InternalRoutingMarker {
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/builder/MethodReferenceUtils.java
|
package ai.driftkit.workflow.engine.builder;
import lombok.extern.slf4j.Slf4j;
import java.lang.reflect.Field;
import java.util.function.Function;
/**
 * Utilities for working with method references.
 */
@Slf4j
public class MethodReferenceUtils {

    /**
     * Attempts to extract the target instance from a bound method reference.
     * This uses reflection to access the captured instance.
     *
     * @param function The function that might be a method reference
     * @return The target instance or null if not a method reference or extraction failed
     */
    public static Object extractTargetInstance(Object function) {
        if (function == null) {
            return null;
        }
        Class<?> clazz = function.getClass();
        // Lambda/method-reference classes are synthetic and their names contain
        // "$$Lambda". Before JDK 21 the name looks like "Foo$$Lambda$12/0x...";
        // from JDK 21 the numeric suffix is gone ("Foo$$Lambda/0x..."), so we
        // match the common "$$Lambda" marker rather than the pre-21 "$$Lambda$"
        // form, which silently fails on newer runtimes.
        if (!clazz.isSynthetic() || !clazz.getName().contains("$$Lambda")) {
            return null;
        }
        try {
            // Method references usually capture the target instance in a field named "arg$1"
            // This is implementation-specific but works for OpenJDK/Oracle JDK
            Field[] fields = clazz.getDeclaredFields();
            for (Field field : fields) {
                if (field.getName().startsWith("arg$")) {
                    field.setAccessible(true);
                    Object target = field.get(function);
                    if (target != null && !isPrimitive(target)) {
                        log.debug("Extracted target instance {} from method reference", target.getClass().getName());
                        return target;
                    }
                }
            }
        } catch (Exception e) {
            // setAccessible can fail under the module system (InaccessibleObjectException);
            // treat any failure as "not extractable" rather than an error.
            log.debug("Failed to extract target from method reference", e);
        }
        return null;
    }

    /**
     * Returns true for captured values that should never be reported as a
     * "target instance": strings and boxed primitives.
     */
    private static boolean isPrimitive(Object obj) {
        return obj instanceof Number || obj instanceof String || obj instanceof Boolean ||
               obj instanceof Character || obj.getClass().isPrimitive();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/builder/OnBuilder.java
|
package ai.driftkit.workflow.engine.builder;
import ai.driftkit.workflow.engine.core.WorkflowContext;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Function;
/**
 * Builder for multi-way branching (on/is/otherwise pattern).
 * Collects one sub-flow per selector value plus a mandatory fallback, then
 * attaches the whole branch to the parent workflow builder.
 */
public class OnBuilder<T, R, V> {
    private final WorkflowBuilder<T, R> parentBuilder;
    private final Function<WorkflowContext, V> selector;
    // LinkedHashMap keeps registration order so branch layout is deterministic.
    private final Map<V, Consumer<WorkflowBuilder<?, ?>>> cases = new LinkedHashMap<>();
    private Consumer<WorkflowBuilder<?, ?>> otherwiseCase;

    OnBuilder(WorkflowBuilder<T, R> parentBuilder, Function<WorkflowContext, V> selector) {
        this.parentBuilder = parentBuilder;
        this.selector = selector;
    }

    /**
     * Registers the sub-flow to run when the selector yields the given value.
     * Each value may be mapped at most once.
     */
    public OnBuilder<T, R, V> is(V value, Consumer<WorkflowBuilder<?, ?>> caseFlow) {
        if (value == null) {
            throw new IllegalArgumentException("Case value cannot be null");
        }
        if (caseFlow == null) {
            throw new IllegalArgumentException("Case flow cannot be null");
        }
        // putIfAbsent leaves the map untouched when the key already exists,
        // matching the original contains-then-put behavior.
        if (cases.putIfAbsent(value, caseFlow) != null) {
            throw new IllegalStateException("Duplicate case value: " + value);
        }
        return this;
    }

    /**
     * Registers the fallback sub-flow and finalizes this branch by adding a
     * multi-branch step to the parent builder.
     */
    public WorkflowBuilder<T, R> otherwise(Consumer<WorkflowBuilder<?, ?>> otherwiseFlow) {
        if (otherwiseFlow == null) {
            throw new IllegalArgumentException("Otherwise flow cannot be null");
        }
        if (cases.isEmpty()) {
            throw new IllegalStateException("Must define at least one 'is' case before 'otherwise'");
        }
        this.otherwiseCase = otherwiseFlow;
        // Hand the completed branch back to the parent builder.
        parentBuilder.addBuildStep(new WorkflowBuilder.MultiBranchStep<>(selector, cases, otherwiseCase));
        return parentBuilder;
    }

    Map<V, Consumer<WorkflowBuilder<?, ?>>> getCases() {
        return cases;
    }

    Consumer<WorkflowBuilder<?, ?>> getOtherwiseCase() {
        return otherwiseCase;
    }

    Function<WorkflowContext, V> getSelector() {
        return selector;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/builder/RetryPolicyBuilder.java
|
package ai.driftkit.workflow.engine.builder;
import ai.driftkit.workflow.engine.annotations.RetryPolicy;
import java.lang.annotation.Annotation;
/**
 * Builder for creating RetryPolicy instances programmatically.
 * This is useful when configuring retry behavior in fluent API without annotations.
 */
public class RetryPolicyBuilder {
    private int maxAttempts = 3;
    private long delay = 1000;
    private double backoffMultiplier = 1.0;
    private long maxDelay = 60000;
    private double jitterFactor = 0.1;
    private Class<? extends Throwable>[] retryOn = new Class[0];
    private Class<? extends Throwable>[] abortOn = new Class[0];
    private boolean retryOnFailResult = false;

    /** Creates a builder pre-loaded with the defaults above. */
    public static RetryPolicyBuilder retry() {
        return new RetryPolicyBuilder();
    }

    /** Maximum number of attempts (first try included). */
    public RetryPolicyBuilder withMaxAttempts(int maxAttempts) {
        this.maxAttempts = maxAttempts;
        return this;
    }

    /** Base delay between attempts, in milliseconds. */
    public RetryPolicyBuilder withDelay(long delay) {
        this.delay = delay;
        return this;
    }

    /** Multiplier applied to the delay after each failed attempt. */
    public RetryPolicyBuilder withBackoffMultiplier(double backoffMultiplier) {
        this.backoffMultiplier = backoffMultiplier;
        return this;
    }

    /** Upper bound for the (possibly backed-off) delay, in milliseconds. */
    public RetryPolicyBuilder withMaxDelay(long maxDelay) {
        this.maxDelay = maxDelay;
        return this;
    }

    /** Random jitter fraction applied to each delay. */
    public RetryPolicyBuilder withJitterFactor(double jitterFactor) {
        this.jitterFactor = jitterFactor;
        return this;
    }

    /** Exception types that trigger a retry (empty means retry on any). */
    @SafeVarargs
    public final RetryPolicyBuilder withRetryOn(Class<? extends Throwable>... retryOn) {
        this.retryOn = retryOn;
        return this;
    }

    /** Exception types that abort retrying immediately. */
    @SafeVarargs
    public final RetryPolicyBuilder withAbortOn(Class<? extends Throwable>... abortOn) {
        this.abortOn = abortOn;
        return this;
    }

    /** Whether a StepResult.Fail outcome should also be retried. */
    public RetryPolicyBuilder withRetryOnFailResult(boolean retryOnFailResult) {
        this.retryOnFailResult = retryOnFailResult;
        return this;
    }

    /**
     * Convenience method for exponential backoff with default settings.
     */
    public RetryPolicyBuilder exponentialBackoff() {
        return this.withBackoffMultiplier(2.0).withMaxDelay(30000);
    }

    /**
     * Convenience method for linear backoff (constant delay).
     */
    public RetryPolicyBuilder linearBackoff() {
        return this.withBackoffMultiplier(1.0);
    }

    /**
     * Builds the RetryPolicy instance.
     * The returned policy is an immutable snapshot of the builder's current
     * settings: mutating or reusing this builder afterwards does not affect it.
     */
    public RetryPolicy build() {
        // Snapshot every setting into final locals. Without this, the anonymous
        // class below would read the builder's live fields through the captured
        // outer `this`, so a policy already handed out would silently change
        // whenever the builder is mutated or reused. Arrays are cloned so the
        // policy does not share mutable state with the builder or callers.
        final int maxAttempts = this.maxAttempts;
        final long delay = this.delay;
        final double backoffMultiplier = this.backoffMultiplier;
        final long maxDelay = this.maxDelay;
        final double jitterFactor = this.jitterFactor;
        final Class<? extends Throwable>[] retryOn = this.retryOn.clone();
        final Class<? extends Throwable>[] abortOn = this.abortOn.clone();
        final boolean retryOnFailResult = this.retryOnFailResult;
        return new RetryPolicy() {
            @Override
            public Class<? extends Annotation> annotationType() {
                return RetryPolicy.class;
            }

            @Override
            public int maxAttempts() {
                return maxAttempts;
            }

            @Override
            public long delay() {
                return delay;
            }

            @Override
            public double backoffMultiplier() {
                return backoffMultiplier;
            }

            @Override
            public long maxDelay() {
                return maxDelay;
            }

            @Override
            public double jitterFactor() {
                return jitterFactor;
            }

            @Override
            public Class<? extends Throwable>[] retryOn() {
                return retryOn;
            }

            @Override
            public Class<? extends Throwable>[] abortOn() {
                return abortOn;
            }

            @Override
            public boolean retryOnFailResult() {
                return retryOnFailResult;
            }
        };
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/builder/StepDefinition.java
|
package ai.driftkit.workflow.engine.builder;
import ai.driftkit.workflow.engine.annotations.OnInvocationsLimit;
import ai.driftkit.workflow.engine.annotations.RetryPolicy;
import ai.driftkit.workflow.engine.core.StepResult;
import ai.driftkit.workflow.engine.core.WorkflowContext;
import ai.driftkit.workflow.engine.utils.ReflectionUtils;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import java.io.Serializable;
import java.util.function.BiFunction;
import java.util.function.Function;
/**
 * Definition of a workflow step for the Fluent API.
 * Automatically derives step ID from method reference when possible.
 */
@Slf4j
@Getter
public class StepDefinition {
    private final String id;
    private final StepExecutor executor;
    private final String description;
    private final Class<?> inputType;
    private final Class<?> outputType;
    private final RetryPolicy retryPolicy;
    private final int invocationLimit;
    private final OnInvocationsLimit onInvocationsLimit;

    private StepDefinition(String id, StepExecutor executor, String description,
                           Class<?> inputType, Class<?> outputType,
                           RetryPolicy retryPolicy, int invocationLimit,
                           OnInvocationsLimit onInvocationsLimit) {
        if (id == null || id.isBlank()) {
            throw new IllegalArgumentException("Step ID cannot be null or empty");
        }
        if (executor == null) {
            throw new IllegalArgumentException("Step executor cannot be null");
        }
        this.id = id;
        this.executor = executor;
        // Fall back to sensible defaults so the with-er chain never stores nulls.
        this.description = description != null ? description : "Step: " + id;
        this.inputType = inputType != null ? inputType : Object.class;
        this.outputType = outputType != null ? outputType : Object.class;
        this.retryPolicy = retryPolicy;
        this.invocationLimit = invocationLimit > 0 ? invocationLimit : 100;
        this.onInvocationsLimit = onInvocationsLimit != null ? onInvocationsLimit : OnInvocationsLimit.ERROR;
    }

    /**
     * Logs and builds the standard "input type mismatch" error. Shared by the
     * method-reference factories below (previously duplicated verbatim in each).
     */
    private static IllegalArgumentException typeMismatch(String stepId, Class<?> expectedType,
                                                         Object input, ClassCastException cause) {
        log.error("Type mismatch in step '{}': expected {}, received {}",
            stepId, expectedType, input != null ? input.getClass() : "null");
        return new IllegalArgumentException(
            "Input type mismatch for step '" + stepId +
            "'. Expected: " + expectedType +
            ", but received: " + (input != null ? input.getClass() : "null"), cause);
    }

    /**
     * Creates a step definition from a method reference that takes input only.
     * The step ID is automatically derived from the method name.
     *
     * @param stepFunction Method reference like paymentSteps::validateOrder
     */
    public static <I, O> StepDefinition of(SerializableFunction<I, StepResult<O>> stepFunction) {
        ReflectionUtils.MethodInfo methodInfo = ReflectionUtils.extractMethodInfo(stepFunction);
        return new StepDefinition(
            methodInfo.getMethodName(),
            (input, context) -> {
                try {
                    return stepFunction.apply((I) input);
                } catch (ClassCastException e) {
                    throw typeMismatch(methodInfo.getMethodName(), methodInfo.getInputType(), input, e);
                }
            },
            null,
            methodInfo.getInputType(),
            methodInfo.getOutputType(),
            null, 100, OnInvocationsLimit.ERROR
        );
    }

    /**
     * Creates a step definition from a method reference that takes input and context.
     * The step ID is automatically derived from the method name.
     *
     * @param stepFunction Method reference like paymentSteps::processPayment
     */
    public static <I, O> StepDefinition of(SerializableBiFunction<I, WorkflowContext, StepResult<O>> stepFunction) {
        ReflectionUtils.MethodInfo methodInfo = ReflectionUtils.extractMethodInfo(stepFunction);
        return new StepDefinition(
            methodInfo.getMethodName(),
            (input, context) -> {
                try {
                    return stepFunction.apply((I) input, context);
                } catch (ClassCastException e) {
                    throw typeMismatch(methodInfo.getMethodName(), methodInfo.getInputType(), input, e);
                }
            },
            null,
            methodInfo.getInputType(),
            methodInfo.getOutputType(),
            null, 100, OnInvocationsLimit.ERROR
        );
    }

    /**
     * Creates a step definition with explicit ID (for lambdas).
     * Type information must be provided via withTypes() method.
     *
     * @param id Explicit step ID
     * @param stepFunction Lambda function
     */
    public static <I, O> StepDefinition of(String id, Function<I, StepResult<O>> stepFunction) {
        if (id == null || id.isBlank()) {
            throw new IllegalArgumentException("Step ID cannot be null or empty for lambda functions");
        }
        return new StepDefinition(
            id,
            (input, context) -> {
                try {
                    return stepFunction.apply((I) input);
                } catch (ClassCastException e) {
                    // No type info is available for plain lambdas, so the message
                    // is intentionally shorter than the method-reference variant.
                    throw new IllegalArgumentException(
                        "Input type mismatch for step '" + id + "'", e);
                }
            },
            null,
            null, // User must specify via withTypes()
            null,
            null, 100, OnInvocationsLimit.ERROR
        );
    }

    /**
     * Creates a step definition with explicit ID and context (for lambdas).
     * Type information must be provided via withTypes() method.
     *
     * @param id Explicit step ID
     * @param stepFunction Lambda function
     */
    public static <I, O> StepDefinition of(String id, BiFunction<I, WorkflowContext, StepResult<O>> stepFunction) {
        if (id == null || id.isBlank()) {
            throw new IllegalArgumentException("Step ID cannot be null or empty for lambda functions");
        }
        return new StepDefinition(
            id,
            (input, context) -> {
                try {
                    return stepFunction.apply((I) input, context);
                } catch (ClassCastException e) {
                    throw new IllegalArgumentException(
                        "Input type mismatch for step '" + id + "'", e);
                }
            },
            null,
            null, // User must specify via withTypes()
            null,
            null, 100, OnInvocationsLimit.ERROR
        );
    }

    /**
     * Creates a step definition that requires only context (no input).
     *
     * @param stepFunction Function that takes only WorkflowContext
     */
    public static <O> StepDefinition ofContextOnly(SerializableFunction<WorkflowContext, StepResult<O>> stepFunction) {
        ReflectionUtils.MethodInfo methodInfo = ReflectionUtils.extractMethodInfo(stepFunction);
        return new StepDefinition(
            methodInfo.getMethodName(),
            (input, context) -> stepFunction.apply(context),
            null,
            Void.class,
            methodInfo.getOutputType(),
            null, 100, OnInvocationsLimit.ERROR
        );
    }

    /**
     * Creates a step definition that requires only context with explicit ID.
     * Type information for output must be provided via withTypes() method.
     */
    public static <O> StepDefinition ofContextOnly(String id, Function<WorkflowContext, StepResult<O>> stepFunction) {
        if (id == null || id.isBlank()) {
            throw new IllegalArgumentException("Step ID cannot be null or empty");
        }
        return new StepDefinition(
            id,
            (input, context) -> stepFunction.apply(context),
            null,
            Void.class,
            null, // User must specify via withTypes()
            null, 100, OnInvocationsLimit.ERROR
        );
    }

    /**
     * Sets a custom description for this step.
     */
    public StepDefinition withDescription(String description) {
        return new StepDefinition(this.id, this.executor, description, this.inputType, this.outputType,
            this.retryPolicy, this.invocationLimit, this.onInvocationsLimit);
    }

    /**
     * Sets explicit type information for this step.
     * This is required for lambda-based steps where type information cannot be extracted.
     */
    public StepDefinition withTypes(Class<?> inputType, Class<?> outputType) {
        return new StepDefinition(this.id, this.executor, this.description, inputType, outputType,
            this.retryPolicy, this.invocationLimit, this.onInvocationsLimit);
    }

    /**
     * Sets only the input type, keeping the output type as-is.
     */
    public StepDefinition withInputType(Class<?> inputType) {
        return new StepDefinition(this.id, this.executor, this.description, inputType, this.outputType,
            this.retryPolicy, this.invocationLimit, this.onInvocationsLimit);
    }

    /**
     * Sets only the output type, keeping the input type as-is.
     */
    public StepDefinition withOutputType(Class<?> outputType) {
        return new StepDefinition(this.id, this.executor, this.description, this.inputType, outputType,
            this.retryPolicy, this.invocationLimit, this.onInvocationsLimit);
    }

    /**
     * Sets the retry policy for this step.
     *
     * @param retryPolicy The retry policy to apply
     * @return A new StepDefinition with the retry policy set
     */
    public StepDefinition withRetryPolicy(RetryPolicy retryPolicy) {
        return new StepDefinition(this.id, this.executor, this.description, this.inputType, this.outputType,
            retryPolicy, this.invocationLimit, this.onInvocationsLimit);
    }

    /**
     * Sets the invocation limit for this step.
     *
     * @param invocationLimit Maximum number of times this step can be invoked
     * @return A new StepDefinition with the invocation limit set
     */
    public StepDefinition withInvocationLimit(int invocationLimit) {
        return new StepDefinition(this.id, this.executor, this.description, this.inputType, this.outputType,
            this.retryPolicy, invocationLimit, this.onInvocationsLimit);
    }

    /**
     * Sets the behavior when invocation limit is reached.
     *
     * @param onInvocationsLimit The behavior when limit is reached
     * @return A new StepDefinition with the behavior set
     */
    public StepDefinition withOnInvocationsLimit(OnInvocationsLimit onInvocationsLimit) {
        return new StepDefinition(this.id, this.executor, this.description, this.inputType, this.outputType,
            this.retryPolicy, this.invocationLimit, onInvocationsLimit);
    }

    /**
     * Convenience method to set both invocation limit and behavior.
     *
     * @param invocationLimit Maximum number of times this step can be invoked
     * @param onInvocationsLimit The behavior when limit is reached
     * @return A new StepDefinition with both settings
     */
    public StepDefinition withInvocationControl(int invocationLimit, OnInvocationsLimit onInvocationsLimit) {
        return new StepDefinition(this.id, this.executor, this.description, this.inputType, this.outputType,
            this.retryPolicy, invocationLimit, onInvocationsLimit);
    }

    /**
     * Executor interface for step logic.
     */
    @FunctionalInterface
    public interface StepExecutor {
        StepResult<?> execute(Object input, WorkflowContext context) throws Exception;
    }

    /**
     * Serializable version of Function for method reference extraction.
     */
    @FunctionalInterface
    public interface SerializableFunction<T, R> extends Function<T, R>, Serializable {}

    /**
     * Serializable version of BiFunction for method reference extraction.
     */
    @FunctionalInterface
    public interface SerializableBiFunction<T, U, R> extends BiFunction<T, U, R>, Serializable {}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/builder/TryBuilder.java
|
package ai.driftkit.workflow.engine.builder;
import ai.driftkit.workflow.engine.core.StepResult;
import ai.driftkit.workflow.engine.core.WorkflowContext;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Builder for try-catch-finally pattern.
 * Wraps a single step with typed error handlers and an optional cleanup block.
 */
public class TryBuilder<T, R> {
    private final WorkflowBuilder<T, R> parentBuilder;
    private final StepDefinition tryStep;
    // LinkedHashMap preserves registration order of the handlers.
    private final Map<Class<? extends Throwable>, ErrorHandler> errorHandlers = new LinkedHashMap<>();
    private Runnable finallyBlock;

    TryBuilder(WorkflowBuilder<T, R> parentBuilder, StepDefinition tryStep) {
        if (parentBuilder == null) {
            throw new IllegalArgumentException("Parent builder cannot be null");
        }
        if (tryStep == null) {
            throw new IllegalArgumentException("Try step cannot be null");
        }
        this.parentBuilder = parentBuilder;
        this.tryStep = tryStep;
    }

    /**
     * Registers a handler for the given throwable type.
     * Each type may be registered at most once.
     */
    public TryBuilder<T, R> catchError(Class<? extends Throwable> errorType, ErrorHandler handler) {
        if (errorType == null) {
            throw new IllegalArgumentException("Error type cannot be null");
        }
        if (handler == null) {
            throw new IllegalArgumentException("Error handler cannot be null");
        }
        // putIfAbsent leaves the map untouched on duplicates, matching the
        // original contains-then-put behavior.
        if (errorHandlers.putIfAbsent(errorType, handler) != null) {
            throw new IllegalStateException("Duplicate error handler for type: " + errorType.getName());
        }
        return this;
    }

    /**
     * Sets the cleanup block and finalizes the try-catch, attaching it to the
     * parent builder.
     */
    public WorkflowBuilder<T, R> finallyDo(Runnable cleanup) {
        if (cleanup == null) {
            throw new IllegalArgumentException("Finally block cannot be null");
        }
        this.finallyBlock = cleanup;
        parentBuilder.addBuildStep(new WorkflowBuilder.TryCatchStep(tryStep, errorHandlers, finallyBlock));
        return parentBuilder;
    }

    /**
     * Finalizes the try-catch without a finally block.
     */
    public WorkflowBuilder<T, R> endTry() {
        parentBuilder.addBuildStep(new WorkflowBuilder.TryCatchStep(tryStep, errorHandlers, null));
        return parentBuilder;
    }

    /**
     * Interface for error handlers.
     */
    @FunctionalInterface
    public interface ErrorHandler {
        StepResult<?> handle(Throwable error, WorkflowContext context);
    }

    StepDefinition getTryStep() {
        return tryStep;
    }

    Map<Class<? extends Throwable>, ErrorHandler> getErrorHandlers() {
        return errorHandlers;
    }

    Runnable getFinallyBlock() {
        return finallyBlock;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/builder/WorkflowBuilder.java
|
package ai.driftkit.workflow.engine.builder;
import ai.driftkit.workflow.engine.core.WorkflowContext;
import ai.driftkit.workflow.engine.core.StepResult;
import ai.driftkit.workflow.engine.core.InternalStepListener;
import ai.driftkit.workflow.engine.core.RetryExecutor;
import ai.driftkit.workflow.engine.async.TaskProgressReporter;
import ai.driftkit.workflow.engine.core.WorkflowAnalyzer;
import ai.driftkit.workflow.engine.core.WorkflowAnalyzer.AsyncStepMetadata;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import ai.driftkit.workflow.engine.annotations.AsyncStep;
import ai.driftkit.workflow.engine.annotations.RetryPolicy;
import ai.driftkit.workflow.engine.annotations.OnInvocationsLimit;
import ai.driftkit.workflow.engine.graph.Edge;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
import ai.driftkit.workflow.engine.utils.ReflectionUtils;
import ai.driftkit.workflow.engine.utils.BranchStepExecutor;
import ai.driftkit.workflow.engine.schema.SchemaUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import java.lang.reflect.Method;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.BiFunction;
/**
* Fluent API builder for creating workflow graphs.
* Inspired by mastra.ai's workflow definition approach.
*/
@Slf4j
public class
WorkflowBuilder<T, R> {
private final String id;
private final Class<T> inputType;
private final Class<R> outputType;
private final List<BuildStep> buildSteps = new ArrayList<>();
private String version = "1.0";
private String description;
private StepDefinition lastStepDefinition = null;
private final Set<Object> asyncHandlers = new HashSet<>(); // Collect all async handlers
private final Map<String, AsyncHandlerInfo> registeredAsyncHandlers = new HashMap<>();
/**
 * Validating constructor; callers go through {@code define(String, Class, Class)}.
 *
 * @param id         unique workflow identifier (must be non-blank)
 * @param inputType  type of the workflow's input data
 * @param outputType type of the workflow's final result
 * @throws IllegalArgumentException if any argument is missing/blank
 */
private WorkflowBuilder(String id, Class<T> inputType, Class<R> outputType) {
    if (StringUtils.isBlank(id)) {
        throw new IllegalArgumentException("WorkflowBuilder ID cannot be null or empty");
    }
    if (inputType == null) {
        throw new IllegalArgumentException("Input type cannot be null");
    }
    if (outputType == null) {
        throw new IllegalArgumentException("Output type cannot be null");
    }
    this.id = id;
    this.inputType = inputType;
    this.outputType = outputType;
}
/**
 * Starts defining a new workflow.
 *
 * @param id         unique workflow identifier
 * @param inputType  type of input data for the workflow
 * @param outputType type of the final result
 * @return a fresh builder for the given workflow
 */
public static <T, R> WorkflowBuilder<T, R> define(String id, Class<T> inputType, Class<R> outputType) {
    return new WorkflowBuilder<>(id, inputType, outputType);
}
/**
 * Sets the version of this workflow.
 *
 * @param version version string stored on the built graph (defaults to "1.0")
 * @return this builder for chaining
 */
public WorkflowBuilder<T, R> withVersion(String version) {
    this.version = version;
    return this;
}
/**
 * Sets the description of this workflow.
 *
 * @param description human-readable description stored on the built graph
 * @return this builder for chaining
 */
public WorkflowBuilder<T, R> withDescription(String description) {
    this.description = description;
    return this;
}
/**
 * Registers an async handler for processing async tasks.
 * The handler runs when a step returns StepResult.Async with a task ID
 * matching the given pattern (wildcards such as "*" are supported).
 *
 * @param taskIdPattern pattern to match task IDs
 * @param asyncHandler  function that processes matching async tasks
 * @return this builder for chaining
 */
public WorkflowBuilder<T, R> withAsyncHandler(String taskIdPattern,
        TriFunction<Map<String, Object>, WorkflowContext, TaskProgressReporter, StepResult<?>> asyncHandler) {
    if (StringUtils.isBlank(taskIdPattern)) {
        throw new IllegalArgumentException("Task ID pattern cannot be null or empty");
    }
    if (asyncHandler == null) {
        throw new IllegalArgumentException("Async handler cannot be null");
    }
    // Best-effort name extraction: useful in logs when a method reference
    // (rather than a plain lambda) was supplied.
    String methodName = ReflectionUtils.extractLambdaMethodName(asyncHandler);
    registeredAsyncHandlers.put(taskIdPattern,
        new AsyncHandlerInfo(taskIdPattern, asyncHandler, methodName));
    log.debug("Registered async handler for pattern '{}' (method: {})", taskIdPattern, methodName);
    return this;
}
/**
 * Registers an object containing @AsyncStep annotated methods.
 * This scans the object for methods with @AsyncStep and registers them automatically.
 *
 * @param asyncHandlerObject Object containing @AsyncStep methods
 * @return this builder for chaining
 * @throws IllegalArgumentException if the object is null
 */
public WorkflowBuilder<T, R> withAsyncHandler(Object asyncHandlerObject) {
    if (asyncHandlerObject == null) {
        throw new IllegalArgumentException("Async handler object cannot be null");
    }
    // Find all @AsyncStep methods in the object
    Map<String, AsyncStepMetadata> asyncSteps = WorkflowAnalyzer.findAsyncSteps(asyncHandlerObject);
    log.debug("Found {} @AsyncStep methods in {}", asyncSteps.size(), asyncHandlerObject.getClass().getSimpleName());
    // Register each async step
    for (Map.Entry<String, AsyncStepMetadata> entry : asyncSteps.entrySet()) {
        String pattern = entry.getKey();
        AsyncStepMetadata metadata = entry.getValue();
        // Create a wrapper that calls the method via reflection
        TriFunction<Map<String, Object>, WorkflowContext, TaskProgressReporter, StepResult<?>> wrapper =
            (taskArgs, context, progress) -> {
                try {
                    Method method = metadata.getMethod();
                    return (StepResult<?>) method.invoke(asyncHandlerObject, taskArgs, context, progress);
                } catch (Exception e) {
                    // Method.invoke wraps exceptions thrown by the handler in an
                    // InvocationTargetException; unwrap so the log and the failure
                    // result carry the real cause instead of the reflective wrapper.
                    Exception actual = e;
                    Throwable cause = e.getCause();
                    if (e instanceof java.lang.reflect.InvocationTargetException && cause instanceof Exception) {
                        actual = (Exception) cause;
                    }
                    log.error("Error invoking async handler method", actual);
                    return StepResult.fail(actual);
                }
            };
        AsyncHandlerInfo handlerInfo = new AsyncHandlerInfo(pattern, wrapper, metadata.getMethod().getName());
        handlerInfo.setFromAnnotation(true);
        handlerInfo.setAnnotation(metadata.getAnnotation());
        registeredAsyncHandlers.put(pattern, handlerInfo);
    }
    log.debug("Registered {} async handlers from object {}", asyncSteps.size(), asyncHandlerObject.getClass().getSimpleName());
    asyncHandlers.add(asyncHandlerObject);
    return this;
}
/**
* Adds a sequential step to the workflow.
*
* @param stepDef Step definition created via StepDefinition.of()
*/
public WorkflowBuilder<T, R> then(StepDefinition stepDef) {
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef; // Track for type flow
return this;
}
/**
* Adds a sequential step to the workflow using a function.
* Automatically extracts the method name if it's a method reference.
*
* @param step Function that returns StepResult
*/
public <I, O> WorkflowBuilder<T, R> then(Function<I, StepResult<O>> step) {
// Extract step ID from method reference or generate one
String stepId = ReflectionUtils.extractLambdaMethodName(step);
// Create step definition
StepDefinition stepDef = StepDefinition.of(stepId, step);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef; // Track for type flow
return this;
}
/**
* Adds a sequential step to the workflow using a BiFunction with context.
* Automatically extracts the method name if it's a method reference.
*
* @param step BiFunction that takes input and context and returns StepResult
*/
public <I, O> WorkflowBuilder<T, R> then(BiFunction<I, WorkflowContext, StepResult<O>> step) {
// Extract step ID from method reference or generate one
String stepId = ReflectionUtils.extractLambdaMethodName(step);
// Create step definition
StepDefinition stepDef = StepDefinition.of(stepId, step);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef; // Track for type flow
return this;
}
/**
* Adds a sequential step with explicit ID using a lambda.
* Use this when you need to specify a custom ID for a lambda expression.
*
* @param id Explicit step ID
* @param step Function that returns StepResult
*/
public <I, O> WorkflowBuilder<T, R> then(String id, Function<I, StepResult<O>> step, Class<I> inputType, Class<O> outputType) {
if (StringUtils.isBlank(id)) {
throw new IllegalArgumentException("Step ID cannot be null or empty");
}
if (inputType == null || outputType == null) {
throw new IllegalArgumentException("Input and output types must be specified for lambda expressions");
}
StepDefinition stepDef = StepDefinition.of(id, step).withTypes(inputType, outputType);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef; // Track for type flow
return this;
}
/**
* Adds a sequential step with explicit ID using a BiFunction.
*
* @param id Explicit step ID
* @param step BiFunction that takes input and context and returns StepResult
*/
public <I, O> WorkflowBuilder<T, R> then(String id, BiFunction<I, WorkflowContext, StepResult<O>> step) {
if (StringUtils.isBlank(id)) {
throw new IllegalArgumentException("Step ID cannot be null or empty");
}
StepDefinition stepDef = StepDefinition.of(id, step);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef;
return this;
}
/**
* Adds a step with retry policy using Function.
*
* @param id Step ID
* @param step Function that takes input and returns StepResult
* @param retryPolicy Retry policy for this step
*/
public <I, O> WorkflowBuilder<T, R> thenWithRetry(String id, Function<I, StepResult<O>> step,
RetryPolicy retryPolicy) {
if (StringUtils.isBlank(id)) {
throw new IllegalArgumentException("Step ID cannot be null or empty");
}
StepDefinition stepDef = StepDefinition.of(id, step).withRetryPolicy(retryPolicy);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef;
return this;
}
/**
* Adds a step with retry policy using BiFunction with context.
*
* @param id Step ID
* @param step BiFunction that takes input and context and returns StepResult
* @param retryPolicy Retry policy for this step
*/
public <I, O> WorkflowBuilder<T, R> thenWithRetry(String id, BiFunction<I, WorkflowContext, StepResult<O>> step,
RetryPolicy retryPolicy) {
if (StringUtils.isBlank(id)) {
throw new IllegalArgumentException("Step ID cannot be null or empty");
}
StepDefinition stepDef = StepDefinition.of(id, step).withRetryPolicy(retryPolicy);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef;
return this;
}
/**
* Applies a retry policy to the last added step.
* Can be chained after then() methods.
*
* @param retryPolicy Retry policy to apply
*/
public WorkflowBuilder<T, R> withRetryPolicy(RetryPolicy retryPolicy) {
if (lastStepDefinition == null) {
throw new IllegalStateException("No step to apply retry policy to. Add a step first.");
}
lastStepDefinition.withRetryPolicy(retryPolicy);
return this;
}
/**
* Adds a step that returns a plain value (not StepResult).
* The value will be automatically wrapped in StepResult.continueWith().
* Method name is automatically extracted from method reference.
*
* @param step Function that returns a plain object
*/
public <I, O> WorkflowBuilder<T, R> thenValue(Function<I, O> step) {
String stepId = ReflectionUtils.extractLambdaMethodName(step);
// Wrap the function to return StepResult
Function<I, StepResult<O>> wrappedStep = input -> {
O result = step.apply(input);
return StepResult.continueWith(result);
};
StepDefinition stepDef = StepDefinition.of(stepId, wrappedStep);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef;
return this;
}
/**
* Adds a step that returns a plain value with context access.
* The value will be automatically wrapped in StepResult.continueWith().
*
* @param step BiFunction that returns a plain object
*/
public <I, O> WorkflowBuilder<T, R> thenValue(BiFunction<I, WorkflowContext, O> step) {
String stepId = ReflectionUtils.extractLambdaMethodName(step);
// Wrap the function to return StepResult
BiFunction<I, WorkflowContext, StepResult<O>> wrappedStep = (input, ctx) -> {
O result = step.apply(input, ctx);
return StepResult.continueWith(result);
};
StepDefinition stepDef = StepDefinition.of(stepId, wrappedStep);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef;
return this;
}
/**
* Adds a step with explicit ID that returns a plain value.
*
* @param id Explicit step ID
* @param step Function that returns a plain object
*/
public <I, O> WorkflowBuilder<T, R> thenValue(String id, Function<I, O> step, Class<I> inputType, Class<O> outputType) {
if (StringUtils.isBlank(id)) {
throw new IllegalArgumentException("Step ID cannot be null or empty");
}
if (inputType == null || outputType == null) {
throw new IllegalArgumentException("Input and output types must be specified");
}
// Wrap the function to return StepResult
Function<I, StepResult<O>> wrappedStep = input -> {
O result = step.apply(input);
return StepResult.continueWith(result);
};
StepDefinition stepDef = StepDefinition.of(id, wrappedStep).withTypes(inputType, outputType);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef;
return this;
}
/**
* Adds a final step that returns a plain value (automatically wrapped in StepResult.finish).
*/
public <I, O> WorkflowBuilder<T, R> finishWithValue(Function<I, O> step) {
String stepId = ReflectionUtils.extractLambdaMethodName(step);
// Wrap the function to return StepResult.finish
Function<I, StepResult<O>> wrappedStep = input -> {
O result = step.apply(input);
return StepResult.finish(result);
};
StepDefinition stepDef = StepDefinition.of(stepId, wrappedStep);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef;
return this;
}
/**
* Adds a final step with context that returns a plain value (automatically wrapped in StepResult.finish).
*/
public <I, O> WorkflowBuilder<T, R> finishWithValue(BiFunction<I, WorkflowContext, O> step) {
String stepId = ReflectionUtils.extractLambdaMethodName(step);
// Wrap the function to return StepResult.finish
BiFunction<I, WorkflowContext, StepResult<O>> wrappedStep = (input, ctx) -> {
O result = step.apply(input, ctx);
return StepResult.finish(result);
};
StepDefinition stepDef = StepDefinition.of(stepId, wrappedStep);
buildSteps.add(new SequentialStep(stepDef));
lastStepDefinition = stepDef;
return this;
}
/**
* Adds parallel steps to the workflow.
* All steps in the list will be executed concurrently.
*
* @param parallelSteps List of step definitions to execute in parallel
*/
public WorkflowBuilder<T, R> parallel(List<StepDefinition> parallelSteps) {
if (parallelSteps == null || parallelSteps.isEmpty()) {
throw new IllegalArgumentException("Parallel steps list cannot be null or empty");
}
buildSteps.add(new ParallelStep(parallelSteps));
return this;
}
/**
* Adds parallel steps to the workflow using varargs of functions.
* All steps will be executed concurrently.
*
* @param steps Variable number of functions to execute in parallel
*/
@SafeVarargs
public final <I, O> WorkflowBuilder<T, R> parallel(Function<I, StepResult<O>>... steps) {
if (steps == null || steps.length == 0) {
throw new IllegalArgumentException("Parallel steps cannot be null or empty");
}
List<StepDefinition> stepDefs = new ArrayList<>();
for (Function<I, StepResult<O>> step : steps) {
String stepId = ReflectionUtils.extractLambdaMethodName(step);
stepDefs.add(StepDefinition.of(stepId, step));
}
buildSteps.add(new ParallelStep(stepDefs));
return this;
}
/**
* Adds parallel steps to the workflow using varargs of BiFunctions.
* All steps will be executed concurrently.
*
* @param steps Variable number of BiFunctions to execute in parallel
*/
@SafeVarargs
public final <I, O> WorkflowBuilder<T, R> parallel(BiFunction<I, WorkflowContext, StepResult<O>>... steps) {
if (steps == null || steps.length == 0) {
throw new IllegalArgumentException("Parallel steps cannot be null or empty");
}
List<StepDefinition> stepDefs = new ArrayList<>();
for (BiFunction<I, WorkflowContext, StepResult<O>> step : steps) {
String stepId = ReflectionUtils.extractLambdaMethodName(step);
stepDefs.add(StepDefinition.of(stepId, step));
}
buildSteps.add(new ParallelStep(stepDefs));
return this;
}
// Note: Legacy branch methods that use Object types have been removed.
// Use the typed branch method below that properly tracks input types.
    /**
     * Creates a conditional branch with automatic type inference from the previous step.
     * Both branch bodies receive the output type of the previous step as their input type
     * (or the workflow input type if this is the first step).
     *
     * @param condition predicate evaluated against the workflow context at runtime
     * @param ifTrue    consumer that populates the branch taken when the condition is true
     * @param ifFalse   consumer that populates the branch taken when the condition is false
     * @return this builder for chaining
     * @throws IllegalArgumentException if any argument is null
     * @throws IllegalStateException    if the branch input type cannot be determined
     *                                  (previous step lacks an explicit output type)
     */
    public WorkflowBuilder<T, R> branch(Predicate<WorkflowContext> condition,
                                         Consumer<WorkflowBuilder<?, ?>> ifTrue,
                                         Consumer<WorkflowBuilder<?, ?>> ifFalse) {
        if (condition == null) {
            throw new IllegalArgumentException("Branch condition cannot be null");
        }
        if (ifTrue == null) {
            throw new IllegalArgumentException("True branch cannot be null");
        }
        if (ifFalse == null) {
            throw new IllegalArgumentException("False branch cannot be null");
        }
        // Determine the input type for branches from the last step's output
        Class<?> branchInputType = lastStepDefinition != null ?
            lastStepDefinition.getOutputType() : inputType;
        if (branchInputType == null) {
            throw new IllegalStateException(
                "Cannot determine input type for branch. Previous step must have explicit output type. " +
                "Use method references or provide explicit types for lambda steps."
            );
        }
        // Create branches with the proper input type
        // The output type will be determined by the last step in each branch
        WorkflowBuilder<?, ?> trueBranch = new WorkflowBuilder<>("branch-true", branchInputType, branchInputType);
        ifTrue.accept(trueBranch);
        WorkflowBuilder<?, ?> falseBranch = new WorkflowBuilder<>("branch-false", branchInputType, branchInputType);
        ifFalse.accept(falseBranch);
        buildSteps.add(new TypedBranchStep(condition, trueBranch, falseBranch, branchInputType));
        return this;
    }
/**
* Builds the workflow graph.
* This validates the workflow structure and creates an immutable WorkflowBuilderGraph.
*/
public WorkflowGraph<T, R> build() {
if (buildSteps.isEmpty()) {
throw new IllegalStateException("WorkflowBuilder must have at least one step");
}
// Register input type schema
if (inputType != null && inputType != void.class && inputType != Void.class) {
SchemaUtils.getSchemaFromClass(inputType);
log.debug("Registered input type schema for workflow {}: {}", id, inputType.getName());
}
// Initialize graph components
Map<String, StepNode> nodes = new HashMap<>();
Map<String, List<Edge>> edges = new HashMap<>();
// Build the graph from steps
GraphBuildResult buildResult = buildGraphFromSteps(nodes, edges);
String initialStepId = buildResult.initialStepId();
// Validate the graph
validateGraph(nodes, edges, initialStepId);
// Log graph information
logGraphInfo(nodes, edges);
// Convert async handlers to metadata
Map<String, AsyncStepMetadata> asyncStepMetadata = buildAsyncStepMetadata();
return WorkflowGraph.<T, R>builder()
.id(id)
.version(version)
.inputType(inputType)
.outputType(outputType)
.nodes(nodes)
.edges(edges)
.initialStepId(initialStepId)
.workflowInstance(null) // No instance for FluentAPI
.asyncStepMetadata(asyncStepMetadata)
.build();
}
/**
* Builds this workflow as a sub-workflow that can be embedded in another workflow.
* Used internally for branch construction.
*/
WorkflowBuilder<T, R> buildAsSubWorkflowBuilder() {
// This is a marker method for the documentation example
// In practice, branches are handled differently
return this;
}
/**
* Multi-way branching based on a selector function.
*
* @param selector Function that extracts the value to switch on
* @return OnBuilder for specifying cases
*/
public <V> OnBuilder<T, R, V> on(Function<WorkflowContext, V> selector) {
return new OnBuilder<>(this, selector);
}
/**
* Try-catch style error handling for a step.
*
* @param stepDef The step that might throw an error
* @return TryBuilder for specifying error handlers
*/
public TryBuilder<T, R> tryStep(StepDefinition stepDef) {
return new TryBuilder<>(this, stepDef);
}
/**
* Package-private method to add build steps from other builders.
*/
void addBuildStep(BuildStep step) {
buildSteps.add(step);
}
/**
* Package-private method to get build steps.
*/
List<BuildStep> getBuildSteps() {
return buildSteps;
}
    /**
     * Walks the ordered build steps and materializes each into the shared
     * node/edge maps, wiring every step to its predecessor(s) as it goes.
     * Predecessor tracking is dual-mode: a single exit point is tracked via
     * lastStepId, multiple exit points (from branches) via lastExitPoints.
     *
     * @param nodes mutable map populated with all step nodes, keyed by step ID
     * @param edges mutable map populated with outgoing edges per step ID
     * @return result carrying the ID of the workflow's initial step (may be null if empty)
     */
    private GraphBuildResult buildGraphFromSteps(Map<String, StepNode> nodes,
                                                  Map<String, List<Edge>> edges) {
        GraphBuildContext context = new GraphBuildContext();
        String lastStepId = null;           // set when the previous step has exactly one exit
        List<String> lastExitPoints = null; // set when the previous step has multiple exits
        String initialStepId = null;
        for (int i = 0; i < buildSteps.size(); i++) {
            BuildStep buildStep = buildSteps.get(i);
            BuildStepResult result = buildStep.build(context, lastStepId);
            // Process the build step result
            processBuildStepResult(result, nodes, edges);
            // The very first step's first entry point becomes the workflow entry
            if (i == 0 && !result.entryPoints.isEmpty()) {
                initialStepId = result.entryPoints.get(0);
            }
            // Connect to previous step(s)
            connectToPreviousSteps(result, edges, lastStepId, lastExitPoints);
            // Update last step tracking
            LastStepInfo lastStepInfo = updateLastStepTracking(result);
            lastStepId = lastStepInfo.lastStepId();
            lastExitPoints = lastStepInfo.lastExitPoints();
        }
        return new GraphBuildResult(initialStepId);
    }
/**
* Processes the result from a build step.
*/
private void processBuildStepResult(BuildStepResult result,
Map<String, StepNode> nodes,
Map<String, List<Edge>> edges) {
// Add nodes
result.nodes.forEach(node -> {
if (nodes.containsKey(node.id())) {
throw new IllegalStateException("Duplicate step ID: " + node.id());
}
nodes.put(node.id(), node);
});
// Add edges
result.edges.forEach((from, edgeList) -> {
edges.computeIfAbsent(from, k -> new ArrayList<>()).addAll(edgeList);
});
}
/**
* Connects the current step to previous steps.
*/
private void connectToPreviousSteps(BuildStepResult result,
Map<String, List<Edge>> edges,
String lastStepId,
List<String> lastExitPoints) {
if (lastStepId != null && !result.entryPoints.isEmpty()) {
// Single previous step to multiple entry points
for (String entryPoint : result.entryPoints) {
edges.computeIfAbsent(lastStepId, k -> new ArrayList<>())
.add(Edge.sequential(lastStepId, entryPoint));
}
} else if (lastExitPoints != null && !lastExitPoints.isEmpty() && !result.entryPoints.isEmpty()) {
// Multiple previous exit points (from branch) to entry points
for (String exitPoint : lastExitPoints) {
for (String entryPoint : result.entryPoints) {
edges.computeIfAbsent(exitPoint, k -> new ArrayList<>())
.add(Edge.sequential(exitPoint, entryPoint));
}
}
}
}
/**
* Updates tracking of the last step based on exit points.
*/
private LastStepInfo updateLastStepTracking(BuildStepResult result) {
if (!result.exitPoints.isEmpty()) {
if (result.exitPoints.size() == 1) {
return new LastStepInfo(result.exitPoints.get(0), null);
} else {
// Multiple exit points (e.g., from branches)
return new LastStepInfo(null, new ArrayList<>(result.exitPoints));
}
} else {
return new LastStepInfo(null, null);
}
}
    /**
     * Logs a summary of the constructed graph at INFO level and, when DEBUG is
     * enabled, dumps every node with its outgoing edges.
     */
    private void logGraphInfo(Map<String, StepNode> nodes, Map<String, List<Edge>> edges) {
        int totalEdges = edges.values().stream().mapToInt(List::size).sum();
        log.info("Built workflow graph: {} with {} nodes and {} edges",
            id, nodes.size(), totalEdges);
        // Debug: print graph structure (guarded to avoid building strings needlessly)
        if (log.isDebugEnabled()) {
            log.debug("Graph structure for {}:", id);
            nodes.forEach((nodeId, node) -> {
                log.debug("  Node: {} ({})", nodeId, node.description());
                List<Edge> outgoing = edges.getOrDefault(nodeId, List.of());
                outgoing.forEach(edge -> {
                    log.debug("    -> {} ({})", edge.toStepId(), edge.type());
                });
            });
        }
    }
    /**
     * Converts the registered async handlers into AsyncStepMetadata entries,
     * keyed by task-ID pattern, for embedding into the workflow graph.
     * Handlers registered via the TriFunction overload get a synthetic
     * annotation; handlers discovered via @AsyncStep keep their real one.
     */
    private Map<String, AsyncStepMetadata> buildAsyncStepMetadata() {
        Map<String, AsyncStepMetadata> asyncStepMetadata = new HashMap<>();
        for (Map.Entry<String, AsyncHandlerInfo> entry : registeredAsyncHandlers.entrySet()) {
            String pattern = entry.getKey();
            AsyncHandlerInfo info = entry.getValue();
            // Create a wrapper method that delegates to the TriFunction
            Method proxyMethod = createProxyMethodForTriFunction();
            // Create AsyncStepMetadata with null instance - the handler is in the metadata itself
            AsyncStepMetadata metadata = new FluentApiAsyncStepMetadata(
                proxyMethod,
                info.annotation != null ? info.annotation : createSyntheticAsyncAnnotation(pattern),
                info.handler // Store the actual handler
            );
            asyncStepMetadata.put(pattern, metadata);
        }
        return asyncStepMetadata;
    }
    /**
     * Result of building the graph.
     *
     * @param initialStepId the ID of the workflow's entry node; null when no steps produced one
     */
    private record GraphBuildResult(String initialStepId) {}
    /**
     * Predecessor-tracking state carried between build iterations.
     *
     * @param lastStepId     ID of the previous step when it has exactly one exit point; else null
     * @param lastExitPoints exit-point IDs when the previous step has multiple exits (branches); else null
     */
    private record LastStepInfo(String lastStepId, List<String> lastExitPoints) {}
/**
* Validates the constructed graph.
*/
private void validateGraph(Map<String, StepNode> nodes,
Map<String, List<Edge>> edges,
String initialStepId) {
if (initialStepId == null) {
throw new IllegalStateException("No initial step defined");
}
if (!nodes.containsKey(initialStepId)) {
throw new IllegalStateException("Initial step not found: " + initialStepId);
}
// Check for orphaned nodes (except initial)
Set<String> reachable = new HashSet<>();
Queue<String> toVisit = new LinkedList<>();
toVisit.offer(initialStepId);
while (!toVisit.isEmpty()) {
String current = toVisit.poll();
if (reachable.contains(current)) {
continue;
}
reachable.add(current);
List<Edge> outgoing = edges.getOrDefault(current, Collections.emptyList());
for (Edge edge : outgoing) {
toVisit.offer(edge.toStepId());
}
}
Set<String> unreachable = new HashSet<>(nodes.keySet());
unreachable.removeAll(reachable);
if (!unreachable.isEmpty()) {
log.warn("Unreachable nodes in workflow {}: {}", id, unreachable);
}
}
/**
* Base interface for build steps.
*/
static interface BuildStep {
BuildStepResult build(GraphBuildContext context, String previousStepId);
}
/**
* Sequential step implementation.
*/
private record SequentialStep(StepDefinition stepDef) implements BuildStep {
@Override
public BuildStepResult build(GraphBuildContext context, String previousStepId) {
StepNode node = createStepNode(stepDef, context);
BuildStepResult result = new BuildStepResult();
result.nodes.add(node);
result.entryPoints.add(node.id());
result.exitPoints.add(node.id());
return result;
}
}
    /**
     * Parallel step: materializes the whole group as ONE graph node whose
     * executor fans the same input out to every StepDefinition concurrently
     * (via CompletableFuture on the common pool) and continues with a List of
     * the collected results.
     */
    private record ParallelStep(List<StepDefinition> parallelSteps) implements BuildStep {
        @Override
        public BuildStepResult build(GraphBuildContext context, String previousStepId) {
            BuildStepResult result = new BuildStepResult();
            // Determine output type from first step
            Class<?> outputType = parallelSteps.isEmpty() ? Object.class :
                parallelSteps.get(0).getOutputType() != null ? parallelSteps.get(0).getOutputType() : Object.class;
            // Create a single node that executes all steps in parallel
            String parallelNodeId = "parallel_" + context.nextId();
            StepNode parallelNode = StepNode.fromBiFunction(
                parallelNodeId,
                (Object input, WorkflowContext ctx) -> {
                    // Execute all steps in parallel using CompletableFuture
                    // (default executor is ForkJoinPool.commonPool - no explicit executor supplied)
                    List<CompletableFuture<StepResult<?>>> futures = new ArrayList<>();
                    for (StepDefinition stepDef : parallelSteps) {
                        CompletableFuture<StepResult<?>> future = CompletableFuture.supplyAsync(() -> {
                            try {
                                // Execute the step with the same input
                                return (StepResult<?>) stepDef.getExecutor().execute(input, ctx);
                            } catch (Exception e) {
                                // A throwing step is converted to a Fail result rather than
                                // poisoning the future itself
                                log.error("Parallel step {} failed", stepDef.getId(), e);
                                return StepResult.fail(e);
                            }
                        });
                        futures.add(future);
                    }
                    // Wait for all to complete
                    try {
                        CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
                        // Collect all results with proper typing
                        List<Object> results = new ArrayList<>();
                        for (CompletableFuture<StepResult<?>> future : futures) {
                            StepResult<?> stepResult = future.get();
                            if (stepResult instanceof StepResult.Continue<?> cont) {
                                results.add(cont.data());
                            } else if (stepResult instanceof StepResult.Fail<?> fail) {
                                // If any step failed, fail the whole parallel execution
                                return StepResult.fail(fail.error());
                            }
                            // NOTE(review): result kinds other than Continue/Fail (e.g. Finish)
                            // are silently skipped here - confirm this is intended.
                        }
                        // Return the typed list of results
                        return StepResult.continueWith(results);
                    } catch (Exception e) {
                        log.error("Parallel execution failed", e);
                        return StepResult.fail(e);
                    }
                },
                Object.class, // Input type
                List.class // Output type is List
            ).withDescription("Parallel execution of " + parallelSteps.size() + " steps");
            result.nodes.add(parallelNode);
            result.entryPoints.add(parallelNodeId);
            result.exitPoints.add(parallelNodeId);
            return result;
        }
    }
    /**
     * Typed branch step: both branches are flattened into step lists at build
     * time and the whole branch becomes ONE graph node whose executor evaluates
     * the condition at runtime and runs the chosen branch's steps in sequence.
     */
    private record TypedBranchStep<I>(Predicate<WorkflowContext> condition,
                                       WorkflowBuilder<I, ?> ifTrue,
                                       WorkflowBuilder<I, ?> ifFalse,
                                       Class<I> inputType) implements BuildStep {
        @Override
        public BuildStepResult build(GraphBuildContext context, String previousStepId) {
            BuildStepResult result = new BuildStepResult();
            // Collect all steps from both branches
            final List<StepDefinition> trueSteps = collectSteps(ifTrue);
            final List<StepDefinition> falseSteps = collectSteps(ifFalse);
            // Create a single branch node that executes the appropriate branch
            String branchNodeId = "branch_" + context.nextId();
            StepNode branchNode = StepNode.fromBiFunction(
                branchNodeId,
                (Object input, WorkflowContext ctx) -> {
                    boolean conditionResult = condition.test(ctx);
                    log.debug("Branch condition evaluated to: {}", conditionResult);
                    // Select which steps to execute
                    List<StepDefinition> stepsToExecute = conditionResult ? trueSteps : falseSteps;
                    String branchName = conditionResult ? "true-branch" : "false-branch";
                    // Use utility to execute branch steps
                    return BranchStepExecutor.executeBranchSteps(stepsToExecute, input, ctx, branchName);
                },
                inputType, // Input type
                Object.class // Output type
            ).withDescription("Branch: " + (trueSteps.size() + falseSteps.size()) + " possible steps");
            result.nodes.add(branchNode);
            result.entryPoints.add(branchNodeId);
            result.exitPoints.add(branchNodeId);
            return result;
        }
        // Flattens a branch builder into its sequential step definitions.
        // NOTE(review): only SequentialStep entries are collected - nested
        // branch/parallel steps inside a branch are dropped; confirm intended.
        private List<StepDefinition> collectSteps(WorkflowBuilder<?, ?> workflow) {
            List<StepDefinition> steps = new ArrayList<>();
            for (BuildStep buildStep : workflow.buildSteps) {
                if (buildStep instanceof SequentialStep sequential) {
                    steps.add(sequential.stepDef());
                }
                // For nested branches/parallel, we'd need to handle them recursively
                // For now, keeping it simple
            }
            return steps;
        }
    }
    /**
     * Creates a graph StepNode that delegates to the StepDefinition's executor.
     * The node ID is the definition's ID with the build context's prefix applied;
     * retry/invocation settings are carried over from the definition.
     */
    private static StepNode createStepNode(StepDefinition stepDef, GraphBuildContext context) {
        String nodeId = context.prefix + stepDef.getId();
        return new StepNode(
            nodeId,
            stepDef.getDescription(),
            new StepNode.StepExecutor() {
                @Override
                public Object execute(Object input, WorkflowContext context) throws Exception {
                    return stepDef.getExecutor().execute(input, context);
                }
                @Override
                public Class<?> getInputType() {
                    return stepDef.getInputType();
                }
                @Override
                public Class<?> getOutputType() {
                    return stepDef.getOutputType();
                }
                @Override
                public boolean requiresContext() {
                    return true; // Conservative default
                }
            },
            false,
            context.isFirst(), // consumes the context's one-shot "initial step" flag
            stepDef.getRetryPolicy(),
            stepDef.getInvocationLimit(),
            stepDef.getOnInvocationsLimit()
        );
    }
/**
* Context for building the graph.
*/
static class GraphBuildContext {
private final AtomicInteger idCounter = new AtomicInteger();
private String prefix = "";
private boolean first = true;
int nextId() {
return idCounter.incrementAndGet();
}
boolean isFirst() {
if (first) {
first = false;
return true;
}
return false;
}
GraphBuildContext withPrefix(String prefix) {
GraphBuildContext newContext = new GraphBuildContext();
newContext.prefix = prefix;
newContext.idCounter.set(this.idCounter.get());
newContext.first = false; // Branch contexts should never have initial steps
return newContext;
}
}
    /**
     * Result of building a step or group of steps: the nodes and edges it
     * contributed, plus the entry points (where predecessors connect in) and
     * exit points (where successors connect out).
     */
    static class BuildStepResult {
        // Nodes contributed by this build step
        final List<StepNode> nodes = new ArrayList<>();
        // Internal edges between this step's own nodes, keyed by source node ID
        final Map<String, List<Edge>> edges = new HashMap<>();
        // Node IDs that predecessors should connect to
        final List<String> entryPoints = new ArrayList<>();
        // Node IDs that successors should connect from
        final List<String> exitPoints = new ArrayList<>();
    }
    /**
     * Multi-branch step: each case (and the optional otherwise case) is
     * flattened into a step list at build time, and the whole construct becomes
     * ONE graph node whose executor evaluates the selector at runtime and runs
     * the matching case's steps in sequence.
     */
    static class MultiBranchStep<V> implements BuildStep {
        // Extracts the switch value from the workflow context at runtime
        private final Function<WorkflowContext, V> selector;
        // Case value -> branch body (insertion order preserved)
        private final Map<V, Consumer<WorkflowBuilder<?, ?>>> cases;
        // Fallback branch when no case matches; may be null
        private final Consumer<WorkflowBuilder<?, ?>> otherwiseCase;
        MultiBranchStep(Function<WorkflowContext, V> selector,
                        Map<V, Consumer<WorkflowBuilder<?, ?>>> cases,
                        Consumer<WorkflowBuilder<?, ?>> otherwiseCase) {
            this.selector = selector;
            this.cases = new LinkedHashMap<>(cases); // Preserve order
            this.otherwiseCase = otherwiseCase;
        }
        @Override
        public BuildStepResult build(GraphBuildContext context, String previousStepId) {
            BuildStepResult result = new BuildStepResult();
            // Collect all steps from all branches
            final Map<V, List<StepDefinition>> caseSteps = new LinkedHashMap<>();
            for (Map.Entry<V, Consumer<WorkflowBuilder<?, ?>>> entry : cases.entrySet()) {
                WorkflowBuilder<Object, Object> caseBuilder = new WorkflowBuilder<>(
                    "case-" + entry.getKey(), Object.class, Object.class
                );
                entry.getValue().accept(caseBuilder);
                caseSteps.put(entry.getKey(), collectSteps(caseBuilder));
            }
            final List<StepDefinition> otherwiseSteps;
            if (otherwiseCase != null) {
                WorkflowBuilder<Object, Object> otherwiseBuilder = new WorkflowBuilder<>(
                    "otherwise", Object.class, Object.class
                );
                otherwiseCase.accept(otherwiseBuilder);
                otherwiseSteps = collectSteps(otherwiseBuilder);
            } else {
                otherwiseSteps = null;
            }
            // Create a single multi-branch node
            String multiBranchNodeId = "multi_branch_" + context.nextId();
            StepNode multiBranchNode = StepNode.fromBiFunction(
                multiBranchNodeId,
                (Object input, WorkflowContext ctx) -> {
                    // Get the selector value
                    V value = selector.apply(ctx);
                    log.debug("Multi-branch selector returned: {} (type: {})",
                        value, value != null ? value.getClass().getSimpleName() : "null");
                    // Find the matching case (Map.get equality on the selector value)
                    List<StepDefinition> stepsToExecute = caseSteps.get(value);
                    if (stepsToExecute == null && otherwiseSteps != null) {
                        stepsToExecute = otherwiseSteps;
                        log.debug("No case matched, using otherwise branch");
                    }
                    // No case and no otherwise: this is a hard error at runtime
                    if (stepsToExecute == null) {
                        throw new IllegalStateException("No branch matched for value: " + value);
                    }
                    // Use utility to execute branch steps
                    String branchName = String.format("multi-branch-%s-case-%s",
                        multiBranchNodeId, value != null ? value.toString() : "otherwise");
                    return BranchStepExecutor.executeBranchSteps(stepsToExecute, input, ctx, branchName);
                },
                Object.class, // Input type
                Object.class // Output type
            ).withDescription("Multi-branch with " + cases.size() + " cases");
            result.nodes.add(multiBranchNode);
            result.entryPoints.add(multiBranchNodeId);
            result.exitPoints.add(multiBranchNodeId);
            return result;
        }
        // Flattens a case builder into its sequential step definitions.
        // NOTE(review): only SequentialStep entries are collected - nested
        // branch/parallel steps inside a case are dropped; confirm intended.
        private List<StepDefinition> collectSteps(WorkflowBuilder<?, ?> workflow) {
            List<StepDefinition> steps = new ArrayList<>();
            for (BuildStep buildStep : workflow.buildSteps) {
                if (buildStep instanceof SequentialStep sequential) {
                    steps.add(sequential.stepDef());
                }
                // For nested branches/parallel, we'd need to handle them recursively
                // For now, keeping it simple
            }
            return steps;
        }
    }
    /**
     * Try-catch step: wraps a single step's executor so that thrown Throwables
     * are routed to the first registered handler whose exception type matches.
     * The optional finally block runs exactly once on every path (success,
     * handled, unhandled).
     */
    static class TryCatchStep implements BuildStep {
        // The guarded step
        private final StepDefinition tryStep;
        // Exception type -> handler; first assignable match wins (iteration order
        // of a HashMap, so overlapping handler types may match non-deterministically)
        private final Map<Class<? extends Throwable>, TryBuilder.ErrorHandler> errorHandlers;
        // Optional cleanup, run on every exit path; may be null
        private final Runnable finallyBlock;
        TryCatchStep(StepDefinition tryStep,
                     Map<Class<? extends Throwable>, TryBuilder.ErrorHandler> errorHandlers,
                     Runnable finallyBlock) {
            this.tryStep = tryStep;
            this.errorHandlers = new HashMap<>(errorHandlers);
            this.finallyBlock = finallyBlock;
        }
        @Override
        public BuildStepResult build(GraphBuildContext context, String previousStepId) {
            BuildStepResult result = new BuildStepResult();
            // Create try step with error handling wrapper
            String tryStepId = context.prefix + tryStep.getId();
            StepNode tryNode = new StepNode(
                tryStepId,
                tryStep.getDescription() + " (with error handling)",
                new StepNode.StepExecutor() {
                    @Override
                    public Object execute(Object input, WorkflowContext ctx) throws Exception {
                        try {
                            Object result = tryStep.getExecutor().execute(input, ctx);
                            // Execute finally block if present
                            if (finallyBlock != null) {
                                finallyBlock.run();
                            }
                            return result;
                        } catch (Throwable t) {
                            // Find matching error handler (isAssignableFrom allows
                            // a handler registered for a supertype to catch subtypes)
                            for (Map.Entry<Class<? extends Throwable>, TryBuilder.ErrorHandler> entry :
                                    errorHandlers.entrySet()) {
                                if (entry.getKey().isAssignableFrom(t.getClass())) {
                                    StepResult<?> handled =
                                        entry.getValue().handle(t, ctx);
                                    // Execute finally block
                                    if (finallyBlock != null) {
                                        finallyBlock.run();
                                    }
                                    // Unwrap the handler's result; a Fail result is
                                    // escalated as a RuntimeException with the cause attached
                                    if (handled instanceof StepResult.Continue) {
                                        return ((StepResult.Continue<?>) handled).data();
                                    } else if (handled instanceof StepResult.Finish) {
                                        return ((StepResult.Finish<?>) handled).result();
                                    } else if (handled instanceof StepResult.Fail) {
                                        throw new RuntimeException("Error handler failed",
                                            ((StepResult.Fail<?>) handled).error());
                                    }
                                }
                            }
                            // No handler found, execute finally and rethrow
                            if (finallyBlock != null) {
                                finallyBlock.run();
                            }
                            throw t;
                        }
                    }
                    @Override
                    public Class<?> getInputType() {
                        return tryStep.getInputType();
                    }
                    @Override
                    public Class<?> getOutputType() {
                        return tryStep.getOutputType();
                    }
                    @Override
                    public boolean requiresContext() {
                        return true;
                    }
                },
                false,
                context.isFirst(),
                tryStep.getRetryPolicy(),
                tryStep.getInvocationLimit(),
                tryStep.getOnInvocationsLimit()
            );
            result.nodes.add(tryNode);
            result.entryPoints.add(tryStepId);
            result.exitPoints.add(tryStepId);
            return result;
        }
    }
/**
 * Marker types for branch decisions.
 *
 * NOTE(review): these appear to be routing tokens matched by type when wiring
 * branch edges - confirm against the branch-building steps.
 */
private record BranchTrue() implements InternalRoutingMarker {}
private record BranchFalse() implements InternalRoutingMarker {}
// Carries the matched value for switch-style branching; public so callers can emit it.
public record BranchValue<V>(V value) implements InternalRoutingMarker {}
// Default branch taken when no BranchValue matched.
private record BranchOtherwise() implements InternalRoutingMarker {}
/**
 * Resolves the erased {@code TriFunction.apply(Object, Object, Object)} method
 * via reflection so it can serve as a stand-in {@link Method} for
 * AsyncStepMetadata registration.
 */
private Method createProxyMethodForTriFunction() {
    final Class<?>[] erasedParameterTypes = { Object.class, Object.class, Object.class };
    try {
        return TriFunction.class.getMethod("apply", erasedParameterTypes);
    } catch (NoSuchMethodException e) {
        // Cannot happen unless TriFunction itself changes; fail fast if it does.
        throw new RuntimeException("Failed to create proxy method", e);
    }
}
/**
 * Builds a synthetic {@link AsyncStep} annotation instance for handlers that
 * were registered programmatically rather than declared via the annotation.
 */
private AsyncStep createSyntheticAsyncAnnotation(String value) {
    final String pattern = value;
    return new AsyncStep() {
        @Override
        public Class<? extends java.lang.annotation.Annotation> annotationType() {
            return AsyncStep.class;
        }

        @Override
        public String value() {
            return pattern;
        }

        @Override
        public String description() {
            return "Async handler for pattern: " + pattern;
        }

        @Override
        public Class<?> inputClass() {
            return Map.class;
        }
    };
}
/**
 * Information about a registered async handler: the pattern it serves,
 * the handler function, and optional annotation provenance.
 */
private static class AsyncHandlerInfo {
    private static final String DEFAULT_METHOD_NAME = "asyncHandler";

    private final String pattern;
    private final TriFunction<Map<String, Object>, WorkflowContext, TaskProgressReporter, StepResult<?>> handler;
    private final String methodName;
    private boolean fromAnnotation = false;
    private AsyncStep annotation;

    AsyncHandlerInfo(String pattern,
                     TriFunction<Map<String, Object>, WorkflowContext, TaskProgressReporter, StepResult<?>> handler,
                     String methodName) {
        this.pattern = pattern;
        this.handler = handler;
        // Fall back to a generic method name when none is supplied.
        if (methodName == null) {
            this.methodName = DEFAULT_METHOD_NAME;
        } else {
            this.methodName = methodName;
        }
    }

    void setFromAnnotation(boolean fromAnnotation) {
        this.fromAnnotation = fromAnnotation;
    }

    void setAnnotation(AsyncStep annotation) {
        this.annotation = annotation;
    }
}
/**
 * Functional interface for async handlers.
 * A three-argument analogue of {@link java.util.function.BiFunction}.
 *
 * @param <T> type of the first argument
 * @param <U> type of the second argument
 * @param <V> type of the third argument
 * @param <R> result type
 */
@FunctionalInterface
public interface TriFunction<T, U, V, R> {
    R apply(T t, U u, V v);
}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/chat/ChatContextHelper.java
|
package ai.driftkit.workflow.engine.chat;
import ai.driftkit.workflow.engine.core.WorkflowContext;
import ai.driftkit.workflow.engine.core.WorkflowContext.Keys;
import ai.driftkit.workflow.engine.schema.AIFunctionSchema;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import java.util.*;
/**
 * Helper class to work with chat-specific data in WorkflowContext.
 * Provides convenient methods to store and retrieve chat-related information.
 */
@Slf4j
public class ChatContextHelper {

    // Static-only helper; never instantiated.
    private ChatContextHelper() {}

    // ========== Chat Session Management ==========

    /** Stores the chat ID in the context. */
    public static void setChatId(WorkflowContext context, String chatId) {
        context.setStepOutput(Keys.CHAT_ID, chatId);
    }

    /** Reads the chat ID from the context, or null when absent. */
    public static String getChatId(WorkflowContext context) {
        return context.getStepResultOrDefault(Keys.CHAT_ID, String.class, null);
    }

    /** Stores the user ID in the context. */
    public static void setUserId(WorkflowContext context, String userId) {
        context.setStepOutput(Keys.USER_ID, userId);
    }

    /** Reads the user ID from the context, or null when absent. */
    public static String getUserId(WorkflowContext context) {
        return context.getStepResultOrDefault(Keys.USER_ID, String.class, null);
    }

    // ========== Conversation History ==========

    /**
     * Records the latest user message for workflow processing.
     * Null or empty content is silently ignored.
     */
    public static void addUserMessage(WorkflowContext context, String content) {
        if (StringUtils.isEmpty(content)) {
            return;
        }
        context.setStepOutput("lastUserMessage", content);
    }

    // ========== Step Invocation Tracking ==========

    /**
     * Increments the invocation counter for the given step.
     * The stored counts map is copied before mutation so previously stored
     * state is never modified in place.
     */
    public static void trackStepInvocation(WorkflowContext context, String stepName) {
        if (StringUtils.isEmpty(stepName)) {
            return;
        }
        Map<String, Integer> existing = context.getMap(
                Keys.STEP_INVOCATION_COUNTS, String.class, Integer.class);
        Map<String, Integer> updated =
                existing == null ? new HashMap<>() : new HashMap<>(existing);
        updated.merge(stepName, 1, Integer::sum);
        context.setStepOutput(Keys.STEP_INVOCATION_COUNTS, updated);
    }

    /**
     * Returns how many times the given step was invoked; 0 for a blank name
     * or when no counts have been recorded yet.
     */
    public static int getStepInvocationCount(WorkflowContext context, String stepName) {
        if (StringUtils.isEmpty(stepName)) {
            return 0;
        }
        Map<String, Integer> counts = context.getMap(
                Keys.STEP_INVOCATION_COUNTS, String.class, Integer.class);
        if (counts == null) {
            return 0;
        }
        return counts.getOrDefault(stepName, 0);
    }

    // ========== Async Message Tracking ==========

    /**
     * Creates a fresh WorkflowContext pre-populated for a chat workflow run:
     * chat ID, user ID, and an empty step-invocation-counts map.
     */
    public static WorkflowContext initChatContext(String chatId, String userId, Object triggerData) {
        WorkflowContext context = WorkflowContext.newRun(triggerData);
        setChatId(context, chatId);
        setUserId(context, userId);
        context.setStepOutput(Keys.STEP_INVOCATION_COUNTS, new HashMap<>());
        return context;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/chat/ChatMessageTask.java
|
package ai.driftkit.workflow.engine.chat;
import ai.driftkit.common.domain.chat.ChatMessage.DataProperty;
import ai.driftkit.common.domain.chat.ChatMessage.MessageType;
import ai.driftkit.common.domain.chat.ChatResponse.NextSchema;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * Represents a task in the chat conversation UI.
 * Used for displaying message tasks in the frontend with progress tracking.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(Include.NON_NULL)
public class ChatMessageTask {
    // Unique identifier of this task (may be derived from the source message id).
    private String id;
    // Name/localization key the frontend uses to render this task.
    private String nameId;
    // Originating message type (e.g. AI vs USER).
    private MessageType type;
    // Data properties carried by the task (form values, selections, etc.).
    private List<DataProperty> properties;
    // Schema describing the next expected user input, if any.
    private NextSchema nextSchema;
    // Ordering timestamp copied from the source message (epoch millis presumed - confirm).
    private long timestamp;
    // The Boolean/Integer flags below are nullable and omitted from JSON when null
    // (see @JsonInclude(NON_NULL)).
    private Boolean completed;
    private Integer percentComplete;
    private Boolean required;
    private Boolean system;
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/chat/ChatResponseExtensions.java
|
package ai.driftkit.workflow.engine.chat;
import ai.driftkit.common.domain.chat.ChatMessage;
import ai.driftkit.common.domain.chat.ChatResponse;
import ai.driftkit.workflow.engine.schema.AIFunctionSchema;
import ai.driftkit.workflow.engine.schema.AIFunctionSchema.AIFunctionProperty;
import java.util.ArrayList;
import java.util.List;
/**
 * Extension methods for ChatResponse to work with AIFunctionSchema.
 * These are in workflow-engine-core because AIFunctionSchema is part of this
 * module and the common module cannot depend on workflow-engine-core.
 */
public final class ChatResponseExtensions {

    private ChatResponseExtensions() {
        // Static utility class - not instantiable (was previously missing).
    }

    /**
     * Sets the NextSchema on the given response from an AIFunctionSchema,
     * copying schema name and each property (name, nameId, type, values,
     * multi-select flag). A null schema clears the response's NextSchema.
     *
     * @param response the response to mutate
     * @param schema   the schema to convert, or null to clear
     */
    public static void setNextSchemaAsSchema(ChatResponse response, AIFunctionSchema schema) {
        if (schema == null) {
            response.setNextSchema(null);
            return;
        }
        ChatResponse.NextSchema nextSchema = new ChatResponse.NextSchema();
        nextSchema.setSchemaName(schema.getSchemaName());
        if (schema.getProperties() != null) {
            List<ChatResponse.NextProperties> nextProps = new ArrayList<>();
            for (AIFunctionProperty prop : schema.getProperties()) {
                ChatResponse.NextProperties nextProp = new ChatResponse.NextProperties();
                nextProp.setName(prop.getName());
                nextProp.setNameId(prop.getNameId());
                nextProp.setType(prop.getType());
                if (prop.getValues() != null) {
                    nextProp.setValues(prop.getValues());
                }
                nextProp.setMultiSelect(prop.isMultiSelect());
                nextProps.add(nextProp);
            }
            nextSchema.setProperties(nextProps);
        }
        response.setNextSchema(nextSchema);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/chat
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/chat/converter/ChatMessageTaskConverter.java
|
package ai.driftkit.workflow.engine.chat.converter;
import ai.driftkit.common.domain.chat.ChatMessage;
import ai.driftkit.common.domain.chat.ChatMessage.DataProperty;
import ai.driftkit.common.domain.chat.ChatMessage.MessageType;
import ai.driftkit.common.domain.chat.ChatRequest;
import ai.driftkit.common.domain.chat.ChatResponse;
import ai.driftkit.workflow.engine.chat.ChatMessageTask;
import ai.driftkit.workflow.engine.schema.AIFunctionSchema;
import ai.driftkit.workflow.engine.schema.SchemaUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
/**
 * Converts ChatRequest and ChatResponse objects to ChatMessageTask format.
 */
@Slf4j
public class ChatMessageTaskConverter {

    private ChatMessageTaskConverter() {
        // Static utility class - not instantiable (was previously missing).
    }

    /**
     * Converts a ChatMessage to one or more ChatMessageTask objects.
     * For composable ChatRequests, creates a separate ChatMessageTask for each nameId-value pair.
     * For other messages, creates a single ChatMessageTask.
     *
     * @param message The message to convert
     * @return List of converted ChatMessageTask objects
     * @throws IllegalArgumentException if the message is neither a ChatRequest nor a ChatResponse
     */
    public static List<ChatMessageTask> convert(ChatMessage message) {
        if (message instanceof ChatRequest) {
            return convertRequest((ChatRequest) message);
        } else if (message instanceof ChatResponse) {
            List<ChatMessageTask> tasks = new ArrayList<>();
            tasks.add(convertResponse((ChatResponse) message));
            return tasks;
        } else {
            throw new IllegalArgumentException("Unsupported message type: " + message.getClass().getName());
        }
    }

    /**
     * Converts a ChatRequest to one or more ChatMessageTask objects.
     *
     * 1. If ChatRequest is composable=true, creates a pair of tasks (AI prompt +
     *    USER answer) for each nameId-value pair
     * 2. Otherwise creates a single ChatMessageTask with all properties
     *
     * @param request The ChatRequest to convert
     * @return List of converted ChatMessageTask objects
     */
    private static List<ChatMessageTask> convertRequest(ChatRequest request) {
        List<ChatMessageTask> tasks = new ArrayList<>();
        // Guard against a null property list (previously risked an NPE).
        List<DataProperty> requestProps =
                request.getProperties() != null ? request.getProperties() : List.of();
        if (request.getComposable() != null && request.getComposable()) {
            // Composable: one AI task + one USER task per usable property,
            // with monotonically increasing timestamps to preserve ordering.
            long timestamp = request.getTimestamp();
            for (DataProperty prop : requestProps) {
                // Skip properties with no nameId or value
                if (prop.getNameId() == null || prop.getValue() == null) {
                    continue;
                }
                ChatMessageTask task = new ChatMessageTask();
                task.setId(request.getId() + "_" + prop.getNameId() + "_AI");
                task.setType(MessageType.AI);
                task.setNameId(prop.getNameId());
                task.setTimestamp(timestamp++);
                task.setRequired(true);
                tasks.add(task);

                task = new ChatMessageTask();
                task.setId(request.getId() + "_" + prop.getNameId() + "_USER");
                task.setType(request.getType());
                task.setTimestamp(timestamp++);
                List<DataProperty> properties = new ArrayList<>();
                properties.add(getDataProperty(prop));
                // Check if property has valueAsNameId=true and set nameId to property value
                if (prop.isValueAsNameId()) {
                    task.setNameId(prop.getValue());
                    log.debug("Using field value as nameId: {} -> {}", prop.getNameId(), prop.getValue());
                }
                task.setProperties(properties);
                tasks.add(task);
            }
        } else {
            // For non-composable requests, create a single task with all properties.
            ChatMessageTask task = new ChatMessageTask();
            task.setId(request.getId());
            task.setType(request.getType());
            task.setTimestamp(request.getTimestamp());
            // Use requestSchemaName as messageNameId
            task.setNameId(request.getRequestSchemaName());
            if (requestProps.size() == 1) {
                DataProperty prop = requestProps.get(0);
                task.setProperties(List.of(getDataProperty(prop)));
                // Check if property has valueAsNameId=true and set nameId to property value
                if (prop.isValueAsNameId()) {
                    task.setNameId(prop.getValue());
                    log.debug("Using field value as nameId: {} -> {}", prop.getNameId(), prop.getValue());
                }
            } else {
                task.setProperties(new ArrayList<>(requestProps));
                // Check if any property has valueAsNameId=true, and use its value as nameId
                for (DataProperty prop : requestProps) {
                    if (prop.isValueAsNameId()) {
                        task.setNameId(prop.getValue());
                        log.debug("Using field value as nameId: {} -> {}", prop.getNameId(), prop.getValue());
                        break; // Use the first match only
                    }
                }
            }
            // (removed redundant second setType - already set when the task was built)
            tasks.add(task);
        }
        return tasks;
    }

    /**
     * Creates a shallow copy of the given property carrying only the fields
     * the UI needs (name, nameId, value, data, type, multiSelect).
     */
    private static DataProperty getDataProperty(DataProperty prop) {
        DataProperty valueProp = new DataProperty();
        valueProp.setName(prop.getName());
        valueProp.setValue(prop.getValue());
        valueProp.setNameId(prop.getNameId());
        valueProp.setData(prop.getData());
        valueProp.setType(prop.getType());
        valueProp.setMultiSelect(prop.getMultiSelect());
        return valueProp;
    }

    /**
     * Converts a ChatResponse to a ChatMessageTask.
     *
     * 1. Transfers properties to ChatMessageTask properties
     * 2. Uses nextSchema.properties.nameId for the first element with a defined nameId as messageNameId
     * 3. Sets the nextSchema in the ChatMessageTask
     *
     * @param response The ChatResponse to convert
     * @return The converted ChatMessageTask
     */
    private static ChatMessageTask convertResponse(ChatResponse response) {
        ChatMessageTask task = new ChatMessageTask();
        task.setId(response.getId());
        task.setType(response.getType());
        task.setCompleted(response.isCompleted());
        task.setPercentComplete(response.getPercentComplete());
        task.setRequired(response.isRequired());
        // Set properties
        List<DataProperty> properties = response.getProperties();
        if (response.getNextSchema() != null && response.getNextSchema().getProperties() != null) {
            Optional<String> messageNameId = response.getNextSchema().getProperties().stream()
                    .filter(prop -> StringUtils.isNotBlank(prop.getNameId()))
                    .map(ChatResponse.NextProperties::getNameId)
                    .findFirst();
            task.setNameId(messageNameId.orElse(null));
        }
        // Null-guard added: a response without properties previously risked an NPE here.
        if (properties != null && properties.size() == 1) {
            DataProperty prop = properties.get(0);
            if (prop.isValueAsNameId()) {
                task.setNameId(prop.getValue());
            }
        }
        task.setProperties(properties);
        task.setTimestamp(response.getTimestamp());
        task.setNextSchema(response.getNextSchema());
        // (removed redundant second setType - type is already set above)
        // Check if the schema has system flag - only set if true
        if (response.getNextSchema() != null && response.getNextSchema().getSchemaName() != null) {
            Class<?> schemaClass = SchemaUtils.getSchemaClass(response.getNextSchema().getSchemaName());
            if (schemaClass != null) {
                AIFunctionSchema schema = SchemaUtils.getSchemaFromClass(schemaClass);
                if (schema != null && schema.isSystem()) {
                    task.setSystem(true);
                }
            }
        }
        return task;
    }

    /**
     * Converts a list of ChatMessage objects to ChatMessageTask objects,
     * sorted by timestamp. Messages that fail to convert are logged and skipped.
     *
     * @param messages The messages to convert
     * @return The converted ChatMessageTask objects
     */
    public static List<ChatMessageTask> convertAll(List<ChatMessage> messages) {
        List<ChatMessageTask> tasks = new ArrayList<>();
        for (ChatMessage message : messages) {
            try {
                // Convert message to one or more tasks
                tasks.addAll(convert(message));
            } catch (Exception e) {
                log.error("Failed to convert message to task: {}", e.getMessage(), e);
            }
        }
        return tasks.stream()
                .sorted(Comparator.comparing(ChatMessageTask::getTimestamp))
                .collect(Collectors.toList());
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/config/RetryConfigurationManager.java
|
package ai.driftkit.workflow.engine.config;
import ai.driftkit.workflow.engine.annotations.RetryPolicy;
import ai.driftkit.workflow.engine.core.CircuitBreaker;
import lombok.extern.slf4j.Slf4j;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Simple configuration manager for retry policies and circuit breakers.
 * Allows runtime updates to retry configurations.
 * Backed by concurrent maps, so individual reads and writes are thread-safe.
 */
@Slf4j
public class RetryConfigurationManager {

    private final Map<String, RetryPolicy> stepRetryPolicies = new ConcurrentHashMap<>();
    private final Map<String, CircuitBreaker.CircuitBreakerConfig> stepCircuitBreakerConfigs = new ConcurrentHashMap<>();

    /** Registers (or replaces) the retry policy for the given step. */
    public void setStepRetryPolicy(String stepId, RetryPolicy policy) {
        stepRetryPolicies.put(stepId, policy);
        log.debug("Set retry policy for step {}: maxAttempts={}, delay={}",
                stepId, policy.maxAttempts(), policy.delay());
    }

    /** Returns the retry policy for the given step, or null if none is set. */
    public RetryPolicy getStepRetryPolicy(String stepId) {
        return stepRetryPolicies.get(stepId);
    }

    /** Registers (or replaces) the circuit breaker config for the given step. */
    public void setStepCircuitBreakerConfig(String stepId, CircuitBreaker.CircuitBreakerConfig config) {
        stepCircuitBreakerConfigs.put(stepId, config);
        log.debug("Set circuit breaker config for step {}: failureThreshold={}",
                stepId, config.getFailureThreshold());
    }

    /** Returns the circuit breaker config for the given step, or null if none is set. */
    public CircuitBreaker.CircuitBreakerConfig getStepCircuitBreakerConfig(String stepId) {
        return stepCircuitBreakerConfigs.get(stepId);
    }

    /** Removes every stored retry policy and circuit breaker configuration. */
    public void clear() {
        stepRetryPolicies.clear();
        stepCircuitBreakerConfigs.clear();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/AsyncStepHandler.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.annotations.AsyncStep;
import ai.driftkit.workflow.engine.async.TaskProgressReporter;
import ai.driftkit.workflow.engine.builder.FluentApiAsyncStepMetadata;
import ai.driftkit.workflow.engine.core.WorkflowAnalyzer.AsyncStepMetadata;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import ai.driftkit.workflow.engine.utils.ReflectionUtils;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.lang.reflect.Method;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Handles execution of @AsyncStep annotated methods for asynchronous workflow operations.
 * This class manages the mapping between async tasks and their handler methods.
 *
 * <p>Handler resolution order in {@link #handleAsyncResult}: exact primary id,
 * then exact fallback id, then pattern matching (specific prefix patterns
 * before the "*" wildcard).
 */
@Slf4j
@RequiredArgsConstructor
public class AsyncStepHandler {
    // Cache keyed by "workflowId:asyncStepId" (see createKey).
    private final Map<String, AsyncStepInfo> asyncStepCache = new ConcurrentHashMap<>();

    /**
     * Registers async steps from a workflow graph.
     * This is called when a workflow is registered with the engine.
     */
    public void registerWorkflow(WorkflowGraph<?, ?> graph) {
        if (graph.asyncStepMetadata() == null || graph.asyncStepMetadata().isEmpty()) {
            return;
        }
        String workflowId = graph.id();
        Object workflowInstance = graph.workflowInstance();
        for (Entry<String, AsyncStepMetadata> entry : graph.asyncStepMetadata().entrySet()) {
            String asyncStepId = entry.getKey();
            AsyncStepMetadata metadata = entry.getValue();
            String key = createKey(workflowId, asyncStepId);
            // Check if this is a FluentAPI metadata (which has no instance)
            if (metadata instanceof FluentApiAsyncStepMetadata) {
                // Store the metadata directly for FluentAPI handlers
                asyncStepCache.put(key, new FluentApiAsyncStepInfo((FluentApiAsyncStepMetadata) metadata));
            } else {
                // Traditional annotation-based handler
                asyncStepCache.put(key, new AsyncStepInfo(
                        metadata.getMethod(),
                        workflowInstance,
                        metadata.getAnnotation()
                ));
            }
            log.info("Registered async step handler {} for workflow {} with pattern '{}' (FluentAPI: {})",
                    metadata.getMethod().getName(), workflowId, asyncStepId,
                    metadata instanceof FluentApiAsyncStepMetadata);
        }
    }

    /**
     * Handles the result of an async operation by finding and invoking the appropriate @AsyncStep method.
     * This method is called when an async task completes.
     *
     * @param graph The workflow graph
     * @param primaryId The primary ID to look up (usually taskId)
     * @param fallbackId The fallback ID to try if primary not found (usually stepId)
     * @param asyncResult The async task arguments
     * @param context The workflow context
     * @param progressReporter The progress reporter for updating async progress
     * @return The step result; a Continue wrapping the raw result when no handler is found,
     *         or a Fail when handler invocation throws
     */
    public StepResult<?> handleAsyncResult(WorkflowGraph<?, ?> graph,
                                           String primaryId,
                                           String fallbackId,
                                           Object asyncResult,
                                           WorkflowContext context,
                                           TaskProgressReporter progressReporter) {
        String workflowId = graph.id();
        // Try primary ID first (usually taskId)
        String primaryKey = createKey(workflowId, primaryId);
        AsyncStepInfo info = asyncStepCache.get(primaryKey);
        // If not found, try fallback ID (usually stepId)
        if (info == null && fallbackId != null && !fallbackId.equals(primaryId)) {
            String fallbackKey = createKey(workflowId, fallbackId);
            info = asyncStepCache.get(fallbackKey);
            if (info != null) {
                log.debug("Found async handler using fallback ID {} for workflow {}", fallbackId, workflowId);
            }
        }
        // If still not found, try wildcard patterns
        if (info == null) {
            info = findByPattern(workflowId, primaryId);
            if (info != null) {
                log.debug("Found async handler using pattern matching for id {} in workflow {}", primaryId, workflowId);
            }
        }
        if (info == null) {
            log.warn("No async handler found for workflow {} with id {} or {}", workflowId, primaryId, fallbackId);
            // If no handler found, continue with the async result
            return new StepResult.Continue<>(asyncResult);
        }
        try {
            // Check if this is a FluentAPI handler
            if (info instanceof FluentApiAsyncStepInfo) {
                FluentApiAsyncStepInfo fluentInfo = (FluentApiAsyncStepInfo) info;
                log.info("Invoking FluentAPI async handler for id {}", primaryId);
                // Cast asyncResult to Map for FluentAPI handlers
                // NOTE(review): assumes asyncResult is always a Map for FluentAPI
                // handlers - a non-Map here would throw ClassCastException (caught
                // below and converted into a Fail). Confirm callers guarantee this.
                @SuppressWarnings("unchecked")
                Map<String, Object> taskArgs = (Map<String, Object>) asyncResult;
                // Invoke the FluentAPI handler directly
                return fluentInfo.invoke(taskArgs, context, progressReporter);
            } else {
                // Traditional annotation-based handler
                // NOTE(review): the identity check below re-queries the cache purely
                // to choose which id to log; when the handler was found via pattern
                // matching it logs the fallback id (possibly null) - cosmetic only.
                log.debug("Invoking async handler {} for id {}", info.method.getName(), info == asyncStepCache.get(primaryKey) ? primaryId : fallbackId);
                // Build method arguments including AsyncProgressReporter
                Object[] args = ReflectionUtils.buildAsyncMethodArgs(info.method, asyncResult, context, progressReporter);
                Object result = info.method.invoke(info.workflowInstance, args);
                if (!(result instanceof StepResult)) {
                    throw new IllegalStateException(
                            "Async step handler must return StepResult, got: " +
                                    (result != null ? result.getClass().getName() : "null")
                    );
                }
                return (StepResult<?>) result;
            }
        } catch (Exception e) {
            log.error("Error invoking async handler for id {} or {}", primaryId, fallbackId, e);
            return new StepResult.Fail<>(e);
        }
    }

    /**
     * Creates a unique key for async step registration.
     */
    private String createKey(String workflowId, String asyncStepId) {
        return workflowId + ":" + asyncStepId;
    }

    /**
     * Finds async handler by pattern matching.
     * Supports wildcard patterns like "*" or "prefix-*".
     * A specific (non-"*") pattern wins over the bare wildcard.
     */
    private AsyncStepInfo findByPattern(String workflowId, String taskId) {
        String prefix = workflowId + ":";
        AsyncStepInfo wildcardHandler = null;
        for (Map.Entry<String, AsyncStepInfo> entry : asyncStepCache.entrySet()) {
            String key = entry.getKey();
            // Only check entries for this workflow
            if (!key.startsWith(prefix)) {
                continue;
            }
            // Extract the pattern part after "workflowId:"
            String pattern = key.substring(prefix.length());
            // Check if pattern matches taskId
            if (matchesPattern(pattern, taskId)) {
                // If it's a wildcard, save it as fallback
                if ("*".equals(pattern)) {
                    wildcardHandler = entry.getValue();
                } else {
                    // Non-wildcard pattern has priority
                    return entry.getValue();
                }
            }
        }
        // Return wildcard handler if no specific pattern matched
        return wildcardHandler;
    }

    /**
     * Checks if a taskId matches a pattern.
     * Supports "*" for match all and "prefix-*" patterns; anything else
     * must match exactly.
     */
    private boolean matchesPattern(String pattern, String taskId) {
        if ("*".equals(pattern)) {
            return true;
        }
        if (pattern.endsWith("*")) {
            String prefix = pattern.substring(0, pattern.length() - 1);
            return taskId.startsWith(prefix);
        }
        return pattern.equals(taskId);
    }

    /**
     * Clears cached async steps for a workflow.
     * This should be called when a workflow is unregistered.
     */
    public void unregisterWorkflow(String workflowId) {
        asyncStepCache.entrySet().removeIf(entry -> entry.getKey().startsWith(workflowId + ":"));
        log.debug("Unregistered async steps for workflow {}", workflowId);
    }

    /**
     * Internal metadata for registered async steps.
     */
    private static class AsyncStepInfo {
        final Method method;
        final Object workflowInstance;
        final AsyncStep annotation;

        AsyncStepInfo(Method method, Object workflowInstance, AsyncStep annotation) {
            this.method = method;
            this.workflowInstance = workflowInstance;
            this.annotation = annotation;
            // Allow invocation of non-public handler methods via reflection.
            method.setAccessible(true);
        }
    }

    /**
     * Internal metadata for FluentAPI async steps.
     * Extends AsyncStepInfo (with a null instance) so both kinds share the cache.
     */
    private static class FluentApiAsyncStepInfo extends AsyncStepInfo {
        private final FluentApiAsyncStepMetadata fluentMetadata;

        FluentApiAsyncStepInfo(FluentApiAsyncStepMetadata metadata) {
            super(metadata.getMethod(), null, metadata.getAnnotation());
            this.fluentMetadata = metadata;
        }

        StepResult<?> invoke(Map<String, Object> taskArgs, WorkflowContext context, TaskProgressReporter progress) {
            return fluentMetadata.invoke(taskArgs, context, progress);
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/AsyncTaskManager.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.async.ProgressTracker;
import ai.driftkit.workflow.engine.async.TaskProgressReporter;
import ai.driftkit.workflow.engine.domain.AsyncStepState;
import ai.driftkit.workflow.engine.domain.WorkflowEvent;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
import ai.driftkit.workflow.engine.persistence.AsyncStepStateRepository;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import ai.driftkit.workflow.engine.persistence.WorkflowStateRepository;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
* Manages asynchronous task execution for workflows.
* This component handles async step execution, progress tracking,
* and result handling.
*/
@Slf4j
@RequiredArgsConstructor
public class AsyncTaskManager {
    // Executor used to run traditional async handlers (supplyAsync).
    private final ExecutorService executorService;
    // Publishes progress/completion/error events for async tasks.
    private final ProgressTracker progressTracker;
    // Workflow instance persistence (injected; not used in the methods visible here).
    private final WorkflowStateRepository stateRepository;
    // Resolves and invokes @AsyncStep handler methods.
    private final AsyncStepHandler asyncStepHandler;
    // Persists per-task async state snapshots.
    private final AsyncStepStateRepository asyncStepStateRepository;
    // Live async tasks keyed by taskId; entries are removed on completion or cancel.
    private final Map<String, AsyncTaskInfo> runningTasks = new ConcurrentHashMap<>();
    /**
     * Handles an async step result with unified CompletableFuture approach.
     * Persists the initial async state, wires progress tracking, applies an
     * optional timeout, registers the running task for cancellation lookup,
     * and always resolves to a StepResult (failures become StepResult.Fail).
     */
    public CompletableFuture<StepResult<?>> handleAsyncStep(
            WorkflowInstance instance,
            WorkflowGraph<?, ?> graph,
            String stepId,
            StepResult.Async<?> asyncResult) {
        String taskId = asyncResult.taskId();
        Object immediateData = asyncResult.immediateData();
        // Create async state
        AsyncStepState asyncState = AsyncStepState.started(taskId, immediateData);
        asyncStepStateRepository.save(asyncState);
        // Track the execution; reuse immediateData as the initial event when it already is one.
        WorkflowEvent initialEvent = immediateData instanceof WorkflowEvent ?
                (WorkflowEvent) immediateData :
                WorkflowEvent.asyncStarted(taskId, "");
        progressTracker.trackExecution(taskId, initialEvent);
        // Create unified progress reporter
        TaskProgressReporter progressReporter = progressTracker.createReporter(taskId);
        // Convert to unified CompletableFuture handling
        CompletableFuture<StepResult<?>> future = createAsyncFuture(
                instance, graph, stepId, asyncResult, progressReporter);
        // Apply timeout if specified
        // NOTE(review): estimatedDurationMs is used here as a hard timeout - confirm
        // an "estimated" duration is really meant to abort the task when exceeded.
        if (asyncResult.estimatedDurationMs() > 0) {
            future = future.orTimeout(asyncResult.estimatedDurationMs(), TimeUnit.MILLISECONDS)
                    .exceptionally(error -> {
                        if (error instanceof TimeoutException) {
                            log.error("Async task {} timed out after {}ms", taskId, asyncResult.estimatedDurationMs());
                            return new StepResult.Fail<>(new RuntimeException("Async task timeout", error));
                        }
                        return new StepResult.Fail<>(error);
                    });
        }
        // Handle completion uniformly
        CompletableFuture<StepResult<?>> resultFuture = future
                .whenComplete((result, error) -> {
                    updateAsyncState(instance.getInstanceId(), stepId, state -> {
                        if (error != null) {
                            state.fail(error);
                            progressTracker.onError(taskId, error);
                        } else {
                            state.complete(result);
                            progressTracker.updateExecutionStatus(taskId,
                                    WorkflowEvent.completed(Map.of("taskId", taskId, "status", "completed")));
                        }
                    });
                })
                // Convert any remaining exception into a Fail result so callers
                // always receive a StepResult rather than an exceptional future.
                .exceptionally(error -> {
                    log.error("Async task {} failed", taskId, error);
                    return new StepResult.Fail<>(error);
                });
        // Store task info so the task can be found for cancellation/progress queries.
        runningTasks.put(taskId, new AsyncTaskInfo(
                instance.getInstanceId(), stepId, taskId, resultFuture
        ));
        // Clean up on completion
        resultFuture.whenComplete((result, error) -> {
            runningTasks.remove(taskId);
            if (error == null) {
                log.debug("Async task {} completed successfully", taskId);
                // NOTE(review): onComplete fires even when 'result' is a Fail produced
                // by the exceptionally() stage above - confirm this is intended.
                progressTracker.onComplete(taskId, result);
            }
        });
        return resultFuture;
    }
    /**
     * Creates a unified CompletableFuture for async execution.
     * Handles both CompletableFuture-based and traditional async handler approaches:
     * if taskArgs carries a future under Keys.ASYNC_FUTURE, its value is adapted;
     * otherwise the registered @AsyncStep handler is invoked on the executor.
     */
    private CompletableFuture<StepResult<?>> createAsyncFuture(
            WorkflowInstance instance,
            WorkflowGraph<?, ?> graph,
            String stepId,
            StepResult.Async<?> asyncResult,
            TaskProgressReporter progressReporter) {
        String taskId = asyncResult.taskId();
        // Check if taskArgs contains a CompletableFuture
        Object futureObj = asyncResult.taskArgs().get(WorkflowContext.Keys.ASYNC_FUTURE);
        if (futureObj instanceof CompletableFuture<?>) {
            // Handle existing CompletableFuture
            @SuppressWarnings("unchecked")
            CompletableFuture<Object> existingFuture = (CompletableFuture<Object>) futureObj;
            return existingFuture.thenApply(result -> {
                log.debug("Async future completed for task {}", taskId);
                // Plain values are wrapped into Continue/Finish based on graph position.
                // NOTE(review): only this branch normalizes - handler results below
                // are returned as-is (handlers must return StepResult themselves).
                return normalizeResult(result, graph, stepId);
            });
        }
        // Create CompletableFuture for traditional async handler
        return CompletableFuture.supplyAsync(() -> {
            try {
                log.debug("Executing async handler for task {} on step {}", taskId, stepId);
                StepResult<?> handlerResult = asyncStepHandler.handleAsyncResult(
                        graph,
                        taskId,
                        stepId,
                        asyncResult.taskArgs(),
                        instance.getContext(),
                        progressReporter
                );
                if (handlerResult == null) {
                    throw new IllegalStateException(
                            "Async handler returned null result for task " + taskId +
                                    ". Async handlers must return a valid StepResult.");
                }
                return handlerResult;
            } catch (Exception e) {
                log.error("Async handler failed for task {}", taskId, e);
                // Rethrow so the caller's whenComplete/exceptionally stages see the failure.
                throw new RuntimeException("Async execution failed", e);
            }
        }, executorService);
    }
/**
 * Ensures a raw async result is wrapped as a {@code StepResult}.
 *
 * <p>Values that are already a {@code StepResult} pass through untouched.
 * Plain values are wrapped as {@code Finish} when the producing step has no
 * outgoing edges (i.e. it is terminal in the graph), otherwise as {@code Continue}.
 *
 * @param result the raw value produced by the async task
 * @param graph  the workflow graph used to inspect outgoing edges
 * @param stepId the step that produced the value
 * @return the value as a {@code StepResult}
 */
private StepResult<?> normalizeResult(Object result, WorkflowGraph<?, ?> graph, String stepId) {
    if (result instanceof StepResult<?> stepResult) {
        return stepResult;
    }
    // Terminal step (known node, no outgoing edges) => Finish; otherwise Continue.
    var node = graph.nodes().get(stepId);
    if (node != null && graph.getOutgoingEdges(stepId).isEmpty()) {
        return new StepResult.Finish<>(result);
    }
    return new StepResult.Continue<>(result);
}
/**
 * Cancels an async task if it's running.
 *
 * <p>Matches running tasks by instance id and step id, attempts to cancel each
 * (interrupting if already running), and reports an error to the progress
 * tracker for every task that was actually cancelled.
 *
 * @param instanceId workflow instance that owns the task(s)
 * @param stepId     step whose async task(s) should be cancelled
 * @return {@code true} if at least one task was cancelled
 */
public boolean cancelAsyncTask(String instanceId, String stepId) {
    // Snapshot matching tasks first so removal does not interfere with iteration.
    var matches = runningTasks.values().stream()
            .filter(info -> info.instanceId().equals(instanceId)
                    && info.stepId().equals(stepId))
            .toList();
    boolean cancelledAny = false;
    for (var info : matches) {
        if (!info.future().cancel(true)) {
            continue;
        }
        cancelledAny = true;
        runningTasks.remove(info.taskId());
        progressTracker.onError(info.taskId(),
            new RuntimeException("Task cancelled"));
    }
    return cancelledAny;
}
/**
 * Gets the current progress of an async task.
 *
 * @param instanceId workflow instance that owns the task
 * @param stepId     step whose task progress is requested
 * @return a progress event for the first matching running task, or empty if
 *         no such task is running or no progress has been recorded
 */
public Optional<WorkflowEvent> getAsyncProgress(String instanceId, String stepId) {
    for (var info : runningTasks.values()) {
        if (!info.instanceId().equals(instanceId) || !info.stepId().equals(stepId)) {
            continue;
        }
        return progressTracker.getProgress(info.taskId())
                .map(progress -> WorkflowEvent.withProgress(
                        progress.percentComplete(),
                        progress.message()
                ));
    }
    return Optional.empty();
}
/**
 * Updates async state for a step.
 *
 * <p>Currently a placeholder: no state is loaded, mutated, or persisted here —
 * the method only logs the request and the {@code updater} is never invoked.
 *
 * @param instanceId the workflow instance identifier
 * @param stepId     the step whose async state should be updated
 * @param updater    mutation to apply to the step's async state (ignored for now)
 */
private void updateAsyncState(String instanceId, String stepId,
                              Consumer<AsyncStepState> updater) {
    // This would need to be implemented with proper state loading/saving
    // For now, we'll assume the state is updated in the instance
    log.debug("Updating async state for instance {} step {}", instanceId, stepId);
}
/**
 * Cancels all async tasks for a workflow instance.
 *
 * @param instanceId The workflow instance ID
 * @return true if any tasks were cancelled, false otherwise
 */
public boolean cancelAsyncTasks(String instanceId) {
    // Collect first, then cancel, so map mutation is kept out of the stream.
    var instanceTasks = runningTasks.values().stream()
            .filter(info -> info.instanceId().equals(instanceId))
            .toList();
    boolean cancelledAny = false;
    for (var info : instanceTasks) {
        if (!info.future().cancel(true)) {
            continue;
        }
        runningTasks.remove(info.taskId());
        progressTracker.onError(info.taskId(),
            new RuntimeException("Workflow cancelled"));
        log.info("Cancelled async task {} for instance {}",
            info.taskId(), instanceId);
        cancelledAny = true;
    }
    return cancelledAny;
}
/**
 * Information about a running async task.
 *
 * @param instanceId workflow instance that owns the task
 * @param stepId     step that spawned the task
 * @param taskId     unique identifier of the async task
 * @param future     future tracking the task's completion and used for cancellation
 */
private record AsyncTaskInfo(
    String instanceId,
    String stepId,
    String taskId,
    CompletableFuture<StepResult<?>> future
) {}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/ChatTrackingInterceptor.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.common.domain.chat.ChatMessage;
import ai.driftkit.common.domain.chat.ChatMessage.MessageType;
import ai.driftkit.common.service.ChatStore;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import ai.driftkit.workflow.engine.schema.SchemaUtils;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.HashMap;
import java.util.Map;
/**
* Interceptor that automatically tracks chat messages to ChatStore
* for Suspend, Async, and Finish step results.
*/
@Slf4j
@RequiredArgsConstructor
public class ChatTrackingInterceptor implements ExecutionInterceptor {
private final ChatStore chatStore;
@Override
public void beforeStep(WorkflowInstance instance, StepNode step, Object input) {
// No action needed before step
}
@Override
public void afterStep(WorkflowInstance instance, StepNode step, StepResult<?> result) {
String chatId = instance.getInstanceId();
switch (result) {
case StepResult.Suspend<?> suspend -> {
// For simple types like String, create a simple properties map
if (suspend.promptToUser() instanceof String prompt) {
chatStore.add(chatId, prompt, MessageType.AI);
} else {
Map<String, String> properties = SchemaUtils.extractProperties(suspend.promptToUser());
chatStore.add(chatId, properties, MessageType.AI);
}
log.debug("Tracked suspend message for chat: {}", chatId);
}
case StepResult.Async<?> async -> {
if (async.immediateData() instanceof String message) {
chatStore.add(chatId, message, MessageType.AI);
} else if (async.immediateData() != null) {
Map<String, String> properties = SchemaUtils.extractProperties(async.immediateData());
chatStore.add(chatId, properties, MessageType.AI);
}
log.debug("Tracked async start message for chat: {}", chatId);
}
case StepResult.Finish<?> finish -> {
if (finish.result() instanceof String message) {
chatStore.add(chatId, message, MessageType.AI);
} else if (finish.result() != null) {
Map<String, String> properties = SchemaUtils.extractProperties(finish.result());
chatStore.add(chatId, properties, MessageType.AI);
}
log.debug("Tracked finish message for chat: {}", chatId);
}
default -> {
// Continue, Branch, and Fail results are not tracked automatically
}
}
}
@Override
public void onStepError(WorkflowInstance instance, StepNode step, Exception error) {
String chatId = instance.getInstanceId();
Map<String, String> properties = new HashMap<>();
properties.put(ChatMessage.PROPERTY_MESSAGE, "Error in step " + step.id() + ": " + error.getMessage());
properties.put("stepId", step.id());
properties.put("status", "error");
properties.put("error", error.getMessage());
chatStore.add(chatId, properties, MessageType.SYSTEM);
log.debug("Tracked error message for chat: {}", chatId);
}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/CircuitBreaker.java
|
package ai.driftkit.workflow.engine.core;
import lombok.Builder;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Circuit breaker implementation for workflow steps.
 * Prevents cascading failures by temporarily blocking execution of failing steps.
 *
 * States:
 * - CLOSED: Normal operation, requests pass through
 * - OPEN: Failure threshold exceeded, requests are blocked
 * - HALF_OPEN: Testing if the service has recovered
 *
 * <p>Each step id gets its own independent circuit. State transitions within a
 * circuit are serialized via {@code synchronized} methods on {@link CircuitState}.
 */
@Slf4j
public class CircuitBreaker {

    /** Per-step circuit state, keyed by step id. */
    private final ConcurrentHashMap<String, CircuitState> circuitStates = new ConcurrentHashMap<>();
    private final CircuitBreakerConfig config;

    public CircuitBreaker(CircuitBreakerConfig config) {
        this.config = config;
    }

    public CircuitBreaker() {
        this(CircuitBreakerConfig.defaultConfig());
    }

    /**
     * Checks if the circuit allows execution for the given step.
     * Creates a fresh CLOSED circuit for previously unseen step ids.
     *
     * @param stepId The step identifier
     * @return true if execution is allowed, false if circuit is open
     */
    public boolean allowExecution(String stepId) {
        CircuitState state = circuitStates.computeIfAbsent(stepId,
            k -> new CircuitState(config));
        return state.allowExecution();
    }

    /**
     * Records a successful execution.
     *
     * @param stepId The step identifier
     */
    public void recordSuccess(String stepId) {
        CircuitState state = circuitStates.get(stepId);
        if (state != null) {
            state.recordSuccess();
        }
    }

    /**
     * Records a failed execution.
     *
     * @param stepId The step identifier
     * @param exception The exception that occurred
     */
    public void recordFailure(String stepId, Exception exception) {
        CircuitState state = circuitStates.computeIfAbsent(stepId,
            k -> new CircuitState(config));
        state.recordFailure(exception);
    }

    /**
     * Gets the current state of a circuit.
     *
     * @param stepId The step identifier
     * @return The current circuit state (CLOSED for unknown step ids)
     */
    public State getState(String stepId) {
        CircuitState state = circuitStates.get(stepId);
        return state != null ? state.getState() : State.CLOSED;
    }

    /**
     * Resets the circuit breaker for a specific step.
     *
     * @param stepId The step identifier
     */
    public void reset(String stepId) {
        circuitStates.remove(stepId);
    }

    /**
     * Resets all circuit breakers.
     */
    public void resetAll() {
        circuitStates.clear();
    }

    /**
     * Exports the state of a circuit breaker for persistence.
     *
     * @param stepId The step identifier
     * @return The circuit state snapshot or null if not found
     */
    public CircuitStateSnapshot exportState(String stepId) {
        CircuitState state = circuitStates.get(stepId);
        if (state == null) {
            return null;
        }
        return state.toSnapshot();
    }

    /**
     * Imports a circuit breaker state from persistence.
     * A null snapshot is ignored.
     *
     * @param stepId The step identifier
     * @param snapshot The state snapshot to import
     */
    public void importState(String stepId, CircuitStateSnapshot snapshot) {
        if (snapshot != null) {
            CircuitState state = CircuitState.fromSnapshot(snapshot, config);
            circuitStates.put(stepId, state);
        }
    }

    /**
     * Circuit breaker states.
     */
    public enum State {
        CLOSED,    // Normal operation
        OPEN,      // Blocking requests
        HALF_OPEN  // Testing recovery
    }

    /**
     * Snapshot of circuit breaker state for persistence.
     */
    public record CircuitStateSnapshot(
        State state,
        int failureCount,
        int successCount,
        int halfOpenAttempts,
        long lastFailureTime,
        long stateChangeTime
    ) {}

    /**
     * Configuration for circuit breaker behavior.
     */
    @Getter
    @Builder
    public static class CircuitBreakerConfig {
        @Builder.Default
        private final int failureThreshold = 5;
        @Builder.Default
        private final int successThreshold = 2;
        @Builder.Default
        private final long openDurationMs = 60000; // 1 minute
        @Builder.Default
        private final long halfOpenDurationMs = 30000; // 30 seconds
        @Builder.Default
        private final int halfOpenMaxAttempts = 3;

        public static CircuitBreakerConfig defaultConfig() {
            return CircuitBreakerConfig.builder().build();
        }
    }

    /**
     * Internal state management for a single circuit.
     * All mutating methods are synchronized so transitions stay consistent.
     */
    private static class CircuitState {
        private final CircuitBreakerConfig config;
        private final AtomicInteger failureCount = new AtomicInteger(0);
        private final AtomicInteger successCount = new AtomicInteger(0);
        private final AtomicInteger halfOpenAttempts = new AtomicInteger(0);
        private final AtomicLong lastFailureTime = new AtomicLong(0);
        private final AtomicLong stateChangeTime = new AtomicLong(System.currentTimeMillis());
        private volatile State state = State.CLOSED;

        CircuitState(CircuitBreakerConfig config) {
            this.config = config;
        }

        synchronized boolean allowExecution() {
            long now = System.currentTimeMillis();
            switch (state) {
                case CLOSED:
                    return true;
                case OPEN:
                    // Check if we should transition to half-open
                    if (now - stateChangeTime.get() >= config.openDurationMs) {
                        transitionTo(State.HALF_OPEN);
                        halfOpenAttempts.set(1); // Count this as the first attempt
                        return true; // Allow one attempt
                    }
                    return false;
                case HALF_OPEN:
                    // Allow limited attempts in half-open state
                    int currentAttempts = halfOpenAttempts.get();
                    if (currentAttempts < config.halfOpenMaxAttempts) {
                        halfOpenAttempts.incrementAndGet();
                        return true;
                    }
                    // If we've exhausted half-open attempts without success, go back to open
                    if (now - stateChangeTime.get() >= config.halfOpenDurationMs) {
                        transitionTo(State.OPEN);
                    }
                    return false;
                default:
                    return false;
            }
        }

        synchronized void recordSuccess() {
            successCount.incrementAndGet();
            switch (state) {
                case HALF_OPEN:
                    if (successCount.get() >= config.successThreshold) {
                        // Recovered successfully
                        transitionTo(State.CLOSED);
                        failureCount.set(0);
                        successCount.set(0);
                    }
                    break;
                case CLOSED:
                    // Reset failure count on success in closed state
                    failureCount.set(0);
                    break;
                default:
                    // No action in OPEN state
                    break;
            }
        }

        synchronized void recordFailure(Exception exception) {
            lastFailureTime.set(System.currentTimeMillis());
            failureCount.incrementAndGet();
            switch (state) {
                case CLOSED:
                    if (failureCount.get() >= config.failureThreshold) {
                        transitionTo(State.OPEN);
                        log.warn("Circuit breaker opened due to {} failures", failureCount.get());
                    }
                    break;
                case HALF_OPEN:
                    // Any failure in half-open state sends us back to open
                    transitionTo(State.OPEN);
                    log.warn("Circuit breaker reopened due to failure in half-open state");
                    break;
                default:
                    // Already open
                    break;
            }
        }

        private void transitionTo(State newState) {
            State oldState = this.state;
            this.state = newState;
            stateChangeTime.set(System.currentTimeMillis());
            if (newState == State.CLOSED) {
                failureCount.set(0);
                successCount.set(0);
                halfOpenAttempts.set(0);
            } else if (newState == State.HALF_OPEN) {
                // BUGFIX: discard successes accumulated during a previous
                // half-open probe window. Without this reset, a stale
                // successCount (e.g. one success before the probe failed)
                // would let a single new success in the fresh half-open
                // window reach successThreshold and close the circuit
                // prematurely.
                successCount.set(0);
                halfOpenAttempts.set(0);
            }
            log.info("Circuit breaker state transition: {} -> {}", oldState, newState);
        }

        State getState() {
            return state;
        }

        CircuitStateSnapshot toSnapshot() {
            return new CircuitStateSnapshot(
                state,
                failureCount.get(),
                successCount.get(),
                halfOpenAttempts.get(),
                lastFailureTime.get(),
                stateChangeTime.get()
            );
        }

        static CircuitState fromSnapshot(CircuitStateSnapshot snapshot, CircuitBreakerConfig config) {
            CircuitState state = new CircuitState(config);
            state.state = snapshot.state();
            state.failureCount.set(snapshot.failureCount());
            state.successCount.set(snapshot.successCount());
            state.halfOpenAttempts.set(snapshot.halfOpenAttempts());
            state.lastFailureTime.set(snapshot.lastFailureTime());
            state.stateChangeTime.set(snapshot.stateChangeTime());
            return state;
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/ConditionalRetryStrategy.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.annotations.RetryPolicy;
import ai.driftkit.workflow.engine.domain.RetryContext;
import lombok.extern.slf4j.Slf4j;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
/**
 * Retry strategy that applies conditions based on exception types.
 * Extends the default strategy with conditional retry logic: the
 * {@code abortOn} list wins over {@code retryOn}, and both are matched
 * against the full cause chain of the failure.
 */
@Slf4j
public class ConditionalRetryStrategy extends DefaultRetryStrategy {

    @Override
    public boolean shouldRetry(Throwable exception, RetryContext context, RetryPolicy policy) {
        // Out of attempts: never retry.
        if (context.getAttemptNumber() >= policy.maxAttempts()) {
            log.debug("No more retry attempts for step {}: {}/{}",
                context.getStepId(), context.getAttemptNumber(), policy.maxAttempts());
            return false;
        }
        // Root cause is used only for readable log output below.
        Class<? extends Throwable> exceptionClass = getRootCause(exception).getClass();
        // Abort conditions take precedence over retry conditions.
        Set<Class<? extends Throwable>> abortSet = new HashSet<>(Arrays.asList(policy.abortOn()));
        if (!abortSet.isEmpty() && matchesAnyInChain(exception, abortSet)) {
            log.debug("Exception {} or its cause matches abort condition, not retrying", exceptionClass.getSimpleName());
            return false;
        }
        Set<Class<? extends Throwable>> retrySet = new HashSet<>(Arrays.asList(policy.retryOn()));
        if (retrySet.isEmpty()) {
            // No explicit retry conditions configured: retry by default.
            return true;
        }
        boolean retryable = matchesAnyInChain(exception, retrySet);
        if (!retryable) {
            log.debug("Exception {} and its causes do not match retry conditions {}",
                exceptionClass.getSimpleName(), retrySet);
            // Debug aid: dump the full cause chain.
            for (Throwable t = exception; t != null; t = t.getCause()) {
                log.debug(" - Exception in chain: {}", t.getClass().getName());
            }
        }
        return retryable;
    }

    /**
     * Walks the cause chain to its end (guarding against self-referential causes).
     *
     * @param throwable The throwable to analyze
     * @return The deepest cause in the chain
     */
    private Throwable getRootCause(Throwable throwable) {
        Throwable deepest = throwable;
        while (deepest.getCause() != null && deepest.getCause() != deepest) {
            deepest = deepest.getCause();
        }
        return deepest;
    }

    /**
     * Checks whether the given exception class is assignable to any target class
     * (i.e. matches directly or via inheritance).
     *
     * @param exceptionClass The exception class to check
     * @param targetClasses The set of target classes
     * @return true if there's a match
     */
    private boolean matchesAny(Class<? extends Throwable> exceptionClass,
                               Set<Class<? extends Throwable>> targetClasses) {
        for (Class<? extends Throwable> candidate : targetClasses) {
            if (candidate.isAssignableFrom(exceptionClass)) {
                return true;
            }
        }
        return false;
    }

    /**
     * Checks whether any throwable in the cause chain matches the target classes.
     *
     * @param throwable The throwable to check (including its causes)
     * @param targetClasses The set of target classes
     * @return true if there's a match anywhere in the chain
     */
    private boolean matchesAnyInChain(Throwable throwable,
                                      Set<Class<? extends Throwable>> targetClasses) {
        for (Throwable t = throwable; t != null; t = t.getCause()) {
            if (matchesAny(t.getClass(), targetClasses)) {
                return true;
            }
        }
        return false;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/DefaultRetryStrategy.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.annotations.RetryPolicy;
import ai.driftkit.workflow.engine.domain.RetryContext;
import lombok.extern.slf4j.Slf4j;
import java.util.concurrent.ThreadLocalRandom;
/**
 * Default implementation of RetryStrategy that supports exponential backoff with jitter.
 *
 * <p>Retry eligibility is decided by attempt count and by the policy's
 * {@code retryOn}/{@code abortOn} exception lists ({@code abortOn} wins).
 */
@Slf4j
public class DefaultRetryStrategy implements RetryStrategy {

    @Override
    public boolean shouldRetry(Throwable failure, RetryContext context, RetryPolicy policy) {
        // Attempt budget exhausted?
        if (context.getAttemptNumber() >= policy.maxAttempts()) {
            log.debug("No more retry attempts for step {}: {}/{}",
                context.getStepId(), context.getAttemptNumber(), policy.maxAttempts());
            return false;
        }
        // Exception type allowed by the policy?
        if (isRetryableException(failure, policy)) {
            return true;
        }
        log.debug("Exception {} is not retryable for step {}",
            failure.getClass().getName(), context.getStepId());
        return false;
    }

    @Override
    public long calculateDelay(RetryContext context, RetryPolicy policy) {
        // 0-based count of retries already performed.
        int completedRetries = context.getAttemptNumber() - 1;
        long delay = policy.delay();
        // Exponential backoff kicks in from the second retry onward.
        if (completedRetries > 0 && policy.backoffMultiplier() > 1.0) {
            delay = (long) (delay * Math.pow(policy.backoffMultiplier(), completedRetries));
        }
        // Never exceed the configured ceiling.
        delay = Math.min(delay, policy.maxDelay());
        // Add random jitter to spread out concurrent retries.
        if (policy.jitterFactor() > 0) {
            double jitter = ThreadLocalRandom.current().nextDouble() * policy.jitterFactor();
            delay = (long) (delay * (1 + jitter));
        }
        return delay;
    }

    @Override
    public void beforeRetry(RetryContext context, long delay) {
        log.info("Retrying step {} (attempt {}/{}) after {}ms delay",
            context.getStepId(),
            context.getAttemptNumber() + 1,
            context.getMaxAttempts(),
            delay);
    }

    @Override
    public void afterRetry(RetryContext context, boolean success, long duration) {
        if (success) {
            log.info("Retry successful for step {} on attempt {} (took {}ms)",
                context.getStepId(), context.getAttemptNumber(), duration);
        } else {
            log.warn("Retry failed for step {} on attempt {} (took {}ms)",
                context.getStepId(), context.getAttemptNumber(), duration);
        }
    }

    /**
     * Decides whether the failure's type is retryable under the policy.
     * The abort list takes precedence; an empty retry list means "retry anything
     * not aborted".
     */
    private boolean isRetryableException(Throwable failure, RetryPolicy policy) {
        Class<? extends Throwable> failureType = failure.getClass();
        // Abort list wins over everything else.
        for (Class<? extends Throwable> abortType : policy.abortOn()) {
            if (abortType.isAssignableFrom(failureType)) {
                return false;
            }
        }
        Class<? extends Throwable>[] retryOn = policy.retryOn();
        if (retryOn.length == 0) {
            // No explicit retry list: everything not aborted is retryable.
            return true;
        }
        for (Class<? extends Throwable> retryType : retryOn) {
            if (retryType.isAssignableFrom(failureType)) {
                return true;
            }
        }
        return false;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/DefaultStepRouter.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.graph.Edge;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Default implementation of StepRouter that provides type-based routing logic.
 * This implementation uses the following priority order:
 * 1. Conditional edges (explicit predicates)
 * 2. Type-based routing (finding steps that accept the data type)
 * 3. Sequential edges (fallback behavior)
 *
 * <p>All methods are stateless; routing decisions depend only on the graph and
 * the data/event passed in, so instances can be shared freely.
 */
@Slf4j
public class DefaultStepRouter implements StepRouter {

    /**
     * Resolves the next step after {@code currentStepId} given the step's output data.
     *
     * <p>Priority: conditional edges whose predicate accepts the data, then
     * sequential/branch edge targets whose declared input type accepts the data,
     * then a graph-wide type search, and finally plain sequential edges.
     * Returns {@code null} when no route can be determined.
     *
     * @param currentStepId the step that just completed
     * @param data          the output data used for predicate and type matching (may be null)
     * @return the next step id, or {@code null} if routing failed
     */
    @Override
    public String findNextStep(WorkflowGraph<?, ?> graph, String currentStepId, Object data) {
        List<Edge> edges = graph.getOutgoingEdges(currentStepId);
        if (edges.isEmpty()) {
            // No explicit edges - try pure type-based routing
            if (data != null) {
                log.debug("No edges from step {}, attempting pure type-based routing for type {}",
                    currentStepId, data.getClass().getSimpleName());
                return findStepForInputType(graph, data.getClass(), currentStepId);
            }
            return null;
        }
        // Priority 1: Check conditional edges (explicit predicates)
        for (Edge edge : edges) {
            if (edge.type() == Edge.EdgeType.CONDITIONAL && edge.shouldFollow(data)) {
                log.debug("Following conditional edge from {} to {}", currentStepId, edge.toStepId());
                return edge.toStepId();
            }
        }
        // Priority 2: Type-based resolution - find step that accepts the data type
        if (data != null) {
            Class<?> dataType = data.getClass();
            // Check all edges and find target steps that can accept this data type
            for (Edge edge : edges) {
                // Skip non-sequential edges for type matching
                if (edge.type() != Edge.EdgeType.SEQUENTIAL && edge.type() != Edge.EdgeType.BRANCH) {
                    continue;
                }
                String targetStepId = edge.toStepId();
                StepNode targetStep = graph.getNode(targetStepId).orElse(null);
                if (targetStep != null && targetStep.canAcceptInput(dataType)) {
                    log.debug("Type-based routing: {} -> {} (data type: {})",
                        currentStepId, targetStepId, dataType.getSimpleName());
                    return targetStepId;
                }
            }
            // If no direct edge target accepts the type, search all nodes
            log.debug("No direct edge target accepts type {}, searching all nodes", dataType.getSimpleName());
            String typeMatchingStep = findStepForInputType(graph, dataType, currentStepId);
            if (typeMatchingStep != null) {
                log.debug("Found step {} that accepts type {} (no direct edge)",
                    typeMatchingStep, dataType.getSimpleName());
                return typeMatchingStep;
            }
        }
        // Priority 3: Fall back to sequential edges (original behavior)
        List<Edge> sequentialEdges = edges.stream()
            .filter(e -> e.type() == Edge.EdgeType.SEQUENTIAL)
            .collect(Collectors.toList());
        if (sequentialEdges.size() == 1) {
            return sequentialEdges.get(0).toStepId();
        } else if (sequentialEdges.size() > 1) {
            // If multiple sequential edges exist, prefer one where target accepts the data type
            if (data != null) {
                for (Edge edge : sequentialEdges) {
                    StepNode targetStep = graph.getNode(edge.toStepId()).orElse(null);
                    if (targetStep != null && targetStep.canAcceptInput(data.getClass())) {
                        log.debug("Multiple sequential edges: choosing {} which accepts type {}",
                            edge.toStepId(), data.getClass().getSimpleName());
                        return edge.toStepId();
                    }
                }
            }
            // Ambiguous routing: fall back to the first declared edge.
            log.warn("Multiple sequential edges from step: {}. Using first one.", currentStepId);
            return sequentialEdges.get(0).toStepId();
        }
        return null;
    }

    /**
     * Resolves the target step for a branch event emitted by {@code currentStepId}.
     *
     * <p>Priority: BRANCH edges whose predicate accepts the event, then BRANCH
     * edge targets whose input type accepts the event, then a graph-wide type
     * search. Returns {@code null} when nothing matches.
     *
     * @param currentStepId the branching step
     * @param event         the branch event (may be null; then only predicate matching applies)
     * @return the branch target step id, or {@code null} if none found
     */
    @Override
    public String findBranchTarget(WorkflowGraph<?, ?> graph, String currentStepId, Object event) {
        log.debug("Finding branch target from step: {} for event: {} (type: {})",
            currentStepId, event, event != null ? event.getClass().getName() : "null");
        List<Edge> edges = graph.getOutgoingEdges(currentStepId);
        log.debug("Available edges from {}: {}", currentStepId, edges.size());
        // Priority 1: Find matching branch edge by event type
        for (Edge edge : edges) {
            log.debug("Checking edge: {} -> {}, type: {}, eventType: {}, branchValue: {}",
                edge.fromStepId(), edge.toStepId(), edge.type(),
                edge.eventType() != null ? edge.eventType().getSimpleName() : "null",
                edge.branchValue());
            if (edge.type() == Edge.EdgeType.BRANCH && edge.shouldFollow(event)) {
                log.debug("Found matching branch edge: {} -> {}", currentStepId, edge.toStepId());
                return edge.toStepId();
            }
        }
        // Priority 2: Type-based resolution - find any step that accepts the event type
        if (event != null) {
            Class<?> eventType = event.getClass();
            log.debug("No exact branch match, trying type-based resolution for {}", eventType.getName());
            // First check branch edge targets
            for (Edge edge : edges) {
                if (edge.type() == Edge.EdgeType.BRANCH) {
                    StepNode targetStep = graph.getNode(edge.toStepId()).orElse(null);
                    if (targetStep != null && targetStep.canAcceptInput(eventType)) {
                        log.debug("Branch type-based routing: {} -> {} (event type: {})",
                            currentStepId, edge.toStepId(), eventType.getSimpleName());
                        return edge.toStepId();
                    }
                }
            }
            // If no branch edge target accepts the type, search all nodes
            String typeMatchingStep = findStepForInputType(graph, eventType, currentStepId);
            if (typeMatchingStep != null) {
                log.debug("Branch found step {} that accepts event type {} (no direct edge)",
                    typeMatchingStep, eventType.getSimpleName());
                return typeMatchingStep;
            }
        }
        log.warn("No branch target found for event: {} from step: {}", event, currentStepId);
        return null;
    }

    /**
     * Finds a step (other than {@code excludeStepId}, unless nothing else fits)
     * whose declared input type accepts {@code inputType}.
     *
     * <p>Search order: BRANCH edges leaving {@code excludeStepId} with a matching
     * declared event type, then all non-initial nodes except the excluded one,
     * then — as a last resort — the excluded step itself (allowing a step to
     * handle repeated inputs of the same type). Returns {@code null} and logs the
     * available steps when nothing matches.
     *
     * @param inputType     the runtime type to route
     * @param excludeStepId step to skip on the first pass (loop prevention); may be null
     * @return the matching step id, or {@code null}
     */
    @Override
    public String findStepForInputType(WorkflowGraph<?, ?> graph, Class<?> inputType, String excludeStepId) {
        // First, check if there are any outgoing edges from the current step
        List<Edge> edges = graph.getOutgoingEdges(excludeStepId);
        // Look for branch edges that match the input type
        for (Edge edge : edges) {
            if (edge.type() == Edge.EdgeType.BRANCH && edge.eventType() != null &&
                edge.eventType().isAssignableFrom(inputType)) {
                return edge.toStepId();
            }
        }
        // If no direct edge found, search all nodes for one that accepts this input type
        // First pass: look for steps other than the excluded one
        for (StepNode node : graph.nodes().values()) {
            // Skip the excluded step to prevent infinite loops
            if (!node.isInitial() &&
                !node.id().equals(excludeStepId)) {
                Class<?> expectedType = node.executor() != null ? node.executor().getInputType() : null;
                log.debug("Checking step {} with expected type {} against input type {}",
                    node.id(),
                    expectedType != null ? expectedType.getName() : "null",
                    inputType.getName());
                if (node.canAcceptInput(inputType)) {
                    log.debug("Found step {} that can accept input type {}",
                        node.id(), inputType.getSimpleName());
                    return node.id();
                }
            }
        }
        // Second pass: if no other step found, check if the excluded step can handle it
        // This allows steps to handle multiple inputs of the same type (e.g., multiple questions)
        if (excludeStepId != null) {
            StepNode excludedNode = graph.getNode(excludeStepId).orElse(null);
            if (excludedNode != null && !excludedNode.isInitial() && excludedNode.canAcceptInput(inputType)) {
                log.debug("No other step found, allowing return to the same step {} for input type {}",
                    excludeStepId, inputType.getSimpleName());
                return excludeStepId;
            }
        }
        // Nothing matched: log the candidate steps to aid debugging, then give up.
        log.warn("No step found that can accept input type {} (excluding {}). Available steps:",
            inputType.getName(), excludeStepId);
        for (StepNode node : graph.nodes().values()) {
            if (!node.isInitial()) {
                Class<?> expectedType = node.executor() != null ? node.executor().getInputType() : null;
                log.warn(" Step {}: expects {}", node.id(),
                    expectedType != null ? expectedType.getName() : "null");
            }
        }
        return null;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/ExecutionInterceptor.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import java.util.Optional;
/**
 * Interceptor interface for workflow step execution.
 * Allows plugins to hook into the execution lifecycle of workflow steps.
 *
 * <p>NOTE(review): the contract does not state whether exceptions thrown from
 * interceptor callbacks abort step execution — confirm against the engine
 * before relying on either behavior.
 */
public interface ExecutionInterceptor {
    /**
     * Called before a step is executed.
     *
     * @param instance The workflow instance
     * @param step The step about to be executed
     * @param input The input that will be passed to the step
     */
    void beforeStep(WorkflowInstance instance, StepNode step, Object input);
    /**
     * Called after a step has been executed successfully.
     *
     * @param instance The workflow instance
     * @param step The step that was executed
     * @param result The result returned by the step
     */
    void afterStep(WorkflowInstance instance, StepNode step, StepResult<?> result);
    /**
     * Called when a step execution fails with an error.
     *
     * @param instance The workflow instance
     * @param step The step that failed
     * @param error The error that occurred
     */
    void onStepError(WorkflowInstance instance, StepNode step, Exception error);
    /**
     * Called to check if this interceptor wants to override the step execution.
     * If this method returns a non-empty Optional, the returned StepResult will be used
     * instead of executing the actual step.
     *
     * <p>The default implementation returns {@link Optional#empty()}, meaning
     * normal execution proceeds. Useful for mocking/testing scenarios.
     *
     * @param instance The workflow instance
     * @param step The step about to be executed
     * @param input The input that will be passed to the step
     * @return An Optional containing the mocked result, or empty to proceed with normal execution
     */
    default Optional<StepResult<?>> interceptExecution(WorkflowInstance instance, StepNode step, Object input) {
        return Optional.empty();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/ExecutionListener.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.domain.WorkflowEvent;
/**
 * Listener interface for workflow execution events.
 * Implementations can react to workflow state changes, progress updates, etc.
 *
 * <p>This is a {@link FunctionalInterface}, so listeners may be supplied as
 * lambdas or method references.
 */
@FunctionalInterface
public interface ExecutionListener {
    /**
     * Called when a workflow event occurs.
     *
     * @param event The workflow event
     */
    void onEvent(WorkflowEvent event);
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/InputPreparer.java
|
package ai.driftkit.workflow.engine.core;

import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import ai.driftkit.workflow.engine.utils.WorkflowInputOutputHandler;
import lombok.extern.slf4j.Slf4j;

import java.util.Map;

/**
 * Prepares input data for workflow steps.
 * Handles the logic of finding appropriate input based on step requirements,
 * context state, and execution history.
 */
@Slf4j
public class InputPreparer {

    /**
     * Prepares input for a step execution by probing, in order:
     * trigger data (initial steps), resumed user input, the most recent
     * compatible output in history, any exactly/assignably typed prior output,
     * and finally the trigger data if the step can accept its type.
     *
     * @param instance The workflow instance
     * @param step     The step to prepare input for
     * @return The prepared input, or null if no suitable input found
     */
    public Object prepareStepInput(WorkflowInstance instance, StepNode step) {
        Class<?> declaredType = step.executor().getInputType();
        log.debug("Preparing input for step: {} (expected type: {})",
                step.id(),
                declaredType != null ? declaredType.getSimpleName() : "any");

        // Initial steps always consume the workflow trigger data.
        if (step.isInitial()) {
            log.debug("Step {} is initial, using trigger data", step.id());
            return instance.getContext().getTriggerData();
        }

        WorkflowContext workflowContext = instance.getContext();

        // Priority 1: input supplied by the user when resuming from suspension.
        Object resumedInput = WorkflowInputOutputHandler.getUserInputForStep(instance, step);
        if (resumedInput != null) {
            return resumedInput;
        }

        // Priority 2: most recent compatible output from execution history.
        log.debug("Looking for recent compatible output. Available outputs: {}",
                workflowContext.getStepOutputs().keySet());
        Object historyOutput = WorkflowInputOutputHandler.findCompatibleOutputFromHistory(instance, step);
        if (historyOutput != null) {
            log.debug("Found recent compatible output for step {}: type={}",
                    step.id(), historyOutput.getClass().getSimpleName());
            return historyOutput;
        }

        // Priority 3: scan all prior outputs for an exact (then assignable) type match.
        if (declaredType != null && declaredType != Object.class) {
            Object typedOutput = findOutputByType(workflowContext, step, declaredType);
            if (typedOutput != null) {
                return typedOutput;
            }
        }

        // Priority 4: fall back to trigger data when the step can accept its type.
        Object triggerData = workflowContext.getTriggerData();
        if (triggerData != null && step.canAcceptInput(triggerData.getClass())) {
            log.debug("Using trigger data of type {} for step {}",
                    triggerData.getClass().getSimpleName(), step.id());
            return triggerData;
        }

        // Nothing usable was found anywhere.
        log.error("No suitable input found for step {} (expected type: {})",
                step.id(),
                declaredType != null ? declaredType.getSimpleName() : "any");
        return null;
    }

    /**
     * Searches all recorded step outputs for the wanted type: first pass requires
     * an exact class match, second pass accepts any assignable type.
     *
     * @return the matching output value, or null when no candidate qualifies
     */
    private Object findOutputByType(WorkflowContext ctx, StepNode step, Class<?> wantedType) {
        Map<String, StepOutput> outputs = ctx.getStepOutputs();

        for (Map.Entry<String, StepOutput> candidate : outputs.entrySet()) {
            if (!isUsable(candidate, step)) {
                continue;
            }
            if (wantedType.equals(candidate.getValue().getActualClass())) {
                log.debug("Found exact type match from step {} for input type {}",
                        candidate.getKey(), wantedType.getSimpleName());
                return candidate.getValue().getValue();
            }
        }

        for (Map.Entry<String, StepOutput> candidate : outputs.entrySet()) {
            if (!isUsable(candidate, step)) {
                continue;
            }
            if (candidate.getValue().isCompatibleWith(wantedType)) {
                log.debug("Found compatible type from step {} for input type {}",
                        candidate.getKey(), wantedType.getSimpleName());
                return candidate.getValue().getValue();
            }
        }
        return null;
    }

    /**
     * A candidate output is usable when it is not the step's own output and
     * it actually carries a value.
     */
    private boolean isUsable(Map.Entry<String, StepOutput> entry, StepNode step) {
        return !entry.getKey().equals(step.id())
                && entry.getValue() != null
                && entry.getValue().hasValue();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/InternalStepListener.java
|
package ai.driftkit.workflow.engine.core;

/**
 * Listener for internal step executions within composite steps like branches.
 * This allows external components (like test frameworks) to track and potentially
 * intercept internal step executions.
 *
 * <p>Only {@link #interceptInternalStep} has a default implementation; the three
 * notification callbacks must be implemented by every listener.
 */
public interface InternalStepListener {

    /**
     * Called before an internal step is executed.
     *
     * @param stepId the ID of the step being executed
     * @param input the input to the step
     * @param context the workflow context
     */
    void beforeInternalStep(String stepId, Object input, WorkflowContext context);

    /**
     * Called after an internal step is executed successfully.
     *
     * @param stepId the ID of the step that was executed
     * @param result the result of the step execution
     * @param context the workflow context
     */
    void afterInternalStep(String stepId, StepResult<?> result, WorkflowContext context);

    /**
     * Called if an internal step execution fails.
     *
     * @param stepId the ID of the step that failed
     * @param error the error that occurred
     * @param context the workflow context
     */
    void onInternalStepError(String stepId, Exception error, WorkflowContext context);

    /**
     * Allows the listener to provide an alternative result for the step.
     * This is used for mocking in test frameworks.
     *
     * <p>The default implementation returns an empty Optional, meaning
     * "do not intercept — execute the step normally".
     *
     * @param stepId the ID of the step
     * @param input the input to the step
     * @param context the workflow context
     * @return an optional alternative result, or empty to proceed with normal execution
     */
    default java.util.Optional<StepResult<?>> interceptInternalStep(String stepId, Object input, WorkflowContext context) {
        return java.util.Optional.empty();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/RetryAbortedException.java
|
package ai.driftkit.workflow.engine.core;

/**
 * Exception thrown when retry is aborted due to specific exception types.
 * This is different from RetryExhaustedException which is thrown when all retry attempts are used up.
 *
 * <p>Note: this is a checked exception (extends {@link Exception}), whereas
 * {@code RetryExecutor.RetryExhaustedException} is unchecked — callers must
 * explicitly handle an aborted retry.
 */
public class RetryAbortedException extends Exception {

    /**
     * Creates a new retry aborted exception.
     *
     * @param message The error message
     */
    public RetryAbortedException(String message) {
        super(message);
    }

    /**
     * Creates a new retry aborted exception with a cause.
     *
     * @param message The error message
     * @param cause The underlying cause
     */
    public RetryAbortedException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/RetryExecutor.java
|
package ai.driftkit.workflow.engine.core;

import ai.driftkit.workflow.engine.annotations.OnInvocationsLimit;
import ai.driftkit.workflow.engine.annotations.RetryPolicy;
import ai.driftkit.workflow.engine.domain.RetryContext;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;

/**
 * Handles retry logic for workflow step execution.
 * Implements exponential backoff with jitter and invocation limit control.
 * Supports circuit breaker, metrics collection, and retry listeners.
 */
@Slf4j
@RequiredArgsConstructor
public class RetryExecutor {

    // Collaborators; all injected via the Lombok-generated constructor.
    private final RetryStrategy retryStrategy;
    private final CircuitBreaker circuitBreaker;
    private final RetryMetrics retryMetrics;
    // CopyOnWriteArrayList by default so listeners can be added/removed concurrently.
    private final List<RetryListener> retryListeners;

    /**
     * Creates an executor with default collaborators: a conditional retry
     * strategy, a fresh circuit breaker, fresh metrics, and no listeners.
     */
    public RetryExecutor() {
        this(new ConditionalRetryStrategy(),
            new CircuitBreaker(),
            new RetryMetrics(),
            new CopyOnWriteArrayList<>());
    }

    /**
     * Creates an executor with a custom retry strategy and default
     * circuit breaker, metrics, and listener list.
     *
     * @param retryStrategy the strategy that decides whether/when to retry
     */
    public RetryExecutor(RetryStrategy retryStrategy) {
        this(retryStrategy,
            new CircuitBreaker(),
            new RetryMetrics(),
            new CopyOnWriteArrayList<>());
    }

    /**
     * Adds a retry listener.
     *
     * @param listener The listener to add
     */
    public void addRetryListener(RetryListener listener) {
        retryListeners.add(listener);
    }

    /**
     * Removes a retry listener.
     *
     * @param listener The listener to remove
     */
    public void removeRetryListener(RetryListener listener) {
        retryListeners.remove(listener);
    }

    /**
     * Executes a step with retry logic.
     *
     * <p>Order of checks: (1) circuit breaker fail-fast, (2) invocation limit
     * with per-step overflow behavior, (3) the retry loop itself. Within the
     * loop, the retry context stored on the workflow context is refreshed
     * before each attempt and after each failure, and is cleared on success,
     * on a non-retryable error, and when attempts are exhausted.
     *
     * @param instance The workflow instance
     * @param step The step to execute
     * @param executor The underlying step executor
     * @return The step result
     * @throws Exception if all retries are exhausted or non-retryable error occurs
     */
    public StepResult<?> executeWithRetry(WorkflowInstance instance,
                                          StepNode step,
                                          StepExecutor executor) throws Exception {
        String stepId = step.id();
        WorkflowContext context = instance.getContext();

        // Check circuit breaker first
        if (!circuitBreaker.allowExecution(stepId)) {
            log.warn("Circuit breaker is open for step '{}', failing fast", stepId);
            throw new CircuitBreakerOpenException(
                String.format("Circuit breaker is open for step '%s'", stepId));
        }

        // Check invocation limit; recordStepExecution both increments and returns the count.
        int invocationCount = context.recordStepExecution(stepId);
        int invocationLimit = step.invocationLimit();
        if (invocationCount > invocationLimit) {
            OnInvocationsLimit behavior = step.onInvocationsLimit();
            switch (behavior) {
                // ERROR: hard failure. STOP: end the workflow gracefully with a
                // null finish result. CONTINUE: log and proceed anyway.
                case ERROR -> throw new InvocationLimitExceededException(
                    String.format("Step '%s' exceeded invocation limit of %d", stepId, invocationLimit));
                case STOP -> {
                    log.warn("Step '{}' reached invocation limit of {}, stopping execution", stepId, invocationLimit);
                    return StepResult.finish(null);
                }
                case CONTINUE -> log.warn("Step '{}' exceeded invocation limit of {}, continuing anyway",
                    stepId, invocationLimit);
            }
        }

        // Get retry policy; absent policy or maxAttempts <= 1 means a single plain execution.
        RetryPolicy retryPolicy = step.retryPolicy();
        if (retryPolicy == null || retryPolicy.maxAttempts() <= 1) {
            // No retry configured
            return executor.execute(instance, step);
        }

        // Initialize retry context
        List<RetryContext.RetryAttempt> attempts = new ArrayList<>();
        long firstAttemptTime = System.currentTimeMillis();
        Exception lastException = null;

        for (int attempt = 1; attempt <= retryPolicy.maxAttempts(); attempt++) {
            long attemptStartTime = System.currentTimeMillis();
            try {
                // Calculate delay for this attempt (used in metrics).
                // NOTE(review): this uses attemptNumber = attempt - 1 on a minimal
                // context, so the metric delay may differ from the delay actually
                // slept in the catch block below — confirm whether that is intended.
                long delay = attempt > 1 ? retryStrategy.calculateDelay(
                    RetryContext.builder()
                        .attemptNumber(attempt - 1)
                        .build(),
                    retryPolicy) : 0;

                // Update retry context in workflow
                RetryContext retryContext = RetryContext.builder()
                    .stepId(stepId)
                    .attemptNumber(attempt)
                    .maxAttempts(retryPolicy.maxAttempts())
                    .previousAttempts(attempts)
                    .firstAttemptTime(firstAttemptTime)
                    .currentAttemptTime(attemptStartTime)
                    .build();
                context.updateRetryContext(stepId, retryContext);

                // Notify listeners before retry (first attempt is not a "retry")
                if (attempt > 1) {
                    notifyBeforeRetry(stepId, retryContext, retryPolicy);
                    retryMetrics.recordRetryAttempt(stepId, attempt, delay);
                }

                // Execute step
                StepResult<?> result = executor.execute(instance, step);

                // Check if we should retry on fail result: a Fail result is
                // converted into an exception so it flows through the same
                // retry path as a thrown failure.
                if (retryPolicy.retryOnFailResult() && result instanceof StepResult.Fail) {
                    throw new RetryableStepFailure("Step returned fail result", ((StepResult.Fail<?>) result).error());
                }

                // Success - clear retry context and return
                context.clearRetryContext(stepId);
                circuitBreaker.recordSuccess(stepId);

                if (attempt > 1) {
                    long totalDuration = System.currentTimeMillis() - firstAttemptTime;
                    retryMetrics.recordRetrySuccess(stepId, attempt, totalDuration);
                    notifyRetrySuccess(stepId, retryContext, result);
                }

                return result;

            } catch (Exception e) {
                long attemptDuration = System.currentTimeMillis() - attemptStartTime;

                // Record attempt
                RetryContext.RetryAttempt attemptRecord = RetryContext.RetryAttempt.builder()
                    .attemptNumber(attempt)
                    .attemptTime(attemptStartTime)
                    .failure(e)
                    .durationMs(attemptDuration)
                    .build();
                attempts.add(attemptRecord);

                // Update retry context with failure
                RetryContext retryContext = RetryContext.builder()
                    .stepId(stepId)
                    .attemptNumber(attempt)
                    .maxAttempts(retryPolicy.maxAttempts())
                    .previousAttempts(attempts)
                    .firstAttemptTime(firstAttemptTime)
                    .currentAttemptTime(attemptStartTime)
                    .build();
                context.updateRetryContext(stepId, retryContext);

                // Record failure metrics
                circuitBreaker.recordFailure(stepId, e);
                retryMetrics.recordRetryFailure(stepId, attempt, e);

                // Check if we should retry
                if (!retryStrategy.shouldRetry(e, retryContext, retryPolicy)) {
                    // Check if this is because we've exhausted attempts
                    if (attempt >= retryPolicy.maxAttempts()) {
                        // Break out of loop to handle exhaustion properly
                        lastException = e;
                        break;
                    }
                    // Otherwise it's a non-retryable error: clear state, notify, rethrow as-is.
                    log.error("Step '{}' failed on attempt {} (non-retryable error)", stepId, attempt, e);
                    context.clearRetryContext(stepId);
                    notifyRetryAborted(stepId, retryContext, e);
                    throw e;
                }

                lastException = e;

                if (attempt < retryPolicy.maxAttempts()) {
                    // Calculate delay
                    long delay = retryStrategy.calculateDelay(retryContext, retryPolicy);
                    log.warn("Step '{}' failed on attempt {} of {}, retrying in {}ms",
                        stepId, attempt, retryPolicy.maxAttempts(), delay, e);

                    // Notify listeners
                    notifyRetryFailure(stepId, retryContext, e, true);

                    // Wait before next attempt; restore the interrupt flag if interrupted.
                    try {
                        TimeUnit.MILLISECONDS.sleep(delay);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        throw new RuntimeException("Retry interrupted", ie);
                    }
                } else {
                    log.error("Step '{}' failed after {} attempts", stepId, retryPolicy.maxAttempts(), e);
                    notifyRetryFailure(stepId, retryContext, e, false);
                }
            }
        }

        // All retries exhausted: clear state, record metrics, notify, then throw.
        context.clearRetryContext(stepId);
        retryMetrics.recordRetryExhausted(stepId, retryPolicy.maxAttempts());

        RetryContext finalContext = RetryContext.builder()
            .stepId(stepId)
            .attemptNumber(retryPolicy.maxAttempts())
            .maxAttempts(retryPolicy.maxAttempts())
            .previousAttempts(attempts)
            .firstAttemptTime(firstAttemptTime)
            .currentAttemptTime(System.currentTimeMillis())
            .build();
        notifyRetryExhausted(stepId, finalContext, lastException);

        throw new RetryExhaustedException(
            String.format("Step '%s' failed after %d attempts", stepId, retryPolicy.maxAttempts()),
            lastException);
    }

    // Notification methods for listeners.
    // Each swallows and logs listener exceptions so a misbehaving listener
    // cannot break the retry flow of the step being executed.

    private void notifyBeforeRetry(String stepId, RetryContext context, RetryPolicy policy) {
        if (CollectionUtils.isEmpty(retryListeners)) {
            return;
        }
        for (RetryListener listener : retryListeners) {
            try {
                listener.beforeRetry(stepId, context, policy);
            } catch (Exception e) {
                log.error("Error in retry listener beforeRetry", e);
            }
        }
    }

    private void notifyRetrySuccess(String stepId, RetryContext context, Object result) {
        if (CollectionUtils.isEmpty(retryListeners)) {
            return;
        }
        for (RetryListener listener : retryListeners) {
            try {
                listener.onRetrySuccess(stepId, context, result);
            } catch (Exception e) {
                log.error("Error in retry listener onRetrySuccess", e);
            }
        }
    }

    private void notifyRetryFailure(String stepId, RetryContext context, Exception exception, boolean willRetry) {
        if (CollectionUtils.isEmpty(retryListeners)) {
            return;
        }
        for (RetryListener listener : retryListeners) {
            try {
                listener.onRetryFailure(stepId, context, exception, willRetry);
            } catch (Exception e) {
                log.error("Error in retry listener onRetryFailure", e);
            }
        }
    }

    private void notifyRetryExhausted(String stepId, RetryContext context, Exception lastException) {
        if (CollectionUtils.isEmpty(retryListeners)) {
            return;
        }
        for (RetryListener listener : retryListeners) {
            try {
                listener.onRetryExhausted(stepId, context, lastException);
            } catch (Exception e) {
                log.error("Error in retry listener onRetryExhausted", e);
            }
        }
    }

    private void notifyRetryAborted(String stepId, RetryContext context, Exception exception) {
        if (CollectionUtils.isEmpty(retryListeners)) {
            return;
        }
        for (RetryListener listener : retryListeners) {
            try {
                listener.onRetryAborted(stepId, context, exception);
            } catch (Exception e) {
                log.error("Error in retry listener onRetryAborted", e);
            }
        }
    }

    /**
     * Internal step executor interface.
     */
    @FunctionalInterface
    public interface StepExecutor {
        StepResult<?> execute(WorkflowInstance instance, StepNode step) throws Exception;
    }

    /**
     * Gets access to the retry metrics.
     *
     * @return The retry metrics instance
     */
    public RetryMetrics getRetryMetrics() {
        return retryMetrics;
    }

    // Protected accessors for subclasses (e.g. test doubles).
    // NOTE(review): getMetrics() duplicates the public getRetryMetrics() — both
    // return the same instance.

    protected RetryStrategy getStrategy() {
        return retryStrategy;
    }

    protected CircuitBreaker getCircuitBreaker() {
        return circuitBreaker;
    }

    protected RetryMetrics getMetrics() {
        return retryMetrics;
    }

    protected List<RetryListener> getListeners() {
        return retryListeners;
    }

    /**
     * Exception thrown when invocation limit is exceeded.
     */
    public static class InvocationLimitExceededException extends RuntimeException {
        public InvocationLimitExceededException(String message) {
            super(message);
        }
    }

    /**
     * Exception thrown when all retries are exhausted.
     */
    public static class RetryExhaustedException extends RuntimeException {
        public RetryExhaustedException(String message, Throwable cause) {
            super(message, cause);
        }
    }

    /**
     * Exception thrown when circuit breaker is open.
     */
    public static class CircuitBreakerOpenException extends RuntimeException {
        public CircuitBreakerOpenException(String message) {
            super(message);
        }
    }

    /**
     * Wrapper exception for retryable step failures.
     * Used internally to route a {@code StepResult.Fail} through the retry path.
     */
    private static class RetryableStepFailure extends RuntimeException {
        public RetryableStepFailure(String message, Throwable cause) {
            super(message, cause);
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/RetryListener.java
|
package ai.driftkit.workflow.engine.core;

import ai.driftkit.workflow.engine.annotations.RetryPolicy;
import ai.driftkit.workflow.engine.domain.RetryContext;

/**
 * Listener interface for retry events.
 * Allows monitoring and reacting to retry attempts, successes, and failures.
 *
 * <p>All methods have default no-op implementations, so implementors only need
 * to override the callbacks they care about.
 */
public interface RetryListener {

    /**
     * Called before a retry attempt is made.
     *
     * @param stepId The step being retried
     * @param retryContext The current retry context
     * @param retryPolicy The retry policy being applied
     */
    default void beforeRetry(String stepId, RetryContext retryContext, RetryPolicy retryPolicy) {
        // Default no-op implementation
    }

    /**
     * Called after a retry attempt completes successfully.
     *
     * @param stepId The step that was retried
     * @param retryContext The final retry context
     * @param result The successful result
     */
    default void onRetrySuccess(String stepId, RetryContext retryContext, Object result) {
        // Default no-op implementation
    }

    /**
     * Called after a retry attempt fails.
     *
     * @param stepId The step that failed
     * @param retryContext The current retry context
     * @param exception The exception that occurred
     * @param willRetry Whether another retry will be attempted
     */
    default void onRetryFailure(String stepId, RetryContext retryContext,
                                Exception exception, boolean willRetry) {
        // Default no-op implementation
    }

    /**
     * Called when all retry attempts have been exhausted.
     *
     * @param stepId The step that exhausted retries
     * @param retryContext The final retry context
     * @param lastException The last exception that occurred
     */
    default void onRetryExhausted(String stepId, RetryContext retryContext, Exception lastException) {
        // Default no-op implementation
    }

    /**
     * Called when retry is aborted due to a non-retryable exception.
     *
     * @param stepId The step that was aborted
     * @param retryContext The retry context at abort time
     * @param exception The non-retryable exception
     */
    default void onRetryAborted(String stepId, RetryContext retryContext, Exception exception) {
        // Default no-op implementation
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/RetryMetrics.java
|
package ai.driftkit.workflow.engine.core;

import lombok.Getter;
import lombok.extern.slf4j.Slf4j;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;

/**
 * Collects and provides metrics about retry operations.
 * Thread-safe implementation for concurrent workflow execution.
 *
 * <p>Fix: aborted retries are now actually tracked — previously
 * {@code StepMetrics.getAbortedCount()} was hard-coded to 0 (TODO). Callers
 * should report aborts via {@link #recordRetryAborted(String)}.
 */
@Slf4j
public class RetryMetrics {

    // Per-step metric buckets, created lazily on first event for a step.
    private final ConcurrentHashMap<String, StepMetrics> stepMetrics = new ConcurrentHashMap<>();

    // Global counters; LongAdder favors high-contention increments over reads.
    private final LongAdder totalRetryAttempts = new LongAdder();
    private final LongAdder totalRetrySuccesses = new LongAdder();
    private final LongAdder totalRetryFailures = new LongAdder();
    private final LongAdder totalRetryExhausted = new LongAdder();
    private final LongAdder totalRetryAborted = new LongAdder();

    /**
     * Records a retry attempt.
     *
     * @param stepId The step being retried
     * @param attemptNumber The attempt number
     * @param delayMs The delay before this attempt
     */
    public void recordRetryAttempt(String stepId, int attemptNumber, long delayMs) {
        StepMetrics metrics = stepMetrics.computeIfAbsent(stepId, k -> new StepMetrics());
        metrics.recordAttempt(attemptNumber, delayMs);
        totalRetryAttempts.increment();
    }

    /**
     * Records a successful retry.
     *
     * @param stepId The step that succeeded
     * @param totalAttempts Total number of attempts it took
     * @param totalDurationMs Total time spent retrying
     */
    public void recordRetrySuccess(String stepId, int totalAttempts, long totalDurationMs) {
        StepMetrics metrics = stepMetrics.computeIfAbsent(stepId, k -> new StepMetrics());
        metrics.recordSuccess(totalAttempts, totalDurationMs);
        totalRetrySuccesses.increment();
    }

    /**
     * Records a retry failure.
     *
     * @param stepId The step that failed
     * @param attemptNumber The attempt that failed
     * @param exception The exception that occurred
     */
    public void recordRetryFailure(String stepId, int attemptNumber, Exception exception) {
        StepMetrics metrics = stepMetrics.computeIfAbsent(stepId, k -> new StepMetrics());
        metrics.recordFailure(attemptNumber, exception);
        totalRetryFailures.increment();
    }

    /**
     * Records when retries are exhausted.
     *
     * @param stepId The step that exhausted retries
     * @param totalAttempts Total attempts made
     */
    public void recordRetryExhausted(String stepId, int totalAttempts) {
        StepMetrics metrics = stepMetrics.computeIfAbsent(stepId, k -> new StepMetrics());
        metrics.recordExhausted(totalAttempts);
        totalRetryExhausted.increment();
    }

    /**
     * Records when a retry sequence is aborted by a non-retryable exception.
     *
     * @param stepId The step whose retry was aborted
     */
    public void recordRetryAborted(String stepId) {
        StepMetrics metrics = stepMetrics.computeIfAbsent(stepId, k -> new StepMetrics());
        metrics.recordAborted();
        totalRetryAborted.increment();
    }

    /**
     * Gets metrics for a specific step.
     *
     * @param stepId The step ID
     * @return Step metrics or null if no metrics exist
     */
    public StepMetrics getStepMetrics(String stepId) {
        return stepMetrics.get(stepId);
    }

    /**
     * Gets the total number of aborted retry sequences across all steps.
     *
     * @return Global aborted count
     */
    public long getTotalAborted() {
        return totalRetryAborted.sum();
    }

    /**
     * Gets global retry metrics.
     *
     * @return Global metrics summary
     */
    public GlobalMetrics getGlobalMetrics() {
        return new GlobalMetrics(
            totalRetryAttempts.sum(),
            totalRetrySuccesses.sum(),
            totalRetryFailures.sum(),
            totalRetryExhausted.sum(),
            stepMetrics.size()
        );
    }

    /**
     * Resets metrics for a specific step.
     *
     * @param stepId The step ID
     */
    public void resetStep(String stepId) {
        stepMetrics.remove(stepId);
    }

    /**
     * Resets all metrics.
     */
    public void resetAll() {
        stepMetrics.clear();
        totalRetryAttempts.reset();
        totalRetrySuccesses.reset();
        totalRetryFailures.reset();
        totalRetryExhausted.reset();
        totalRetryAborted.reset();
    }

    /**
     * Gets all step metrics.
     *
     * @return Map of all step metrics (snapshot copy)
     */
    public Map<String, StepMetrics> getAllStepMetrics() {
        return new ConcurrentHashMap<>(stepMetrics);
    }

    /**
     * Metrics for a specific step.
     */
    @Getter
    public static class StepMetrics {
        private final AtomicInteger attemptCount = new AtomicInteger(0);
        private final AtomicInteger successCount = new AtomicInteger(0);
        private final AtomicInteger failureCount = new AtomicInteger(0);
        private final AtomicInteger exhaustedCount = new AtomicInteger(0);
        private final AtomicInteger abortedCount = new AtomicInteger(0);
        private final AtomicLong totalRetryDelayMs = new AtomicLong(0);
        private final AtomicLong totalRetryDurationMs = new AtomicLong(0);
        private final AtomicInteger maxAttempts = new AtomicInteger(0);
        // Failure counts keyed by exception simple class name.
        private final ConcurrentHashMap<String, AtomicInteger> exceptionCounts = new ConcurrentHashMap<>();

        void recordAttempt(int attemptNumber, long delayMs) {
            attemptCount.incrementAndGet();
            totalRetryDelayMs.addAndGet(delayMs);
            maxAttempts.updateAndGet(current -> Math.max(current, attemptNumber));
        }

        void recordSuccess(int totalAttempts, long totalDurationMs) {
            successCount.incrementAndGet();
            totalRetryDurationMs.addAndGet(totalDurationMs);
            maxAttempts.updateAndGet(current -> Math.max(current, totalAttempts));
        }

        void recordFailure(int attemptNumber, Exception exception) {
            failureCount.incrementAndGet();
            String exceptionType = exception.getClass().getSimpleName();
            exceptionCounts.computeIfAbsent(exceptionType, k -> new AtomicInteger(0))
                .incrementAndGet();
        }

        void recordExhausted(int totalAttempts) {
            exhaustedCount.incrementAndGet();
            maxAttempts.updateAndGet(current -> Math.max(current, totalAttempts));
        }

        void recordAborted() {
            abortedCount.incrementAndGet();
        }

        /**
         * Gets the success rate as a percentage.
         *
         * @return Success rate (0-100) or -1 if no executions
         */
        public double getSuccessRate() {
            int total = successCount.get() + exhaustedCount.get();
            if (total == 0) {
                return -1;
            }
            return (double) successCount.get() / total * 100;
        }

        public long getTotalAttempts() {
            return attemptCount.get();
        }

        /**
         * Gets the number of retry sequences aborted by non-retryable errors.
         *
         * @return Aborted count for this step
         */
        public long getAbortedCount() {
            return abortedCount.get();
        }

        /**
         * Gets the average duration of successful retry sequences in milliseconds.
         *
         * @return Average duration or 0 if no successes
         */
        public double getAverageDuration() {
            int successes = successCount.get();
            if (successes == 0) {
                return 0;
            }
            return (double) totalRetryDurationMs.get() / successes;
        }

        /**
         * Gets average retry delay in milliseconds.
         *
         * @return Average delay or 0 if no retries
         */
        public double getAverageRetryDelay() {
            int attempts = attemptCount.get();
            if (attempts == 0) {
                return 0;
            }
            return (double) totalRetryDelayMs.get() / attempts;
        }
    }

    /**
     * Global retry metrics summary. Immutable snapshot.
     */
    @Getter
    public static class GlobalMetrics {
        private final long totalAttempts;
        private final long totalSuccesses;
        private final long totalFailures;
        private final long totalExhausted;
        private final int uniqueSteps;

        GlobalMetrics(long totalAttempts, long totalSuccesses, long totalFailures,
                      long totalExhausted, int uniqueSteps) {
            this.totalAttempts = totalAttempts;
            this.totalSuccesses = totalSuccesses;
            this.totalFailures = totalFailures;
            this.totalExhausted = totalExhausted;
            this.uniqueSteps = uniqueSteps;
        }

        /**
         * Gets the global success rate as a percentage.
         *
         * @return Success rate (0-100) or -1 if no executions
         */
        public double getSuccessRate() {
            long total = totalSuccesses + totalExhausted;
            if (total == 0) {
                return -1;
            }
            return (double) totalSuccesses / total * 100;
        }

        /**
         * Gets the overall success rate. Alias of {@link #getSuccessRate()},
         * kept for backward compatibility.
         *
         * @return Success rate (0-100) or -1 if no executions
         */
        public double getOverallSuccessRate() {
            return getSuccessRate();
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/RetryStrategy.java
|
package ai.driftkit.workflow.engine.core;

import ai.driftkit.workflow.engine.annotations.RetryPolicy;
import ai.driftkit.workflow.engine.domain.RetryContext;

/**
 * Strategy interface for determining retry behavior.
 * Implementations can provide different retry algorithms such as
 * exponential backoff, linear delay, or custom strategies.
 *
 * <p>{@link #beforeRetry} and {@link #afterRetry} are optional hooks with
 * default no-op implementations.
 */
public interface RetryStrategy {

    /**
     * Determines whether a retry should be attempted based on the failure and context.
     *
     * @param failure The exception that caused the failure
     * @param context The current retry context
     * @param policy The retry policy configuration
     * @return True if retry should be attempted, false otherwise
     */
    boolean shouldRetry(Throwable failure, RetryContext context, RetryPolicy policy);

    /**
     * Calculates the delay before the next retry attempt.
     *
     * @param context The current retry context
     * @param policy The retry policy configuration
     * @return The delay in milliseconds before the next attempt
     */
    long calculateDelay(RetryContext context, RetryPolicy policy);

    /**
     * Called before a retry attempt is made.
     * Can be used for logging or metrics collection.
     *
     * @param context The current retry context
     * @param delay The calculated delay before this retry
     */
    default void beforeRetry(RetryContext context, long delay) {
        // Default implementation does nothing
    }

    /**
     * Called after a retry attempt completes (success or failure).
     *
     * @param context The current retry context
     * @param success True if the retry succeeded, false if it failed
     * @param duration The duration of the retry attempt in milliseconds
     */
    default void afterRetry(RetryContext context, boolean success, long duration) {
        // Default implementation does nothing
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/StepExecutionTracker.java
|
package ai.driftkit.workflow.engine.core;

import lombok.extern.slf4j.Slf4j;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Tracks step execution counts within workflow instances.
 * Used to enforce invocation limits and provide execution statistics.
 */
@Slf4j
public class StepExecutionTracker {

    // workflow instance ID -> (step ID -> execution counter)
    private final ConcurrentMap<String, ConcurrentMap<String, AtomicInteger>> executionCounts = new ConcurrentHashMap<>();

    /**
     * Records one execution of a step and returns the updated total.
     *
     * @param workflowInstanceId The workflow instance ID
     * @param stepId The step ID
     * @return The new execution count for this step
     */
    public int recordExecution(String workflowInstanceId, String stepId) {
        int updated = executionCounts
                .computeIfAbsent(workflowInstanceId, id -> new ConcurrentHashMap<>())
                .computeIfAbsent(stepId, id -> new AtomicInteger())
                .incrementAndGet();
        log.debug("Step {} in workflow {} executed {} times", stepId, workflowInstanceId, updated);
        return updated;
    }

    /**
     * Gets the current execution count for a step.
     *
     * @param workflowInstanceId The workflow instance ID
     * @param stepId The step ID
     * @return The current execution count, or 0 if never executed
     */
    public int getExecutionCount(String workflowInstanceId, String stepId) {
        ConcurrentMap<String, AtomicInteger> perStep = executionCounts.get(workflowInstanceId);
        if (perStep == null) {
            return 0;
        }
        AtomicInteger counter = perStep.get(stepId);
        return counter == null ? 0 : counter.get();
    }

    /**
     * Resets the execution count for a specific step.
     *
     * @param workflowInstanceId The workflow instance ID
     * @param stepId The step ID
     */
    public void resetStepCount(String workflowInstanceId, String stepId) {
        ConcurrentMap<String, AtomicInteger> perStep = executionCounts.get(workflowInstanceId);
        if (perStep == null) {
            return;
        }
        perStep.remove(stepId);
        log.debug("Reset execution count for step {} in workflow {}", stepId, workflowInstanceId);
    }

    /**
     * Clears all execution counts for a workflow instance.
     * Should be called when a workflow completes or is cleaned up.
     *
     * @param workflowInstanceId The workflow instance ID
     */
    public void clearWorkflowCounts(String workflowInstanceId) {
        ConcurrentMap<String, AtomicInteger> dropped = executionCounts.remove(workflowInstanceId);
        if (dropped != null) {
            log.debug("Cleared execution counts for workflow {}, had {} steps tracked",
                    workflowInstanceId, dropped.size());
        }
    }

    /**
     * Gets a snapshot of all step execution counts for a workflow instance.
     *
     * @param workflowInstanceId The workflow instance ID
     * @return Map of step IDs to execution counts
     */
    public ConcurrentMap<String, Integer> getWorkflowCounts(String workflowInstanceId) {
        ConcurrentMap<String, Integer> snapshot = new ConcurrentHashMap<>();
        ConcurrentMap<String, AtomicInteger> perStep = executionCounts.get(workflowInstanceId);
        if (perStep != null) {
            perStep.forEach((id, counter) -> snapshot.put(id, counter.get()));
        }
        return snapshot;
    }

    /**
     * Gets the total number of workflows being tracked.
     *
     * @return The number of workflow instances with execution data
     */
    public int getTrackedWorkflowCount() {
        return executionCounts.size();
    }

    /**
     * Clears all execution tracking data.
     * Use with caution - this affects all workflows.
     */
    public void clearAll() {
        int workflowCount = executionCounts.size();
        executionCounts.clear();
        log.info("Cleared all execution tracking data for {} workflows", workflowCount);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/StepOutput.java
|
package ai.driftkit.workflow.engine.core;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
* Wrapper for step output that preserves type information.
* Values are stored as JSON and deserialized lazily with proper type restoration.
*/
@Slf4j
@Data
@NoArgsConstructor
public class StepOutput {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper()
.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
private JsonNode valueJson;
private String className;
@JsonIgnore
private transient Object cachedValue;
@JsonIgnore
private transient Class<?> cachedClass;
/**
 * Creates a StepOutput from a value, automatically capturing its type.
 *
 * <p>The value is serialized to a JSON tree immediately; the live object and
 * its class are also cached so the first {@code getValue()} call avoids
 * deserialization. A null value yields an empty StepOutput (no JSON, no class).
 */
public static StepOutput of(Object value) {
    if (value == null) {
        return new StepOutput();
    }
    StepOutput output = new StepOutput();
    output.className = value.getClass().getName();
    try {
        output.valueJson = OBJECT_MAPPER.valueToTree(value);
        output.cachedValue = value;
        output.cachedClass = value.getClass();
    } catch (Exception e) {
        // For empty beans (commands without fields), store empty JSON object
        // so the output remains persistable; the in-memory value stays cached.
        log.error("Cannot serialize value directly, attempting to store as empty object: {}",
            value.getClass().getName());
        output.valueJson = OBJECT_MAPPER.createObjectNode();
        output.cachedValue = value;
        output.cachedClass = value.getClass();
    }
    return output;
}
/**
* Gets the value, deserializing from JSON if needed.
* The value is cached after first deserialization.
*/
public Object getValue() {
if (cachedValue != null) {
return cachedValue;
}
if (valueJson == null || className == null) {
return null;
}
try {
Class<?> clazz = getActualClass();
// For empty JSON objects, try to instantiate the class directly
if (valueJson.isObject() && valueJson.size() == 0) {
try {
cachedValue = clazz.getDeclaredConstructor().newInstance();
return cachedValue;
} catch (Exception e) {
log.error("Cannot instantiate empty bean directly, falling back to JSON deserialization: {}", className);
}
}
cachedValue = OBJECT_MAPPER.treeToValue(valueJson, clazz);
return cachedValue;
} catch (Exception e) {
throw new IllegalStateException(
"Failed to deserialize step output of type " + className, e
);
}
}
/**
* Gets the value as the specified type.
*
* @param type The expected type
* @return The value cast to the type
* @throws ClassCastException if the value cannot be cast
*/
public <T> T getValueAs(Class<T> type) {
Object value = getValue();
if (value == null) {
return null;
}
if (type.isInstance(value)) {
return type.cast(value);
}
throw new ClassCastException(
"Cannot cast step output of type " + value.getClass().getName() +
" to " + type.getName()
);
}
/**
* Checks if this output is compatible with the given type.
*/
public boolean isCompatibleWith(Class<?> type) {
if (type == null) {
return false;
}
Class<?> actualClass = getActualClass();
if (actualClass == null) {
return false;
}
return type.isAssignableFrom(actualClass);
}
/**
* Gets the actual class of the stored value.
* Lazily loads and caches the class from className if needed.
*/
@JsonIgnore
public Class<?> getActualClass() {
// If we have the cached class, return it
if (cachedClass != null) {
return cachedClass;
}
// Try to load from className
if (className != null) {
try {
cachedClass = Class.forName(className);
return cachedClass;
} catch (ClassNotFoundException e) {
log.error("Cannot load class: {}", className, e);
throw new IllegalStateException("Cannot load class: " + className, e);
}
}
return null;
}
/**
* Checks if this output has a value.
*/
public boolean hasValue() {
return valueJson != null && className != null;
}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/StepResult.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.domain.WorkflowEvent;
import ai.driftkit.workflow.engine.schema.AIFunctionSchema;
import ai.driftkit.workflow.engine.schema.SchemaUtils;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
/**
* Sealed interface representing all possible outcomes of a workflow step execution.
* This design pattern enables compile-time type safety for workflow branching logic.
*
* @param <R> The type of the final workflow result (only used by Finish)
*/
public sealed interface StepResult<R>
    permits StepResult.Continue, StepResult.Suspend, StepResult.Branch, StepResult.Finish, StepResult.Fail, StepResult.Async {

    // ---- Static factory methods for better SDK experience ----

    /** Suspends the workflow, generating the input schema from the given class. */
    static <T> Suspend<T> suspend(T promptToUser, Class<?> nextInputClass) {
        AIFunctionSchema schema = SchemaUtils.getSchemaFromClass(nextInputClass);
        return new Suspend<>(promptToUser, nextInputClass, schema, new HashMap<>());
    }

    /** Suspends the workflow with additional metadata for the client. */
    static <T> Suspend<T> suspend(T promptToUser, Class<?> nextInputClass, Map<String, Object> metadata) {
        AIFunctionSchema schema = SchemaUtils.getSchemaFromClass(nextInputClass);
        return new Suspend<>(promptToUser, nextInputClass, schema, metadata);
    }

    /** Starts an async task with an estimated duration and immediate response data. */
    static <T> Async<T> async(String taskId, long estimatedMs, Object immediateData) {
        return new Async<>(taskId, estimatedMs, new HashMap<>(), immediateData);
    }

    /** Starts an async task with explicit task arguments. */
    static <T> Async<T> async(String taskId, long estimatedMs, Map<String, Object> taskArgs, Object immediateData) {
        return new Async<>(taskId, estimatedMs, taskArgs, immediateData);
    }

    /** Completes the workflow with the given result. */
    static <T> Finish<T> finish(T result) {
        return new Finish<>(result);
    }

    /** Fails the workflow with a message wrapped in a RuntimeException. */
    static <T> Fail<T> fail(String message) {
        // Delegate to the Fail(String) convenience constructor instead of
        // duplicating the RuntimeException wrapping here.
        return new Fail<>(message);
    }

    /** Fails the workflow with the given error. */
    static <T> Fail<T> fail(Throwable error) {
        return new Fail<>(error);
    }

    /** Continues to the next step with the given data. */
    static <T> Continue<T> continueWith(T data) {
        return new Continue<>(data);
    }

    /** Branches based on the given event's type. */
    static <T> Branch<T> branch(T event) {
        return new Branch<>(event);
    }

    /**
     * Standard outcome indicating successful step completion and continuation.
     * The data will be passed as input to the next step in the workflow.
     *
     * @param <T> The type of data to pass to the next step
     */
    record Continue<T>(T data) implements StepResult<T> {
        public Continue {
            // data can be null for steps that don't produce output
        }
    }

    /**
     * Suspends workflow execution for Human-in-the-Loop (HITL) scenarios.
     * The workflow state will be persisted and can be resumed later with user input.
     *
     * @param <T> The type parameter to maintain consistency with the step's return type
     * @param promptToUser Data to be sent to the user (e.g., question, options)
     * @param nextInputClass The expected input class for when the workflow resumes
     * @param nextInputSchema The schema for the expected input
     * @param metadata Additional information about the expected response format
     */
    record Suspend<T>(
        T promptToUser,
        Class<?> nextInputClass,
        AIFunctionSchema nextInputSchema,
        Map<String, Object> metadata
    ) implements StepResult<T> {
        public Suspend {
            if (promptToUser == null) {
                throw new IllegalArgumentException("promptToUser cannot be null");
            }
            if (nextInputClass == null) {
                throw new IllegalArgumentException("nextInputClass cannot be null");
            }
            if (nextInputSchema == null) {
                throw new IllegalArgumentException("nextInputSchema cannot be null");
            }
            if (metadata == null) {
                metadata = new HashMap<>();
            }
        }

        /**
         * Simplified constructor with empty metadata
         */
        public Suspend(T promptToUser, Class<?> nextInputClass, AIFunctionSchema schema) {
            this(promptToUser, nextInputClass, schema, new HashMap<>());
        }
    }

    /**
     * Explicit workflow branching based on an event object.
     * The engine will find the next step that accepts the event's type as input.
     *
     * @param event The event object used to determine the next step
     */
    record Branch<T>(T event) implements StepResult<T> {
        public Branch {
            if (event == null) {
                throw new IllegalArgumentException("event cannot be null");
            }
        }
    }

    /**
     * Successful completion of the entire workflow.
     *
     * @param <R> The type of the final result
     * @param result The final result of the workflow (may be null)
     */
    record Finish<R>(R result) implements StepResult<R> {
        // result can be null
    }

    /**
     * Workflow termination due to an error.
     *
     * @param <T> The type parameter to maintain consistency with the step's return type
     * @param error The exception that caused the failure (never null)
     */
    record Fail<T>(Throwable error) implements StepResult<T> {
        public Fail {
            if (error == null) {
                throw new IllegalArgumentException("error cannot be null");
            }
        }

        /**
         * Convenience constructor that wraps a message in a RuntimeException
         */
        public Fail(String errorMessage) {
            this(new RuntimeException(errorMessage));
        }
    }

    /**
     * Indicates that the step is executing asynchronously.
     * The workflow will continue processing in the background.
     *
     * @param <T> The type parameter to maintain consistency with the step's return type
     * @param taskId The ID of the async task for tracking (must be @AsyncStep method ID)
     * @param estimatedDurationMs Estimated duration in milliseconds (-1 if unknown)
     * @param taskArgs Arguments to pass to the async task
     * @param immediateData ANY user object to return immediately to the user (can be annotated with @SchemaClass)
     */
    record Async<T>(
        String taskId,
        long estimatedDurationMs,
        Map<String, Object> taskArgs,
        Object immediateData // Object (not T) so any payload type can be returned immediately
    ) implements StepResult<T> {
        public Async {
            if (taskId == null || taskId.isBlank()) {
                throw new IllegalArgumentException("taskId cannot be null or blank");
            }
            if (taskArgs == null) {
                taskArgs = new HashMap<>();
            }
            // immediateData can be null
        }

        /**
         * Convenience constructor for async without duration estimate
         */
        public Async(String taskId, Map<String, Object> taskArgs, Object immediateData) {
            this(taskId, -1, taskArgs, immediateData);
        }

        /**
         * Convenience constructor with just task ID and immediate data
         */
        public Async(String taskId, Object immediateData) {
            this(taskId, -1, new HashMap<>(), immediateData);
        }

        /**
         * Convenience constructor for CompletableFuture-based async operations.
         * The future will be registered with the workflow engine for async execution.
         * Uses null as immediate data.
         *
         * @param taskId The ID of the async task for tracking
         * @param future The CompletableFuture representing the async operation
         */
        public Async(String taskId, CompletableFuture<StepResult<T>> future) {
            this(taskId, -1, Map.of(WorkflowContext.Keys.ASYNC_FUTURE, future), null);
        }

        /**
         * Convenience constructor for CompletableFuture-based async operations.
         * The future will be registered with the workflow engine for async execution.
         *
         * @param taskId The ID of the async task for tracking
         * @param future The CompletableFuture representing the async operation
         * @param immediateData Immediate data to return to the user
         */
        public Async(String taskId, CompletableFuture<StepResult<T>> future, Object immediateData) {
            this(taskId, -1, Map.of(WorkflowContext.Keys.ASYNC_FUTURE, future), immediateData);
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/StepRouter.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
/**
* Interface for routing logic between workflow steps.
* Determines the next step to execute based on current state and step outputs.
*/
public interface StepRouter {
    /**
     * Finds the next step to execute after the current step.
     *
     * @param graph The workflow graph
     * @param currentStepId The ID of the current step
     * @param data The output data from the current step
     * @return The ID of the next step to execute, or null if no suitable step found
     */
    String findNextStep(WorkflowGraph<?, ?> graph, String currentStepId, Object data);

    /**
     * Finds the target step for a branch based on the event type.
     *
     * @param graph The workflow graph
     * @param currentStepId The ID of the current step
     * @param event The branch event that determines the path
     * @return The ID of the target step for the branch, or null if no match found
     */
    String findBranchTarget(WorkflowGraph<?, ?> graph, String currentStepId, Object event);

    /**
     * Finds a step that can accept the given input type.
     * Used for type-based routing when no explicit edge exists.
     *
     * @param graph The workflow graph
     * @param inputType The type of input to match
     * @param excludeStepId Optional step ID to exclude from search (may be null)
     * @return The ID of a step that can accept the input type, or null if none found
     */
    String findStepForInputType(WorkflowGraph<?, ?> graph, Class<?> inputType, String excludeStepId);
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/SuspendHelper.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.schema.AIFunctionSchema;
import ai.driftkit.workflow.engine.schema.SchemaUtils;
import java.util.HashMap;
import java.util.Map;
/**
* Helper class for creating Suspend results with proper schema information.
* This bridges the gap between the old StepEvent pattern and new StepResult pattern.
*/
public class SuspendHelper {
    /**
     * Creates a Suspend result that includes schema information for the next expected input.
     * This mimics the old framework's StepEvent.of(data, nextInputClass) pattern.
     *
     * @param promptData The data to send to the user (will be converted to properties)
     * @param nextInputClass The class type expected as input when resumed
     * @return A Suspend result with proper metadata
     * @throws IllegalStateException if no schema can be generated for nextInputClass
     */
    public static <T> StepResult.Suspend<T> suspendForInput(
            Object promptData,
            Class<?> nextInputClass) {
        return buildSuspend(promptData, nextInputClass, false);
    }

    /**
     * Creates a Suspend result for simple prompts without complex data.
     *
     * @param message The message to display to the user
     * @param nextInputClass The class type expected as input when resumed
     * @return A Suspend result with proper metadata
     */
    public static <T> StepResult.Suspend<T> suspendWithMessage(
            String message,
            Class<?> nextInputClass) {
        Map<String, String> promptData = new HashMap<>();
        promptData.put("message", message);
        return suspendForInput(promptData, nextInputClass);
    }

    /**
     * Creates a Suspend result that indicates waiting for user input of a specific type.
     * This is the most common pattern from the old framework.
     *
     * @param responseData The response data to send (will extract properties)
     * @param nextInputClass The class type expected as input when resumed
     * @return A Suspend result with proper metadata
     * @throws IllegalStateException if no schema can be generated for nextInputClass
     */
    public static <T> StepResult.Suspend<T> waitForUserInput(
            Object responseData,
            Class<?> nextInputClass) {
        return buildSuspend(responseData, nextInputClass, true);
    }

    /**
     * Shared implementation for suspendForInput/waitForUserInput: builds the
     * metadata map, generates and validates the input schema, and constructs
     * the Suspend record. Extracted to remove the duplicated logic the two
     * public methods previously carried.
     */
    @SuppressWarnings("unchecked")
    private static <T> StepResult.Suspend<T> buildSuspend(
            Object promptData,
            Class<?> nextInputClass,
            boolean waitingForUserInput) {
        Map<String, Object> metadata = new HashMap<>();
        // Store the expected input class for the resume handler
        metadata.put("nextInputClass", nextInputClass.getName());
        if (waitingForUserInput) {
            metadata.put("waitingForUserInput", true);
        }
        // Generate the schema (required)
        AIFunctionSchema schema = SchemaUtils.getSchemaFromClass(nextInputClass);
        if (schema == null) {
            throw new IllegalStateException("Failed to generate schema for class: " + nextInputClass.getName());
        }
        // Cast is unchecked by design: the prompt payload is an arbitrary object.
        return new StepResult.Suspend<T>((T) promptData, nextInputClass, schema, metadata);
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/WorkflowAnalyzer.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.annotations.AsyncStep;
import ai.driftkit.workflow.engine.annotations.InitialStep;
import ai.driftkit.workflow.engine.annotations.Step;
import ai.driftkit.workflow.engine.annotations.Workflow;
import ai.driftkit.workflow.engine.analyzer.*;
import ai.driftkit.workflow.engine.builder.WorkflowBuilder;
import ai.driftkit.workflow.engine.graph.Edge;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
import ai.driftkit.workflow.engine.schema.SchemaUtils;
import lombok.Builder;
import lombok.Data;
import lombok.experimental.UtilityClass;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.ArrayUtils;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.WildcardType;
import java.util.*;
import java.util.Queue;
import java.util.LinkedList;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
/**
* Analyzes workflow classes annotated with @Workflow and builds a WorkflowGraph
* by examining method annotations and return types.
*
* <p>This analyzer implements the automatic graph construction strategy described
* in the technical specification, including:</p>
* <ul>
* <li>Discovery of step methods via annotations</li>
* <li>Automatic edge creation based on return types</li>
* <li>Support for sealed interface branching</li>
* <li>Type-based step matching</li>
* </ul>
*/
@Slf4j
@UtilityClass
public class WorkflowAnalyzer {
/**
* Analyzes a workflow instance and builds a WorkflowGraph.
*
* @param workflowInstance An instance of a class annotated with @Workflow
* @param <T> The workflow input type
* @param <R> The workflow output type
* @return The constructed WorkflowGraph
* @throws IllegalArgumentException if the workflow is invalid
* @throws NullPointerException if workflowInstance is null
*/
@SuppressWarnings("unchecked")
public static <T, R> WorkflowGraph<T, R> analyze(Object workflowInstance) {
if (workflowInstance == null) {
throw new NullPointerException("Workflow instance cannot be null");
}
Class<?> workflowClass = workflowInstance.getClass();
// Verify @Workflow annotation — this is the entry marker for annotation-based workflows
Workflow workflowAnnotation = workflowClass.getAnnotation(Workflow.class);
if (workflowAnnotation == null) {
throw new IllegalArgumentException(
"Class must be annotated with @Workflow: " + workflowClass.getName()
);
}
log.debug("Analyzing workflow: {} ({})", workflowAnnotation.id(), workflowClass.getName());
try {
// Discover all step methods (@InitialStep/@Step); @AsyncStep methods are excluded here
Map<String, StepInfo> stepInfos = discoverSteps(workflowClass, workflowInstance);
// Find initial step — exactly one method must carry @InitialStep
String initialStepId = findInitialStep(stepInfos);
// Build nodes
Map<String, StepNode> nodes = buildNodes(stepInfos);
// Build edges by analyzing return types
Map<String, List<Edge>> edges = buildEdges(stepInfos);
// Determine input and output types: input comes from the initial step's parameter,
// output is derived from the Finish<R> return types across steps
StepInfo initialStep = stepInfos.get(initialStepId);
Class<T> inputType = (Class<T>) initialStep.getInputType();
Class<R> outputType = (Class<R>) determineOutputType(stepInfos);
// Find and analyze async steps (@AsyncStep methods, tracked separately from the graph)
Map<String, AsyncStepMetadata> asyncSteps = findAsyncSteps(workflowInstance);
// Validate async steps reference valid parent steps
validateAsyncSteps(stepInfos, asyncSteps);
// Log analysis results
log.info("Workflow analysis complete: {} - {} nodes, {} edges, {} async handlers",
workflowAnnotation.id(), nodes.size(),
edges.values().stream().mapToInt(List::size).sum(),
asyncSteps.size());
// DEBUG: Workflow edges logged at debug level
if (log.isDebugEnabled()) {
edges.forEach((from, edgeList) -> {
edgeList.forEach(edge -> {
log.debug(" {} -> {} (type: {}, event: {})",
from, edge.toStepId(), edge.type(), edge.eventType());
});
});
}
return WorkflowGraph.<T, R>builder()
.id(workflowAnnotation.id())
.version(workflowAnnotation.version())
.inputType(inputType)
.outputType(outputType)
.nodes(nodes)
.edges(edges)
.initialStepId(initialStepId)
.workflowInstance(workflowInstance)
.asyncStepMetadata(asyncSteps)
.build();
} catch (Exception e) {
// Wrap any analysis failure (including IllegalState from node building)
// into an IllegalArgumentException carrying the original cause
log.error("Failed to analyze workflow: {}", workflowClass.getName(), e);
throw new IllegalArgumentException(
"Failed to analyze workflow " + workflowClass.getName() + ": " + e.getMessage(), e
);
}
}
/**
* Analyzes a workflow created with the builder API.
*
* @param builderWorkflow The workflow created using the builder API
* @param <T> The workflow input type
* @param <R> The workflow output type
* @return The constructed and validated WorkflowGraph
* @throws IllegalArgumentException if the workflow is invalid
*/
public static <T, R> WorkflowGraph<T, R> analyzeBuilder(WorkflowBuilder<T, R> builderWorkflow) {
    if (builderWorkflow == null) {
        throw new IllegalArgumentException("Builder workflow cannot be null");
    }
    log.info("Analyzing builder workflow");
    // The builder constructs its graph directly; we only validate afterwards.
    WorkflowGraph<T, R> graph = builderWorkflow.build();
    validateBuilderGraph(graph);
    // Summarize graph shape for observability.
    int edgeCount = graph.edges().values().stream().mapToInt(List::size).sum();
    log.info("Enhanced workflow graph '{}' with {} nodes and {} edges",
            graph.id(), graph.nodes().size(), edgeCount);
    return graph;
}
/**
* Discovers all step methods in the workflow class.
*/
/**
 * Discovers all step methods in the workflow class.
 * Handles @InitialStep (optionally combined with @Step) and plain @Step methods;
 * @AsyncStep methods are validated for illegal combinations but excluded from
 * the step graph (they are processed via asyncStepMetadata).
 *
 * @throws IllegalArgumentException on illegal annotation combinations,
 *         duplicate step IDs, or when no steps are found at all
 */
private static Map<String, StepInfo> discoverSteps(Class<?> workflowClass, Object instance) {
    Map<String, StepInfo> steps = new HashMap<>();
    Set<String> methodNames = new HashSet<>();
    // Process all public methods
    for (Method method : workflowClass.getMethods()) {
        // Skip methods from Object class
        if (method.getDeclaringClass() == Object.class) {
            continue;
        }
        StepInfo stepInfo = null;
        InitialStep initialAnnotation = method.getAnnotation(InitialStep.class);
        Step stepAnnotation = method.getAnnotation(Step.class);
        AsyncStep asyncAnnotation = method.getAnnotation(AsyncStep.class);
        // @AsyncStep is mutually exclusive with the synchronous step annotations
        if (asyncAnnotation != null && (initialAnnotation != null || stepAnnotation != null)) {
            throw new IllegalArgumentException(
                "Method cannot have @AsyncStep with other step annotations: " + method.getName()
            );
        }
        // Handle @InitialStep (with optional @Step)
        if (initialAnnotation != null) {
            String id = method.getName();
            StepInfo.StepInfoBuilder builder = StepInfo.builder()
                .id(id)
                .method(method)
                .instance(instance)
                .isInitial(true)
                .description(initialAnnotation.description());
            // If @Step is also present, merge its properties
            if (stepAnnotation != null) {
                // Override description if Step provides one
                if (StringUtils.isNotBlank(stepAnnotation.description())) {
                    builder.description(stepAnnotation.description());
                }
                builder.index(stepAnnotation.index())
                    .timeoutMs(stepAnnotation.timeoutMs())
                    .nextClasses(stepAnnotation.nextClasses())
                    .nextSteps(stepAnnotation.nextSteps())
                    .condition(stepAnnotation.condition())
                    .onTrue(stepAnnotation.onTrue())
                    .onFalse(stepAnnotation.onFalse())
                    .retryPolicy(stepAnnotation.retryPolicy())
                    .invocationLimit(stepAnnotation.invocationLimit())
                    // FIX: previously omitted in this merge path (while the
                    // @Step-only path below sets it), silently dropping the
                    // configured on-invocations-limit strategy.
                    .onInvocationsLimit(stepAnnotation.onInvocationsLimit());
            }
            stepInfo = builder.build();
        }
        // Handle @Step only (without @InitialStep)
        else if (stepAnnotation != null) {
            // Determine step ID - priority: explicit id, then method name
            String id = StringUtils.isNotBlank(stepAnnotation.id()) ? stepAnnotation.id() : method.getName();
            stepInfo = StepInfo.builder()
                .id(id)
                .method(method)
                .instance(instance)
                .description(stepAnnotation.description())
                .index(stepAnnotation.index())
                .timeoutMs(stepAnnotation.timeoutMs())
                .nextClasses(stepAnnotation.nextClasses())
                .nextSteps(stepAnnotation.nextSteps())
                .condition(stepAnnotation.condition())
                .onTrue(stepAnnotation.onTrue())
                .onFalse(stepAnnotation.onFalse())
                .retryPolicy(stepAnnotation.retryPolicy())
                .invocationLimit(stepAnnotation.invocationLimit())
                .onInvocationsLimit(stepAnnotation.onInvocationsLimit())
                .build();
        }
        // @AsyncStep methods are handled separately via asyncStepMetadata
        if (asyncAnnotation != null) {
            continue;
        }
        if (stepInfo != null) {
            // Make method accessible if needed
            if (!method.canAccess(instance)) {
                method.setAccessible(true);
            }
            // Track method names to detect overloading
            if (methodNames.contains(method.getName())) {
                log.warn("Method overloading detected for step: {}. " +
                    "Consider using explicit step IDs to avoid ambiguity.", method.getName());
            }
            methodNames.add(method.getName());
            // Validate method signature
            validateStepMethod(method);
            // Analyze method parameters
            MethodAnalyzer.analyzeMethodParameters(stepInfo);
            // Override input type if specified in annotation
            if (stepAnnotation != null && stepAnnotation.inputClass() != void.class) {
                stepInfo.setInputType(stepAnnotation.inputClass());
            }
            // Analyze return type
            MethodAnalyzer.analyzeReturnType(stepInfo);
            // Check for duplicate IDs
            if (steps.containsKey(stepInfo.getId())) {
                throw new IllegalArgumentException(
                    "Duplicate step ID: " + stepInfo.getId() +
                    " (methods: " + steps.get(stepInfo.getId()).getMethod().getName() +
                    " and " + method.getName() + ")"
                );
            }
            steps.put(stepInfo.getId(), stepInfo);
            // FIX: original format string had four placeholders ("async: {}")
            // but only three arguments, so "initial" was logged under "async"
            // and "initial" printed as the raw placeholder.
            log.debug("Discovered step: {} (method: {}, initial: {})",
                stepInfo.getId(), method.getName(), stepInfo.isInitial());
        }
    }
    if (steps.isEmpty()) {
        throw new IllegalArgumentException(
            "No steps found in workflow: " + workflowClass.getName() +
            ". Ensure methods are annotated with @InitialStep, @Step, or @AsyncStep"
        );
    }
    return steps;
}
/**
* Finds the initial step ID.
*/
/**
 * Locates the single step marked with @InitialStep and returns its ID.
 * Exactly one initial step is required; zero or multiple is a configuration error.
 */
private static String findInitialStep(Map<String, StepInfo> steps) {
    List<String> initialSteps = new ArrayList<>();
    for (StepInfo step : steps.values()) {
        if (step.isInitial()) {
            initialSteps.add(step.getId());
        }
    }
    if (initialSteps.isEmpty()) {
        throw new IllegalArgumentException(
            "No step marked with @InitialStep. Every workflow must have exactly one initial step."
        );
    }
    if (initialSteps.size() > 1) {
        throw new IllegalArgumentException(
            "Multiple steps marked with @InitialStep: " + initialSteps +
            ". A workflow can have only one initial step."
        );
    }
    return initialSteps.get(0);
}
/**
* Builds StepNode instances from StepInfo.
*/
/**
 * Builds executable StepNode instances, one per discovered StepInfo,
 * registering input-type schemas and applying descriptions along the way.
 */
private static Map<String, StepNode> buildNodes(Map<String, StepInfo> stepInfos) {
    Map<String, StepNode> nodes = new HashMap<>();
    for (StepInfo info : stepInfos.values()) {
        try {
            String desc = resolveDescription(info);
            StepNode node = StepNode.fromMethod(
                info.getId(),
                info.getMethod(),
                info.getInstance(),
                info.getRetryPolicy(),
                info.getInvocationLimit(),
                info.getOnInvocationsLimit()
            );
            // Register the step's input type schema (side effect of getSchemaFromClass).
            if (node.executor() != null) {
                Class<?> inputType = node.executor().getInputType();
                boolean schemaEligible = inputType != null
                    && inputType != void.class
                    && inputType != Void.class;
                if (schemaEligible) {
                    SchemaUtils.getSchemaFromClass(inputType);
                    log.debug("Registered input type schema for step {}: {}", info.getId(), inputType.getName());
                }
            }
            if (info.isInitial()) {
                node = node.asInitial();
            }
            if (!desc.equals(node.description())) {
                node = node.withDescription(desc);
            }
            nodes.put(info.getId(), node);
        } catch (Exception e) {
            throw new IllegalStateException(
                "Failed to create StepNode for " + info.getId() + ": " + e.getMessage(), e
            );
        }
    }
    return nodes;
}

/** Returns the explicit description, or a generated one when blank. */
private static String resolveDescription(StepInfo info) {
    String description = info.getDescription();
    return StringUtils.isEmpty(description) ? generateStepDescription(info) : description;
}
/**
* Generates a descriptive name for a step based on its metadata.
*/
/**
 * Builds a human-readable description for a step from its metadata:
 * "Initial step: "/"Step: " prefix, the method name, and the input type
 * in parentheses when it is informative (non-null and not Object).
 */
private static String generateStepDescription(StepInfo info) {
    String prefix = info.isInitial() ? "Initial step: " : "Step: ";
    String base = prefix + info.getMethod().getName();
    Class<?> input = info.getInputType();
    if (input != null && input != Object.class) {
        return base + " (" + input.getSimpleName() + ")";
    }
    return base;
}
/**
* Builds edges by analyzing step return types and finding matching input types.
*/
/**
 * Builds all edges of the workflow graph by delegating per-step edge
 * construction, then validates the resulting structure.
 */
private static Map<String, List<Edge>> buildEdges(Map<String, StepInfo> stepInfos) {
    Map<String, List<Edge>> edges = new HashMap<>();
    stepInfos.values().forEach(fromStep -> {
        List<Edge> outgoing = buildEdgesForStep(fromStep, stepInfos);
        if (!outgoing.isEmpty()) {
            edges.put(fromStep.getId(), outgoing);
        }
    });
    // Validate graph structure (reachability, initial step, etc.)
    validateGraphStructure(stepInfos, edges);
    return edges;
}
/**
* Builds all edges for a single step.
*/
/**
 * Builds the outgoing edges of one step by applying the edge-building
 * strategies in priority order (annotations, automatic type matching,
 * error handling), then sorting by edge type.
 */
private static List<Edge> buildEdgesForStep(StepInfo fromStep, Map<String, StepInfo> allSteps) {
    List<Edge> outgoing = new ArrayList<>();
    addAnnotationBasedEdges(fromStep, allSteps, outgoing);
    addAutomaticTypeBasedEdges(fromStep, allSteps, outgoing);
    addErrorHandlingEdges(fromStep, allSteps, outgoing);
    // Deterministic ordering: sequential before branch before error.
    outgoing.sort(Comparator.comparing(Edge::type));
    return outgoing;
}
/**
* Adds edges based on annotation configurations (nextClasses, nextSteps, conditions).
*/
/**
 * Adds edges configured explicitly on the @Step annotation, in priority
 * order: nextClasses (type routing), nextSteps (explicit IDs), then
 * condition-based onTrue/onFalse branching.
 */
private static void addAnnotationBasedEdges(StepInfo fromStep, Map<String, StepInfo> allSteps,
List<Edge> edges) {
    if (ArrayUtils.isNotEmpty(fromStep.getNextClasses())) {
        addNextClassEdges(fromStep, allSteps, edges);
    }
    if (ArrayUtils.isNotEmpty(fromStep.getNextSteps())) {
        addExplicitEdges(fromStep, allSteps, edges);
    }
    if (StringUtils.isNotEmpty(fromStep.getCondition())) {
        addConditionalEdges(fromStep, allSteps, edges);
    }
}
/**
* Adds automatic type-based edges when no explicit routing is defined.
*/
/**
 * Falls back to automatic type-based routing when no explicit edges were
 * produced: sequential edges for the Continue type, branch edges for
 * sealed-interface branch types.
 */
private static void addAutomaticTypeBasedEdges(StepInfo fromStep, Map<String, StepInfo> allSteps,
List<Edge> edges) {
    if (!edges.isEmpty()) {
        // Explicit routing already produced edges; automatic routing is skipped.
        return;
    }
    boolean hasExplicitRouting = ArrayUtils.isNotEmpty(fromStep.getNextSteps())
            || ArrayUtils.isNotEmpty(fromStep.getNextClasses());
    logAutomaticRoutingDecision(fromStep, hasExplicitRouting);
    if (fromStep.getPossibleContinueType() != null) {
        // Continue<T> -> sequential flow to steps accepting T.
        addSequentialEdges(fromStep, allSteps, edges);
    }
    if (!fromStep.getPossibleBranchTypes().isEmpty()) {
        // Branch over a sealed interface -> one edge per permitted subtype.
        addBranchEdges(fromStep, allSteps, edges);
    }
}
/**
* Logs the decision about automatic routing.
*/
/**
 * Emits debug logging explaining why automatic routing is being applied
 * for the given step, plus the candidate continue/branch types.
 */
private static void logAutomaticRoutingDecision(StepInfo fromStep, boolean hasExplicitRouting) {
    String stepId = fromStep.getId();
    if (hasExplicitRouting) {
        log.debug("Step {} has explicit routing configuration but no edges found, checking automatic routing",
            stepId);
    } else {
        log.debug("Step {} has no explicit routing, adding automatic type-based edges",
            stepId);
    }
    log.debug(" Continue type: {}", fromStep.getPossibleContinueType());
    log.debug(" Branch types: {}", fromStep.getPossibleBranchTypes());
}
/**
* Adds error handling edges.
*/
/**
 * Adds error-handling edges for the step. Thin delegation kept as a
 * named strategy step so buildEdgesForStep reads as a pipeline.
 */
private static void addErrorHandlingEdges(StepInfo fromStep, Map<String, StepInfo> allSteps,
List<Edge> edges) {
    addErrorEdges(fromStep, allSteps, edges);
}
/**
* Validates the graph structure after building edges.
*/
/**
 * Validates the graph structure after building edges.
 * FIX: the original called buildNodes(Map.of(stepId, stepInfo)) once per
 * step inside a loop, rebuilding a singleton map and node each iteration;
 * a single buildNodes(stepInfos) call produces the identical node map
 * (nodes are keyed by step ID in both cases) without the per-step overhead.
 */
private static void validateGraphStructure(Map<String, StepInfo> stepInfos,
Map<String, List<Edge>> edges) {
    Map<String, StepNode> nodeMap = buildNodes(stepInfos);
    String initialStep = findInitialStep(stepInfos);
    validateGraph(nodeMap, edges, initialStep);
}
/**
* Adds sequential edges for Continue<T> return types.
*/
/**
 * Adds sequential edges from this step to every non-initial step whose
 * input type accepts the step's Continue<T> payload type.
 */
private static void addSequentialEdges(StepInfo fromStep, Map<String, StepInfo> allSteps,
List<Edge> edges) {
    Class<?> continueType = fromStep.getPossibleContinueType();
    List<StepInfo> targets = new ArrayList<>();
    for (StepInfo candidate : allSteps.values()) {
        // Never route back into the same step or into an initial step.
        boolean excluded = candidate == fromStep || candidate.isInitial();
        if (!excluded && TypeUtils.isTypeCompatible(continueType, candidate.getInputType())) {
            targets.add(candidate);
        }
    }
    if (targets.isEmpty() && continueType != Void.class) {
        log.debug("Step {} produces type {} with no direct edge to accepting step. Runtime routing will be used.",
            fromStep.getId(), continueType.getSimpleName());
    }
    for (StepInfo target : targets) {
        edges.add(Edge.sequential(fromStep.getId(), target.getId()));
        log.debug("Added sequential edge: {} -> {} (type: {})",
            fromStep.getId(), target.getId(), continueType.getSimpleName());
    }
}
/**
* Adds branch edges for sealed interface return types.
*/
/**
 * Adds branch edges for each possible branch type (typically permitted
 * subtypes of a sealed interface) to every non-initial step that accepts it.
 */
private static void addBranchEdges(StepInfo fromStep, Map<String, StepInfo> allSteps,
List<Edge> edges) {
    log.debug("Adding branch edges for step {} with branch types: {}",
        fromStep.getId(), fromStep.getPossibleBranchTypes());
    for (Class<?> branchType : fromStep.getPossibleBranchTypes()) {
        List<StepInfo> targets = new ArrayList<>();
        for (StepInfo candidate : allSteps.values()) {
            log.debug("Checking compatibility: {} (input: {}) for branch type {}",
                candidate.getId(), candidate.getInputType(), branchType);
            // Never route back into the same step or into an initial step.
            boolean excluded = candidate == fromStep || candidate.isInitial();
            if (!excluded && TypeUtils.isTypeCompatible(branchType, candidate.getInputType())) {
                targets.add(candidate);
            }
        }
        if (targets.isEmpty()) {
            log.warn("Step {} can branch to type {} but no step accepts this type",
                fromStep.getId(), branchType.getSimpleName());
        }
        for (StepInfo target : targets) {
            edges.add(Edge.branch(fromStep.getId(), target.getId(), branchType));
            log.debug("Added branch edge: {} -> {} (type: {})",
                fromStep.getId(), target.getId(), branchType.getSimpleName());
        }
    }
}
/**
 * Adds edges based on nextClasses annotation field.
 *
 * <p>For every class declared in {@code nextClasses}, connects this step to
 * each non-initial step whose input type can accept that class.</p>
 */
private static void addNextClassEdges(StepInfo fromStep, Map<String, StepInfo> allSteps,
                                      List<Edge> edges) {
    Class<?>[] nextClasses = fromStep.getNextClasses();
    if (ArrayUtils.isEmpty(nextClasses)) {
        return;
    }
    log.debug("Adding nextClass edges for step {} with nextClasses: {}",
            fromStep.getId(), Arrays.toString(nextClasses));
    for (Class<?> nextClass : nextClasses) {
        for (StepInfo candidate : allSteps.values()) {
            // Initial steps are entry points only; self-loops are not allowed.
            if (candidate == fromStep || candidate.isInitial()) {
                continue;
            }
            log.debug(" Checking: {} -> {} (input: {})",
                    nextClass.getSimpleName(), candidate.getId(), candidate.getInputType());
            if (!TypeUtils.isTypeCompatible(nextClass, candidate.getInputType())) {
                continue;
            }
            edges.add(Edge.sequential(fromStep.getId(), candidate.getId()));
            log.debug(" ADDED nextClass edge: {} -> {} (type: {})",
                    fromStep.getId(), candidate.getId(), nextClass.getSimpleName());
        }
    }
}
/**
 * Adds edges based on explicit nextSteps annotation field.
 *
 * <p>Unknown step ids are logged and skipped rather than failing the build.</p>
 */
private static void addExplicitEdges(StepInfo fromStep, Map<String, StepInfo> allSteps,
                                     List<Edge> edges) {
    String[] nextSteps = fromStep.getNextSteps();
    if (ArrayUtils.isEmpty(nextSteps)) {
        return;
    }
    for (String nextStepId : nextSteps) {
        if (allSteps.containsKey(nextStepId)) {
            edges.add(Edge.sequential(fromStep.getId(), nextStepId));
            log.debug("Added explicit edge: {} -> {}", fromStep.getId(), nextStepId);
        } else {
            log.warn("Step {} references non-existent next step: {}",
                    fromStep.getId(), nextStepId);
        }
    }
}
/**
 * Adds edges based on condition, onTrue, and onFalse annotation fields.
 *
 * <p>The onTrue/onFalse handling was duplicated almost line-for-line; it is
 * now factored into a single helper parameterized by the branch value.</p>
 */
private static void addConditionalEdges(StepInfo fromStep, Map<String, StepInfo> allSteps,
                                        List<Edge> edges) {
    if (StringUtils.isEmpty(fromStep.getCondition())) {
        return;
    }
    addConditionalEdge(fromStep, fromStep.getOnTrue(), true, allSteps, edges);
    addConditionalEdge(fromStep, fromStep.getOnFalse(), false, allSteps, edges);
}

/**
 * Adds one conditional edge for the given branch value, if a target is configured.
 *
 * @param fromStep    source step carrying the condition
 * @param targetId    target step id from onTrue/onFalse (may be empty)
 * @param branchValue the branch this edge represents (true = onTrue, false = onFalse)
 * @param allSteps    all known steps, used to validate the target exists
 * @param edges       edge list to append to
 */
private static void addConditionalEdge(StepInfo fromStep, String targetId, boolean branchValue,
                                       Map<String, StepInfo> allSteps, List<Edge> edges) {
    if (StringUtils.isEmpty(targetId)) {
        return;
    }
    if (!allSteps.containsKey(targetId)) {
        log.warn("Step {} references non-existent {} step: {}",
                fromStep.getId(), branchValue ? "onTrue" : "onFalse", targetId);
        return;
    }
    edges.add(Edge.conditional(
        fromStep.getId(),
        targetId,
        result -> evaluateCondition(fromStep.getCondition(), result, branchValue),
        "When " + fromStep.getCondition() + " is " + branchValue
    ));
    log.debug("Added conditional {} edge: {} -> {}", branchValue, fromStep.getId(), targetId);
}
/**
 * Adds error handling edges.
 *
 * <p>Connects this step to every step whose input type accepts a
 * {@link Throwable}. The previous code additionally tested
 * {@code Exception.class.isAssignableFrom(...)}, which is redundant: every
 * Exception is a Throwable, so the Throwable check alone is equivalent.</p>
 */
private static void addErrorEdges(StepInfo fromStep, Map<String, StepInfo> allSteps,
                                  List<Edge> edges) {
    for (StepInfo toStep : allSteps.values()) {
        if (toStep == fromStep) {
            continue;
        }
        // Only steps that declare a Throwable-compatible input are error handlers.
        if (!Throwable.class.isAssignableFrom(toStep.getInputType())) {
            continue;
        }
        edges.add(Edge.error(fromStep.getId(), toStep.getId()));
        log.debug("Added error edge: {} -> {}", fromStep.getId(), toStep.getId());
    }
}
/**
 * Determines the overall output type of the workflow.
 *
 * <p>Collects the {@code R} of every {@code Finish<R>} return type declared
 * by a step. Zero finish types fall back to {@code Object}; a single type is
 * used directly; multiple types are reduced to their common superclass.</p>
 */
private static Class<?> determineOutputType(Map<String, StepInfo> stepInfos) {
    Set<Class<?>> finishTypes = new HashSet<>();
    for (StepInfo step : stepInfos.values()) {
        StepInfo.ReturnTypeInfo returnInfo = step.getReturnTypeInfo();
        if (returnInfo == null || !TypeUtils.isFinishType(returnInfo.rawType())) {
            continue;
        }
        // Extract R from Finish<R>, ignoring void results.
        if (returnInfo.innerType() instanceof Class<?> clazz
                && !clazz.equals(Void.class) && !clazz.equals(void.class)) {
            finishTypes.add(clazz);
        }
    }
    if (finishTypes.isEmpty()) {
        log.debug("No Finish<R> return types found, defaulting output type to Object");
        return Object.class;
    }
    if (finishTypes.size() > 1) {
        Class<?> commonType = TypeMatcher.findCommonSuperclass(finishTypes);
        log.debug("Multiple finish types found: {}, using common type: {}",
                finishTypes, commonType.getSimpleName());
        return commonType;
    }
    Class<?> outputType = finishTypes.iterator().next();
    log.debug("Determined workflow output type: {}", outputType.getSimpleName());
    return outputType;
}
/**
 * Validates the graph structure created from builder to ensure it's well-formed.
 *
 * @param graph The graph to validate
 * @throws IllegalStateException if the graph is invalid
 */
private static void validateBuilderGraph(WorkflowGraph<?, ?> graph) {
    if (graph.nodes() == null || graph.nodes().isEmpty()) {
        throw new IllegalStateException("Workflow graph must have at least one node");
    }
    String initialId = graph.initialStepId();
    if (initialId == null || initialId.isBlank()) {
        throw new IllegalStateException("Workflow graph must have an initial step");
    }
    if (!graph.nodes().containsKey(initialId)) {
        throw new IllegalStateException(
            "Initial step '" + initialId + "' not found in graph nodes"
        );
    }
    // Further checks (unreachable nodes, cycles, ...) can be added here.
}
// Now using StepInfo from analyzer package
/**
 * Evaluates a condition expression.
 * This is a placeholder implementation - in a real system, you would use
 * Spring Expression Language (SpEL) or another expression evaluator.
 *
 * <p>NOTE(review): because this stub always returns {@code expectedValue},
 * the onTrue conditional edge always matches and the onFalse edge never
 * does — confirm that is acceptable until SpEL evaluation lands.</p>
 *
 * @param condition     the raw condition expression from the annotation
 * @param result        the step result to evaluate against (currently unused)
 * @param expectedValue the branch value this predicate guards
 * @return currently always {@code expectedValue}
 */
private static boolean evaluateCondition(String condition, Object result, boolean expectedValue) {
    if (StringUtils.isEmpty(condition)) {
        return expectedValue;
    }
    // TODO: Implement proper SpEL evaluation
    // For now, just return the expected value
    log.debug("Condition evaluation not yet implemented for: {}", condition);
    return expectedValue;
}
/**
 * Validates a step method.
 *
 * <p>Rejects static and abstract methods, then delegates signature
 * validation to {@link MethodAnalyzer#validateStepMethod(Method)}.</p>
 *
 * @param method The method to validate
 * @throws IllegalArgumentException if validation fails
 */
private static void validateStepMethod(Method method) {
    int modifiers = method.getModifiers();
    if (Modifier.isStatic(modifiers)) {
        throw new IllegalArgumentException(
            "Step method cannot be static: " + method.getName());
    }
    if (Modifier.isAbstract(modifiers)) {
        throw new IllegalArgumentException(
            "Step method cannot be abstract: " + method.getName());
    }
    MethodAnalyzer.validateStepMethod(method);
}
/**
 * Validates the workflow graph structure.
 *
 * @param nodes The workflow nodes
 * @param edges The workflow edges
 * @param initialStepId The initial step ID
 * @throws IllegalStateException if validation fails
 */
private static void validateGraph(Map<String, StepNode> nodes,
                                  Map<String, List<Edge>> edges,
                                  String initialStepId) {
    if (nodes.isEmpty()) {
        throw new IllegalStateException("Workflow has no steps");
    }
    if (initialStepId == null || !nodes.containsKey(initialStepId)) {
        throw new IllegalStateException(
            "Initial step not found: " + initialStepId
        );
    }
    // Steps with no direct edge are not fatal: they may still be reached
    // through runtime type-based routing, so only log them.
    Set<String> unreachable = new HashSet<>(nodes.keySet());
    unreachable.removeAll(findReachableSteps(nodes, edges, initialStepId));
    if (!unreachable.isEmpty()) {
        log.debug("Steps without direct edges detected: {}. These steps may be reached via runtime type-based routing.",
                unreachable);
    }
}
/**
 * Finds all reachable steps from the initial step.
 *
 * <p>Breadth-first traversal over the edge adjacency lists starting at the
 * initial step; returns the set of visited step ids.</p>
 */
private static Set<String> findReachableSteps(Map<String, StepNode> nodes,
                                              Map<String, List<Edge>> edges,
                                              String initialStepId) {
    Set<String> reachable = new HashSet<>();
    Queue<String> frontier = new LinkedList<>();
    frontier.add(initialStepId);
    while (!frontier.isEmpty()) {
        String current = frontier.poll();
        // Set.add returns false when already visited — skip in that case.
        if (!reachable.add(current)) {
            continue;
        }
        for (Edge edge : edges.getOrDefault(current, List.of())) {
            if (!reachable.contains(edge.toStepId())) {
                frontier.add(edge.toStepId());
            }
        }
    }
    return reachable;
}
/**
 * Validates an async step method.
 *
 * @param method The async method to validate
 * @param annotation The AsyncStep annotation
 * @throws IllegalArgumentException if validation fails
 */
private static void validateAsyncStepMethod(Method method, AsyncStep annotation) {
    // The task id is mandatory — it is what StepResult.Async uses to dispatch.
    if (annotation.value().isEmpty()) {
        throw new IllegalArgumentException(
            "Async step must have a task ID value: " + method.getName());
    }
    int modifiers = method.getModifiers();
    if (Modifier.isStatic(modifiers)) {
        throw new IllegalArgumentException(
            "Async step method cannot be static: " + method.getName());
    }
    if (Modifier.isAbstract(modifiers)) {
        throw new IllegalArgumentException(
            "Async step method cannot be abstract: " + method.getName());
    }
    // Intentionally no signature validation here: async methods may take up
    // to 3 parameters (input, context, AsyncProgressReporter), unlike regular steps.
}
/**
 * Finds all @AsyncStep annotated methods in a workflow instance.
 *
 * @param workflowInstance The workflow instance to analyze
 * @return Map of asyncStepId to AsyncStepMetadata
 * @throws IllegalArgumentException if a handler is invalid or two handlers
 *         declare the same task id
 */
public static Map<String, AsyncStepMetadata> findAsyncSteps(Object workflowInstance) {
    Map<String, AsyncStepMetadata> asyncSteps = new HashMap<>();
    for (Method method : workflowInstance.getClass().getDeclaredMethods()) {
        AsyncStep annotation = method.getAnnotation(AsyncStep.class);
        if (annotation == null) {
            continue;
        }
        validateAsyncStepMethod(method, annotation);
        String asyncStepId = annotation.value();
        // Fail fast on duplicates BEFORE changing method accessibility or
        // building metadata, so an invalid workflow leaves no side effects.
        AsyncStepMetadata existing = asyncSteps.get(asyncStepId);
        if (existing != null) {
            throw new IllegalArgumentException(
                "Multiple @AsyncStep methods with id " + asyncStepId + ": " +
                existing.getMethod().getName() + " and " + method.getName()
            );
        }
        if (!method.canAccess(workflowInstance)) {
            method.setAccessible(true);
        }
        asyncSteps.put(asyncStepId, new AsyncStepMetadata(method, workflowInstance, annotation));
        log.debug("Found async step handler {} with id {}", method.getName(), asyncStepId);
    }
    return asyncSteps;
}
/**
 * Validates that async steps have valid configurations.
 * Note: asyncStepId is not a reference to an existing step, but a task identifier
 * that will be used by StepResult.Async to trigger this handler.
 */
private static void validateAsyncSteps(Map<String, StepInfo> steps,
                                       Map<String, AsyncStepMetadata> asyncSteps) {
    asyncSteps.forEach((asyncStepId, metadata) -> {
        // Every handler must carry a non-empty task id.
        if (asyncStepId == null || asyncStepId.isEmpty()) {
            throw new IllegalArgumentException(
                "@AsyncStep " + metadata.getMethod().getName() + " has empty value"
            );
        }
        // Not an error, but worth surfacing: the handler relies on the
        // default Map.class input instead of a typed payload.
        if (metadata.getAnnotation().inputClass() == Map.class) {
            log.debug("@AsyncStep {} uses default Map.class as input",
                    metadata.getMethod().getName());
        }
    });
}
/**
 * Metadata for @AsyncStep annotated methods.
 *
 * <p>Bundles the reflective handler method, the workflow instance that owns
 * it, and the {@link AsyncStep} annotation itself. Lombok's {@code @Data}
 * supplies getters, equals/hashCode and toString; the accessors below are
 * convenience delegates to the annotation's attributes.</p>
 */
@Data
public static class AsyncStepMetadata {
    private final Method method;        // handler method (made accessible by findAsyncSteps)
    private final Object instance;      // workflow instance the handler is invoked on
    private final AsyncStep annotation; // source annotation carrying id/input/description
    public AsyncStepMetadata(Method method, Object instance, AsyncStep annotation) {
        this.method = method;
        this.instance = instance;
        this.annotation = annotation;
    }
    /** @return the task id declared in the annotation's {@code value}. */
    public String getValue() {
        return annotation.value();
    }
    /** @return the declared input payload class ({@code Map.class} by default per validateAsyncSteps). */
    public Class<?> getInputClass() {
        return annotation.inputClass();
    }
    /** @return the human-readable description from the annotation. */
    public String getDescription() {
        return annotation.description();
    }
}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/WorkflowContext.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.domain.RetryContext;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
/**
* Mutable container for workflow execution context with thread-safe operations.
* Provides access to the current run state and results from previously executed steps.
*
* <p>This class maintains two separate storage areas:
* <ul>
* <li>stepOutputs - Internal storage for workflow engine step results</li>
* <li>customData - User-defined data storage for workflow-specific values</li>
* </ul>
* All operations are thread-safe using ConcurrentHashMap.</p>
*/
@Slf4j
@Getter
public class WorkflowContext {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private final String runId;
private final Object triggerData;
private final ConcurrentHashMap<String, StepOutput> stepOutputs;
private final ConcurrentHashMap<String, StepOutput> customData;
private final String instanceId;
private volatile String lastStepId;
// Retry and execution tracking
private final ConcurrentHashMap<String, AtomicInteger> stepExecutionCounts;
private final ConcurrentHashMap<String, RetryContext> stepRetryContexts;
// Internal step listener for tracking/mocking support
private transient volatile InternalStepListener internalStepListener;
/**
 * Well-known keys for special context values.
 *
 * <p>Keys prefixed with {@code __} are engine-internal; setStepOutput skips
 * them when tracking {@code lastStepId}.</p>
 */
public static final class Keys {
    // Engine-internal result/input slots
    public static final String FINAL_RESULT = "__final__";
    public static final String USER_INPUT = "__user_input__";
    public static final String USER_INPUT_TYPE = "__user_input_type__";
    public static final String RESUMED_STEP_INPUT = "__resumed_step_input__";
    // Chat-specific keys
    public static final String CHAT_ID = "__chat_id__";
    public static final String USER_ID = "__user_id__";
    public static final String STEP_INVOCATION_COUNTS = "__step_invocation_counts__";
    // Async-specific keys
    // NOTE(review): single-underscore prefix differs from the other reserved
    // keys (so it does NOT match the "__" skip in setStepOutput) — confirm intentional.
    public static final String ASYNC_FUTURE = "_future";
    private Keys() {} // prevent instantiation
}
/**
 * Creates a new WorkflowContext with the provided parameters.
 *
 * @param runId       unique run identifier; a random UUID is substituted if null/blank
 * @param triggerData the initial data that triggered the workflow
 * @param instanceId  workflow instance id; defaults to the run id when null
 */
public WorkflowContext(String runId, Object triggerData, String instanceId) {
    this(runId, triggerData, null, null, instanceId);
}
/**
 * Creates a new WorkflowContext with the provided parameters.
 *
 * <p>Both maps are copied into fresh ConcurrentHashMaps so the context never
 * aliases caller-owned storage.</p>
 */
private WorkflowContext(String runId, Object triggerData,
                        Map<String, StepOutput> stepOutputs,
                        Map<String, StepOutput> customData,
                        String instanceId) {
    // Fall back to a random run id when none was supplied.
    if (runId == null || runId.isBlank()) {
        this.runId = UUID.randomUUID().toString();
    } else {
        this.runId = runId;
    }
    this.triggerData = triggerData;
    this.stepOutputs = stepOutputs == null
            ? new ConcurrentHashMap<>()
            : new ConcurrentHashMap<>(stepOutputs);
    this.customData = customData == null
            ? new ConcurrentHashMap<>()
            : new ConcurrentHashMap<>(customData);
    this.instanceId = (instanceId != null) ? instanceId : this.runId;
    this.stepExecutionCounts = new ConcurrentHashMap<>();
    this.stepRetryContexts = new ConcurrentHashMap<>();
}
/**
 * Creates a new WorkflowContext for a fresh workflow run.
 *
 * @param triggerData The initial data that triggered the workflow
 * @return A new WorkflowContext with a generated runId
 */
public static WorkflowContext newRun(Object triggerData) {
    // Delegate to the two-arg overload so run-id generation and factory
    // dispatch live in exactly one place.
    return newRun(triggerData, null);
}
/**
 * Creates a new WorkflowContext for a fresh workflow run with an instance ID.
 *
 * @param triggerData The initial data that triggered the workflow
 * @param instanceId The workflow instance ID
 * @return A new WorkflowContext with a generated runId
 */
public static WorkflowContext newRun(Object triggerData, String instanceId) {
    // Creation goes through the engine-configured factory with a fresh run id.
    return WorkflowEngine.contextFactory.create(
            UUID.randomUUID().toString(), triggerData, instanceId);
}
/**
 * Factory method for creating context with existing data.
 *
 * @param runId       existing run id (a random one is generated if null/blank)
 * @param triggerData the original trigger data
 * @param stepOutputs previously recorded step outputs (may be null)
 * @param customData  previously recorded custom data (may be null)
 * @param instanceId  workflow instance id (defaults to runId when null)
 * @return a rehydrated WorkflowContext
 */
public static WorkflowContext fromExisting(String runId, Object triggerData,
                                           Map<String, StepOutput> stepOutputs,
                                           Map<String, StepOutput> customData,
                                           String instanceId) {
    return new WorkflowContext(runId, triggerData, stepOutputs, customData, instanceId);
}
/**
 * Retrieves the output of a previously executed step.
 *
 * @param stepId The ID of the step whose output to retrieve
 * @param type The expected type of the output
 * @param <T> The type parameter
 * @return The step output cast to the requested type
 * @throws NoSuchElementException if the step output doesn't exist
 * @throws ClassCastException if the output cannot be cast to the requested type
 */
public <T> T getStepResult(String stepId, Class<T> type) {
    StepOutput stored = stepOutputs.get(stepId);
    if (stored == null || !stored.hasValue()) {
        throw new NoSuchElementException("No output found for step: " + stepId);
    }
    try {
        return stored.getValueAs(type);
    } catch (ClassCastException e) {
        // Log with both expected and actual types before rethrowing.
        log.error("Type mismatch for step {}: expected {}, actual {}",
                stepId, type.getName(), stored.getActualClass().getName(), e);
        throw e;
    }
}
/**
 * Retrieves the output of a previously executed step, returning a default value if not found.
 *
 * <p>Only a missing output falls back to the default; a type mismatch still
 * propagates as {@link ClassCastException} from {@link #getStepResult}.</p>
 *
 * @param stepId The ID of the step whose output to retrieve
 * @param type The expected type of the output
 * @param defaultValue The default value to return if the step output doesn't exist
 * @param <T> The type parameter
 * @return The step output cast to the requested type, or the default value
 */
public <T> T getStepResultOrDefault(String stepId, Class<T> type, T defaultValue) {
    try {
        return getStepResult(stepId, type);
    } catch (NoSuchElementException e) {
        return defaultValue;
    }
}
/**
 * Checks if a step has produced output.
 *
 * @param stepId The ID of the step to check
 * @return true if the step has output, false otherwise
 */
public boolean hasStepResult(String stepId) {
    StepOutput stored = stepOutputs.get(stepId);
    if (stored == null) {
        return false;
    }
    return stored.hasValue();
}
/**
 * Sets the output for a step (internal use by workflow engine).
 *
 * <p>A null output clears any previously stored value. Non-internal step ids
 * (those without the {@code __} prefix) also update {@code lastStepId}.</p>
 *
 * @param stepId The ID of the step that produced the output
 * @param output The output produced by the step
 * @throws IllegalArgumentException if stepId is null or blank
 */
public void setStepOutput(String stepId, Object output) {
    if (stepId == null || stepId.isBlank()) {
        throw new IllegalArgumentException("Step ID cannot be null or empty");
    }
    if (output == null) {
        stepOutputs.remove(stepId);
        log.trace("Set step output for '{}': {}", stepId, "null");
        return;
    }
    stepOutputs.put(stepId, StepOutput.of(output));
    // Remember the last "real" step; engine-internal keys use the "__" prefix.
    if (!stepId.startsWith("__")) {
        lastStepId = stepId;
    }
    log.trace("Set step output for '{}': {}", stepId, output.getClass().getSimpleName());
}
/**
 * Sets multiple step outputs at once (internal use by workflow engine).
 *
 * @param outputs Map of step IDs to their outputs; a null map is a no-op
 */
public void setStepOutputs(Map<String, Object> outputs) {
    if (outputs == null) {
        return;
    }
    for (Map.Entry<String, Object> entry : outputs.entrySet()) {
        setStepOutput(entry.getKey(), entry.getValue());
    }
}
/**
 * Sets a custom value in the context (for user data).
 *
 * <p>A null value removes the key entirely.</p>
 *
 * @param key The key for the custom value
 * @param value The value to store
 * @throws IllegalArgumentException if key is null or blank
 */
public void setContextValue(String key, Object value) {
    if (key == null || key.isBlank()) {
        throw new IllegalArgumentException("Key cannot be null or empty");
    }
    if (value == null) {
        customData.remove(key);
        log.trace("Set context value for '{}': {}", key, "null");
        return;
    }
    customData.put(key, StepOutput.of(value));
    log.trace("Set context value for '{}': {}", key, value.getClass().getSimpleName());
}
/**
 * Gets a custom value from the context.
 *
 * @param key The key for the custom value
 * @param type The expected type of the value
 * @param <T> The type parameter
 * @return The value cast to the requested type, or null if not found
 * @throws ClassCastException if the stored value cannot be cast to the requested type
 */
public <T> T getContextValue(String key, Class<T> type) {
    StepOutput stored = customData.get(key);
    if (stored == null || !stored.hasValue()) {
        return null;
    }
    try {
        return stored.getValueAs(type);
    } catch (ClassCastException e) {
        // Log with both expected and actual types before rethrowing.
        log.error("Type mismatch for context key {}: expected {}, actual {}",
                key, type.getName(), stored.getActualClass().getName(), e);
        throw e;
    }
}
/**
 * Gets a custom value from the context with a default.
 *
 * @param key The key for the custom value
 * @param type The expected type of the value
 * @param defaultValue The default value if not found
 * @param <T> The type parameter
 * @return The value cast to the requested type, or the default value
 */
public <T> T getContextValueOrDefault(String key, Class<T> type, T defaultValue) {
    T stored = getContextValue(key, type);
    if (stored == null) {
        return defaultValue;
    }
    return stored;
}
// Helper methods for common types
/**
 * Gets a string value from custom data.
 *
 * @param key the custom-data key
 * @return the stored String, or {@code null} if absent
 * @throws ClassCastException if the stored value is not a String
 */
public String getString(String key) {
    return getContextValue(key, String.class);
}
/**
 * Gets a string value with default.
 *
 * @param key          the custom-data key
 * @param defaultValue value returned when the key is absent
 * @return the stored String, or {@code defaultValue} if absent
 */
public String getStringOrDefault(String key, String defaultValue) {
    // Delegate instead of re-implementing the null-fallback logic.
    return getContextValueOrDefault(key, String.class, defaultValue);
}
/**
 * Gets an integer value from custom data.
 *
 * @param key the custom-data key
 * @return the stored Integer, or {@code null} if absent
 * @throws ClassCastException if the stored value is not an Integer
 */
public Integer getInt(String key) {
    return getContextValue(key, Integer.class);
}
/**
 * Gets an integer value with default.
 *
 * @param key          the custom-data key
 * @param defaultValue value returned when the key is absent
 * @return the stored Integer, or {@code defaultValue} if absent
 */
public Integer getIntOrDefault(String key, Integer defaultValue) {
    // Delegate instead of re-implementing the null-fallback logic.
    return getContextValueOrDefault(key, Integer.class, defaultValue);
}
/**
 * Gets a long value from custom data.
 *
 * @param key the custom-data key
 * @return the stored Long, or {@code null} if absent
 * @throws ClassCastException if the stored value is not a Long
 */
public Long getLong(String key) {
    return getContextValue(key, Long.class);
}
/**
 * Gets a long value with default.
 *
 * @param key          the custom-data key
 * @param defaultValue value returned when the key is absent
 * @return the stored Long, or {@code defaultValue} if absent
 */
public Long getLongOrDefault(String key, Long defaultValue) {
    // Delegate instead of re-implementing the null-fallback logic.
    return getContextValueOrDefault(key, Long.class, defaultValue);
}
/**
 * Gets a boolean value from custom data.
 *
 * @param key the custom-data key
 * @return the stored Boolean, or {@code null} if absent
 * @throws ClassCastException if the stored value is not a Boolean
 */
public Boolean getBoolean(String key) {
    return getContextValue(key, Boolean.class);
}
/**
 * Gets a boolean value with default.
 *
 * @param key          the custom-data key
 * @param defaultValue value returned when the key is absent
 * @return the stored Boolean, or {@code defaultValue} if absent
 */
public Boolean getBooleanOrDefault(String key, Boolean defaultValue) {
    // Delegate instead of re-implementing the null-fallback logic.
    return getContextValueOrDefault(key, Boolean.class, defaultValue);
}
/**
 * Gets a double value from custom data.
 *
 * @param key the custom-data key
 * @return the stored Double, or {@code null} if absent
 * @throws ClassCastException if the stored value is not a Double
 */
public Double getDouble(String key) {
    return getContextValue(key, Double.class);
}
/**
 * Gets a double value with default.
 *
 * @param key          the custom-data key
 * @param defaultValue value returned when the key is absent
 * @return the stored Double, or {@code defaultValue} if absent
 */
public Double getDoubleOrDefault(String key, Double defaultValue) {
    // Delegate instead of re-implementing the null-fallback logic.
    return getContextValueOrDefault(key, Double.class, defaultValue);
}
/**
* Gets a list from custom data.
*/
@SuppressWarnings("unchecked")
public <T> List<T> getList(String key, Class<T> elementType) {
StepOutput output = customData.get(key);
if (output == null || !output.hasValue()) {
return null;
}
Object value = output.getValue();
if (value instanceof List) {
return (List<T>) value;
}
return null;
}
/**
 * Gets a map from custom data.
 *
 * <p>Returns {@code null} when the key is absent or the stored value is not
 * a Map. Key/value types are not verified — the unchecked cast trusts the caller.</p>
 */
@SuppressWarnings("unchecked")
public <K, V> Map<K, V> getMap(String key, Class<K> keyType, Class<V> valueType) {
    StepOutput stored = customData.get(key);
    if (stored == null || !stored.hasValue()) {
        return null;
    }
    if (stored.getValue() instanceof Map<?, ?> map) {
        return (Map<K, V>) map;
    }
    return null;
}
/**
 * Gets the trigger data cast to a specific type.
 *
 * @param type The expected type of the trigger data
 * @param <T> The type parameter
 * @return The trigger data cast to the requested type, or null when there is none
 * @throws ClassCastException if the trigger data cannot be cast to the requested type
 */
public <T> T getTriggerData(Class<T> type) {
    return triggerData == null
            ? null
            : convertToType(triggerData, type, "trigger data");
}
/**
 * Returns the number of step outputs currently stored.
 *
 * @return The number of step outputs (engine-internal "__" keys included)
 */
public int getStepCount() {
    return stepOutputs.size();
}
/**
 * Gets the raw step outputs map for serialization.
 * Internal use only.
 *
 * <p>Returns a defensive copy, deliberately shadowing the accessor that the
 * class-level Lombok {@code @Getter} would generate (which would expose the
 * live concurrent map).</p>
 *
 * @return A snapshot copy of the step outputs
 */
public Map<String, StepOutput> getStepOutputs() {
    return new HashMap<>(stepOutputs);
}
/**
 * Returns the number of custom data entries.
 *
 * @return The number of custom data entries
 */
public int getCustomDataCount() {
    return customData.size();
}
/**
 * Creates a minimal string representation for logging.
 * Intentionally omits values — only ids, counts and step keys are included.
 *
 * @return A string representation of the context
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder("WorkflowContext{");
    sb.append("runId='").append(runId).append('\'');
    sb.append(", stepCount=").append(stepOutputs.size());
    sb.append(", customDataCount=").append(customData.size());
    sb.append(", steps=").append(stepOutputs.keySet());
    sb.append('}');
    return sb.toString();
}
// Retry and execution tracking methods
/**
 * Records a step execution and returns the new count.
 *
 * @param stepId The step ID
 * @return The new execution count for this step
 */
public int recordStepExecution(String stepId) {
    // Atomically create-or-fetch the counter, then bump it.
    int newCount = stepExecutionCounts
            .computeIfAbsent(stepId, key -> new AtomicInteger())
            .incrementAndGet();
    log.debug("Step {} executed {} times in workflow {}", stepId, newCount, runId);
    return newCount;
}
/**
 * Gets the current execution count for a step.
 *
 * @param stepId The step ID
 * @return The current execution count, or 0 if never executed
 */
public int getStepExecutionCount(String stepId) {
    AtomicInteger counter = stepExecutionCounts.get(stepId);
    if (counter == null) {
        return 0;
    }
    return counter.get();
}
/**
 * Updates the retry context for a step.
 * Replaces any previous retry context for the same step id.
 *
 * @param stepId The step ID
 * @param retryContext The new retry context
 */
public void updateRetryContext(String stepId, RetryContext retryContext) {
    stepRetryContexts.put(stepId, retryContext);
    log.debug("Updated retry context for step {} in workflow {}", stepId, runId);
}
/**
 * Gets the retry context for a step.
 *
 * @param stepId The step ID
 * @return The retry context, or null if no retries have occurred
 */
public RetryContext getRetryContext(String stepId) {
    return stepRetryContexts.get(stepId);
}
/**
 * Gets the current retry context if the executing step has one.
 * This is a convenience method for steps to access their own retry context.
 *
 * @return The retry context for the current step, or null if not in retry
 */
public RetryContext getCurrentRetryContext() {
    // Read the volatile field once so the check and lookup see the same value.
    String stepId = lastStepId;
    return stepId == null ? null : stepRetryContexts.get(stepId);
}
/**
 * Clears retry context for a step (e.g., after successful execution).
 * No-op if no retry context exists for the step.
 *
 * @param stepId The step ID
 */
public void clearRetryContext(String stepId) {
    stepRetryContexts.remove(stepId);
    log.debug("Cleared retry context for step {} in workflow {}", stepId, runId);
}
/**
 * Gets all step execution counts.
 *
 * @return A snapshot map of step IDs to execution counts
 */
public Map<String, Integer> getAllStepExecutionCounts() {
    Map<String, Integer> snapshot = new HashMap<>();
    for (Map.Entry<String, AtomicInteger> entry : stepExecutionCounts.entrySet()) {
        snapshot.put(entry.getKey(), entry.getValue().get());
    }
    return snapshot;
}
/**
 * Fluent step output access for cleaner syntax in predicates and workflow logic.
 *
 * @param stepId The ID of the step whose output to access
 * @return A StepOutputAccessor for fluent access to the step's output
 */
public StepOutputAccessor step(String stepId) {
    return new StepOutputAccessor(stepId);
}
/**
 * Direct access to last step output.
 *
 * @param type The expected type of the output
 * @param <T> The type parameter
 * @return Optional containing the last step output, or empty if none or on type mismatch
 */
public <T> Optional<T> lastOutput(Class<T> type) {
    // Single volatile read so the null-check and lookup agree.
    String stepId = lastStepId;
    if (stepId == null) {
        return Optional.empty();
    }
    StepOutput stored = stepOutputs.get(stepId);
    if (stored == null || !stored.hasValue()) {
        return Optional.empty();
    }
    try {
        return Optional.of(stored.getValueAs(type));
    } catch (ClassCastException e) {
        log.error("Type mismatch for last output: expected {}, actual {}",
                type.getName(), stored.getActualClass().getName(), e);
        return Optional.empty();
    }
}
/**
 * Inner class for fluent step output access.
 *
 * <p>Non-static by design: reads the enclosing context's {@code stepOutputs}
 * map live, so results reflect outputs recorded after construction.</p>
 */
public class StepOutputAccessor {
    private final String stepId; // step this accessor is bound to
    StepOutputAccessor(String stepId) {
        this.stepId = stepId;
    }
    /**
     * Get the output of the step as an Optional.
     * Type mismatches are logged and reported as empty rather than thrown.
     *
     * @param type The expected type of the output
     * @param <T> The type parameter
     * @return Optional containing the output, or empty if not found
     */
    public <T> Optional<T> output(Class<T> type) {
        StepOutput output = stepOutputs.get(stepId);
        if (output == null || !output.hasValue()) {
            return Optional.empty();
        }
        try {
            return Optional.of(output.getValueAs(type));
        } catch (ClassCastException e) {
            log.error("Type mismatch for step {}: expected {}, actual {}",
                stepId, type.getName(), output.getActualClass().getName(), e);
            return Optional.empty();
        }
    }
    /**
     * Get the output of the step or throw if not found.
     * NOTE: a type mismatch also surfaces as NoSuchElementException here,
     * because output() maps ClassCastException to an empty Optional.
     *
     * @param type The expected type of the output
     * @param <T> The type parameter
     * @return The output value
     * @throws NoSuchElementException if output not found
     */
    public <T> T outputOrThrow(Class<T> type) {
        return output(type)
            .orElseThrow(() -> new NoSuchElementException("No output found for step: " + stepId));
    }
    /**
     * Check if the step has produced output.
     * Checks key presence only, unlike succeeded() which also inspects the value.
     *
     * @return true if output exists
     */
    public boolean exists() {
        return stepOutputs.containsKey(stepId);
    }
    /**
     * Check if the step succeeded (has output and it's not a Throwable).
     *
     * @return true if step succeeded
     */
    public boolean succeeded() {
        StepOutput output = stepOutputs.get(stepId);
        if (output == null || !output.hasValue()) {
            return false;
        }
        Object value = output.getValue();
        return value != null && !(value instanceof Throwable);
    }
}
/**
 * Converts a value to the requested type, handling JSON deserialization.
 *
 * @param value The value to convert
 * @param type The target type
 * @param description Description for error messages (e.g., "step output", "trigger data")
 * @param <T> The type parameter
 * @return The value converted to the requested type
 * @throws ClassCastException if conversion fails
 */
private static <T> T convertToType(Object value, Class<T> type, String description) {
    if (value == null) {
        return null;
    }
    // Fast path: no conversion needed.
    if (type.isInstance(value)) {
        return type.cast(value);
    }
    try {
        // JsonNode goes through Jackson's tree conversion.
        if (value instanceof JsonNode node) {
            return OBJECT_MAPPER.treeToValue(node, type);
        }
        // A Map usually means a JSON payload deserialized without type info.
        if (value instanceof Map) {
            return OBJECT_MAPPER.convertValue(value, type);
        }
        // Anything else: attempt a direct cast and let it fail loudly.
        return type.cast(value);
    } catch (Exception e) {
        throw new ClassCastException(
            "Cannot convert " + description + " to " + type.getName() + ": " + e.getMessage()
        );
    }
}
/**
 * Notifies about internal step execution within a branch or other composite step.
 * This is used for test framework tracking when steps are executed internally
 * without going through the normal workflow engine execution path.
 *
 * @param stepId the ID of the internal step being executed
 * @param input the input to the step
 */
public void notifyInternalStepExecution(String stepId, Object input) {
    String inputType = (input == null) ? "null" : input.getClass().getSimpleName();
    log.debug("Internal step execution: {} with input type: {}", stepId, inputType);
    // Read the volatile listener once so check and invocation use the same reference.
    InternalStepListener listener = internalStepListener;
    if (listener == null) {
        log.debug("No internal step listener set for step: {}", stepId);
        return;
    }
    log.debug("Notifying internal step listener for step: {}", stepId);
    listener.beforeInternalStep(stepId, input, this);
}
/**
 * Sets the internal step listener for this context.
 * Used by test frameworks to track internal step executions.
 * The field is volatile, so the new listener is visible to all threads.
 *
 * @param listener the listener to install (null to clear)
 */
public void setInternalStepListener(InternalStepListener listener) {
    this.internalStepListener = listener;
}
/**
 * Gets the internal step listener if set.
 *
 * @return the installed listener, or null when none is set
 */
public InternalStepListener getInternalStepListener() {
    return internalStepListener;
}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/WorkflowContextFactory.java
|
package ai.driftkit.workflow.engine.core;
/**
* Factory interface for creating WorkflowContext instances.
* Allows customization of context creation, such as injecting additional services.
*/
public interface WorkflowContextFactory {
    /**
     * Creates a new WorkflowContext for a workflow run.
     *
     * @param runId       unique identifier of this workflow run
     * @param triggerData the initial data that triggered the workflow
     * @param instanceId  the workflow instance ID (may be null)
     * @return a freshly created WorkflowContext instance
     */
    WorkflowContext create(String runId, Object triggerData, String instanceId);

    /**
     * Default factory producing plain WorkflowContext instances via the
     * (runId, triggerData, instanceId) constructor.
     */
    WorkflowContextFactory DEFAULT = (runId, triggerData, instanceId) ->
            new WorkflowContext(runId, triggerData, instanceId);
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/WorkflowEngine.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.common.service.ChatStore;
import ai.driftkit.common.domain.chat.ChatMessage;
import ai.driftkit.common.domain.chat.ChatRequest;
import ai.driftkit.workflow.engine.async.InMemoryProgressTracker;
import ai.driftkit.workflow.engine.async.ProgressTracker;
import ai.driftkit.workflow.engine.async.TaskProgressReporter;
import ai.driftkit.workflow.engine.builder.WorkflowBuilder;
import ai.driftkit.workflow.engine.domain.AsyncStepState;
import ai.driftkit.workflow.engine.domain.SuspensionData;
import ai.driftkit.workflow.engine.domain.WorkflowEngineConfig;
import ai.driftkit.workflow.engine.domain.WorkflowEvent;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
import ai.driftkit.workflow.engine.persistence.AsyncStepStateRepository;
import ai.driftkit.workflow.engine.persistence.inmemory.InMemoryAsyncStepStateRepository;
import ai.driftkit.workflow.engine.persistence.inmemory.InMemorySuspensionDataRepository;
import ai.driftkit.workflow.engine.persistence.inmemory.InMemoryWorkflowStateRepository;
import ai.driftkit.workflow.engine.persistence.SuspensionDataRepository;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance.WorkflowStatus;
import ai.driftkit.workflow.engine.persistence.WorkflowStateRepository;
import ai.driftkit.workflow.engine.schema.SchemaUtils;
import ai.driftkit.workflow.engine.utils.WorkflowInputOutputHandler;
import ai.driftkit.workflow.engine.analyzer.TypeUtils;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
* Core workflow execution engine that orchestrates workflow runs.
*
* <p>This engine handles:</p>
* <ul>
* <li>Workflow registration and management</li>
* <li>Step execution with proper error handling</li>
* <li>Suspension and resumption for Human-in-the-Loop</li>
* <li>Asynchronous step execution</li>
* <li>Integration with Spring DI (optional)</li>
* </ul>
*/
@Slf4j
public class WorkflowEngine {
// Default factory that creates standard WorkflowContext
static WorkflowContextFactory contextFactory = defaultContextFactory();

/**
 * Builds the engine's built-in context factory, which creates contexts via
 * {@code WorkflowContext.fromExisting(runId, triggerData, null, null, instanceId)}.
 * Kept in one place so the field initializer and the reset path in
 * {@link #setContextFactory(WorkflowContextFactory)} cannot drift apart
 * (previously the same lambda was duplicated in both places).
 */
private static WorkflowContextFactory defaultContextFactory() {
    return (runId, triggerData, instanceId) ->
            WorkflowContext.fromExisting(runId, triggerData, null, null, instanceId);
}

/**
 * Sets the context factory to be used for creating WorkflowContext instances.
 * This method is primarily intended for test frameworks to inject custom context implementations.
 *
 * @param factory the context factory to use, or null to reset to default
 */
public static void setContextFactory(WorkflowContextFactory factory) {
    contextFactory = factory != null ? factory : defaultContextFactory();
}
// Registered workflow graphs keyed by workflow ID; concurrent map so
// registration and lookup are safe across threads.
private final Map<String, WorkflowGraph<?, ?>> registeredWorkflows = new ConcurrentHashMap<>();
// Persists workflow instance state (in-memory implementation by default).
private final WorkflowStateRepository stateRepository;
// Persists async step state, looked up by message ID.
private final AsyncStepStateRepository asyncStepStateRepository;
// Persists suspension data used when resuming suspended workflows.
private final SuspensionDataRepository suspensionDataRepository;
// Main pool that runs workflow executions and async tasks.
private final ExecutorService executorService;
// Scheduler pool (named "workflow-scheduler"); usage not visible in this chunk.
private final ScheduledExecutorService scheduledExecutor;
// Lifecycle listeners keyed by listener ID (see addListener/removeListener).
private final Map<String, WorkflowExecutionListener> listeners = new ConcurrentHashMap<>();
// Tracks progress/events of async step executions.
private final ProgressTracker progressTracker;
// Resolves and invokes async step handlers for registered workflows.
private final AsyncStepHandler asyncStepHandler;
// Decides the next step to run after a step result (routing).
private final StepRouter stepRouter;
// Executes individual steps, with interceptor and retry support.
private final WorkflowExecutor workflowExecutor;
// Drives the overall execution loop (see executeWorkflow).
private final WorkflowOrchestrator orchestrator;
// Manages persisted workflow state transitions for the orchestrator.
private final WorkflowStateManager stateManager;
// Handles CompletableFuture-based async steps (see handleAsyncStep).
private final AsyncTaskManager asyncTaskManager;
@Getter
private final ChatStore chatStore; // optional chat history store; may be null
/**
 * Creates a workflow engine with default configuration.
 * Equivalent to {@code new WorkflowEngine(WorkflowEngineConfig.defaultConfig())}:
 * in-memory repositories and trackers, default thread pool sizing.
 */
public WorkflowEngine() {
this(WorkflowEngineConfig.defaultConfig());
}
/**
 * Creates a workflow engine with custom configuration.
 *
 * <p>Wires all collaborators in dependency order: repositories and trackers
 * first (each falling back to an in-memory implementation when absent from
 * the config), then the step-execution pipeline (handler, router, preparer,
 * executor, orchestrator), then the thread pools, then the async task
 * manager that depends on both.</p>
 *
 * @param config engine configuration; must not be null
 */
public WorkflowEngine(WorkflowEngineConfig config) {
// Initialize state repository
this.stateRepository = config.getStateRepository() != null ?
config.getStateRepository() : new InMemoryWorkflowStateRepository();
// Initialize async step state repository
this.asyncStepStateRepository = config.getAsyncStepStateRepository() != null ?
config.getAsyncStepStateRepository() : new InMemoryAsyncStepStateRepository();
// Initialize suspension data repository
this.suspensionDataRepository = config.getSuspensionDataRepository() != null ?
config.getSuspensionDataRepository() : new InMemorySuspensionDataRepository();
// Initialize progress tracker
this.progressTracker = config.getProgressTracker() != null ?
config.getProgressTracker() : new InMemoryProgressTracker();
// Initialize chat store (optional)
this.chatStore = config.getChatStore();
// Set context factory if provided.
// NOTE(review): this mutates STATIC state from an instance constructor, so
// the last-constructed engine's config wins for all engines in the JVM —
// confirm this is intentional.
if (config.getContextFactory() != null) {
WorkflowEngine.setContextFactory(config.getContextFactory());
}
// Initialize async step handler
this.asyncStepHandler = new AsyncStepHandler();
// Initialize step router
this.stepRouter = new DefaultStepRouter();
// Initialize input preparer (only consumed by the orchestrator below)
InputPreparer inputPreparer = new InputPreparer();
// Initialize workflow executor
this.workflowExecutor = new WorkflowExecutor(config, progressTracker, chatStore);
// Add listener adapter as interceptor so listeners see step-level events
this.workflowExecutor.addInterceptor(new ListenerAdapterInterceptor());
// Initialize state manager
this.stateManager = new WorkflowStateManager(stateRepository);
// Initialize orchestrator
this.orchestrator = new WorkflowOrchestrator(
stateManager,
workflowExecutor,
stepRouter,
inputPreparer,
suspensionDataRepository,
chatStore
);
// Initialize thread pools
this.executorService = createExecutorService(config);
this.scheduledExecutor = Executors.newScheduledThreadPool(
config.getScheduledThreads(),
new NamedThreadFactory("workflow-scheduler")
);
// Initialize async task manager
this.asyncTaskManager = new AsyncTaskManager(
executorService,
progressTracker,
stateRepository,
asyncStepHandler,
asyncStepStateRepository
);
log.info("WorkflowEngine initialized with config: {}", config);
}
/**
 * Registers a workflow graph for execution.
 *
 * @param graph The workflow graph to register
 * @throws IllegalArgumentException if graph is null, or a workflow with the
 *         same ID is already registered
 */
public void register(WorkflowGraph<?, ?> graph) {
    if (graph == null) {
        throw new IllegalArgumentException("Workflow graph cannot be null");
    }
    String workflowId = graph.id();
    // putIfAbsent makes the existence check and insertion a single atomic
    // operation; the previous containsKey/put pair could let two concurrent
    // registrations of the same ID both succeed (check-then-act race).
    if (registeredWorkflows.putIfAbsent(workflowId, graph) != null) {
        throw new IllegalArgumentException(
            "Workflow already registered: " + workflowId
        );
    }
    // Register async steps if any
    asyncStepHandler.registerWorkflow(graph);
    // Register all input schemas for the workflow steps
    registerWorkflowSchemas(graph);
    log.info("Registered workflow: {} (version: {}, async steps: {})",
        workflowId, graph.version(),
        graph.asyncStepMetadata() != null ? graph.asyncStepMetadata().size() : 0);
}
/**
 * Registers a workflow instance by analyzing it.
 *
 * @param workflowInstance An instance of a class annotated with @Workflow
 */
public void register(Object workflowInstance) {
    register(WorkflowAnalyzer.analyze(workflowInstance));
}
/**
 * Registers a workflow created with the builder API. The builder workflow is
 * converted to a WorkflowGraph and registered under the same ID that was
 * specified in the builder.
 *
 * Example usage:
 * <pre>
 * Workflow&lt;OrderRequest, OrderResult&gt; orderWorkflow = Workflow
 *     .define("order-processing", OrderRequest.class, OrderResult.class)
 *     .then(StepDefinition.of(orderService::validateOrder))
 *     .then(StepDefinition.of(orderService::processPayment))
 *     .then(StepDefinition.of(orderService::shipOrder));
 *
 * engine.register(orderWorkflow);
 *
 * // Execute using the workflow ID from the builder
 * WorkflowExecution&lt;OrderResult&gt; execution = engine.execute("order-processing", orderRequest);
 * </pre>
 *
 * @param builderWorkflow The workflow created using the builder API
 */
public void register(WorkflowBuilder<?, ?> builderWorkflow) {
    register(WorkflowAnalyzer.analyzeBuilder(builderWorkflow));
}
/**
 * Starts a new workflow execution under a freshly generated instance ID.
 *
 * @param workflowId The ID of the workflow to execute
 * @param input The input data for the workflow
 * @return WorkflowExecution handle for tracking the execution
 */
public <T, R> WorkflowExecution<R> execute(String workflowId, T input) {
    String generatedInstanceId = UUID.randomUUID().toString();
    return execute(workflowId, input, generatedInstanceId);
}
/**
 * Starts a new workflow execution with chat context. If an instance with this
 * ID already exists and is suspended, it is automatically resumed with the
 * provided input instead of starting a new run.
 *
 * @param workflowId The ID of the workflow to execute
 * @param input The input data for the workflow
 * @param instanceId The specific instance ID to use
 * @param chatId The chat ID to associate with this execution
 * @return WorkflowExecution handle for tracking the execution
 * @throws IllegalStateException if the instance exists in a non-suspended state
 */
public <T, R> WorkflowExecution<R> execute(String workflowId, T input, String instanceId, String chatId) {
    Optional<WorkflowInstance> existing = stateRepository.load(instanceId);
    if (existing.isPresent()) {
        WorkflowInstance found = existing.get();
        // A suspended instance is transparently resumed with the new input.
        if (found.getStatus() == WorkflowStatus.SUSPENDED) {
            log.info("Workflow instance {} is suspended, automatically resuming with provided input", instanceId);
            return resume(instanceId, input);
        }
        // Any other status means a duplicate start attempt.
        throw new IllegalStateException(
            "Workflow instance already exists with ID: " + instanceId +
            " in status: " + found.getStatus() +
            ". Cannot start new execution."
        );
    }
    // Fresh run: build a new instance carrying the chat ID.
    WorkflowGraph<T, R> graph = getWorkflowGraph(workflowId);
    // Convert ChatRequest to workflow input type if needed
    T actualInput = TypeUtils.convertInputForWorkflow(input, graph, workflowId);
    WorkflowInstance created = WorkflowInstance.newInstance(graph, actualInput, instanceId, chatId);
    stateRepository.save(created);
    WorkflowExecution<R> execution = new WorkflowExecution<>(
        created.getInstanceId(),
        workflowId,
        new CompletableFuture<>(),
        this
    );
    // Kick off execution asynchronously on the engine pool.
    executorService.submit(() -> executeWorkflow(created, graph, execution));
    return execution;
}
/**
 * Starts a new workflow execution with specific instance ID.
 * If a workflow with this instance ID already exists and is suspended,
 * automatically resumes it with the provided input.
 *
 * @param workflowId The ID of the workflow to execute
 * @param input The input data for the workflow
 * @param instanceId The specific instance ID to use
 * @return WorkflowExecution handle for tracking the execution
 * @throws IllegalStateException if the instance exists in a non-suspended state
 */
public <T, R> WorkflowExecution<R> execute(String workflowId, T input, String instanceId) {
    Optional<WorkflowInstance> existing = stateRepository.load(instanceId);
    if (existing.isPresent()) {
        WorkflowInstance found = existing.get();
        // A suspended instance is transparently resumed with the new input.
        if (found.getStatus() == WorkflowStatus.SUSPENDED) {
            log.info("Workflow instance {} is suspended, automatically resuming with provided input", instanceId);
            return resume(instanceId, input);
        }
        // Any other status means a duplicate start attempt.
        throw new IllegalStateException(
            "Workflow instance already exists with ID: " + instanceId +
            " in status: " + found.getStatus() +
            ". Cannot start new execution."
        );
    }
    // Fresh run: create and persist a new instance.
    WorkflowGraph<T, R> graph = getWorkflowGraph(workflowId);
    // Convert ChatRequest to workflow input type if needed
    T actualInput = TypeUtils.convertInputForWorkflow(input, graph, workflowId);
    WorkflowInstance created = WorkflowInstance.newInstance(graph, actualInput, instanceId);
    stateRepository.save(created);
    WorkflowExecution<R> execution = new WorkflowExecution<>(
        created.getInstanceId(),
        workflowId,
        new CompletableFuture<>(),
        this
    );
    // Kick off execution asynchronously on the engine pool.
    executorService.submit(() -> executeWorkflow(created, graph, execution));
    return execution;
}
/**
 * Resumes a suspended workflow execution.
 *
 * <p>Restores the original step input from suspension data (if present),
 * validates the user input against the expected next-input type, routes to
 * the step that should receive the input, clears the suspension data and
 * re-enters the execution loop asynchronously.</p>
 *
 * @param runId The run ID of the suspended workflow
 * @param input The user input to resume with
 * @return WorkflowExecution handle for tracking the resumed execution
 * @throws IllegalArgumentException if no instance exists for runId, or the
 *         input does not match the expected next-input type
 * @throws IllegalStateException if the instance is not suspended, or no step
 *         accepts the resume input type
 */
public <T, R> WorkflowExecution<R> resume(String runId, T input) {
WorkflowInstance instance = stateRepository.load(runId)
.orElseThrow(() -> new IllegalArgumentException(
"Workflow instance not found: " + runId
));
if (instance.getStatus() != WorkflowStatus.SUSPENDED) {
throw new IllegalStateException(
"Workflow is not suspended: " + runId + " (status: " + instance.getStatus() + ")"
);
}
WorkflowGraph<?, R> graph = getWorkflowGraph(instance.getWorkflowId());
// Get the suspended step to find the original input type
String suspendedStepId = instance.getCurrentStepId();
// Get suspension data from repository
SuspensionData suspensionData = suspensionDataRepository.findByInstanceId(runId).orElse(null);
log.debug("Resume: loaded suspension data for instance {}: {}", runId,
suspensionData != null ? "found (nextInputClass=" +
(suspensionData.nextInputClass() != null ? suspensionData.nextInputClass().getSimpleName() : "null") + ")" : "not found");
if (suspensionData != null && suspensionData.originalStepInput() != null) {
// Store the original step input back in context for the step to use
instance.updateContext(WorkflowContext.Keys.RESUMED_STEP_INPUT,
suspensionData.originalStepInput());
}
// Store the user input separately with its type information
WorkflowInputOutputHandler.saveUserInput(instance, input);
// When resuming with suspension data, find the next step
if (suspensionData != null && suspensionData.nextInputClass() != null) {
Class<?> expectedInputType = suspensionData.nextInputClass();
// Special handling for ChatRequest with schema name
if (input instanceof ChatRequest) {
expectedInputType = TypeUtils.resolveInputType((ChatRequest) input, expectedInputType);
} else if (!expectedInputType.isInstance(input)) {
// Fail fast on a type mismatch instead of routing to the wrong step.
throw new IllegalArgumentException(
"Resume input type mismatch: expected " + expectedInputType.getName() +
" but received " + input.getClass().getName()
);
}
// When resuming, we need to find the next step after the suspended one
// The suspended step already executed and returned Suspend result
String nextStepId = stepRouter.findNextStep(graph, suspendedStepId, input);
if (nextStepId != null) {
instance.setCurrentStepId(nextStepId);
log.debug("Resume: moving from suspended step {} to next step {} for input type {}",
suspendedStepId, nextStepId, expectedInputType.getSimpleName());
} else {
// If no next step found by normal routing, try type-based routing with expected type
nextStepId = stepRouter.findStepForInputType(graph, expectedInputType, suspendedStepId);
if (nextStepId != null) {
instance.setCurrentStepId(nextStepId);
log.debug("Resume: found step {} that accepts input type {} using type-based routing",
nextStepId, expectedInputType.getSimpleName());
} else {
throw new IllegalStateException(
"No step found that can process resume input of type: " +
expectedInputType.getName()
);
}
}
}
instance.resume();
// Auto-track user input in ChatStore if available
if (chatStore != null && runId != null && input != null) {
chatStore.add(runId,
SchemaUtils.extractProperties(input),
ChatMessage.MessageType.USER);
log.debug("Auto-tracked user input to ChatStore for instance: {}", runId);
}
// Delete suspension data from repository (one-shot: consumed by this resume)
suspensionDataRepository.deleteByInstanceId(runId);
stateRepository.save(instance);
WorkflowExecution<R> execution = new WorkflowExecution<>(
instance.getInstanceId(),
instance.getWorkflowId(),
new CompletableFuture<>(),
this
);
// Resume execution asynchronously
executorService.submit(() -> executeWorkflow(instance, graph, execution));
return execution;
}
/**
 * Main workflow execution loop: delegates the run to the orchestrator, then
 * fires the lifecycle notification matching the instance's final status.
 */
@SuppressWarnings("unchecked")
private <R> void executeWorkflow(WorkflowInstance instance,
                                 WorkflowGraph<?, R> graph,
                                 WorkflowExecution<R> execution) {
    notifyListeners(l -> l.onWorkflowStarted(instance));
    // Delegate the actual step-by-step run to the orchestrator.
    orchestrator.orchestrateExecution(instance, graph, execution, this);
    // Report the outcome based on the final state.
    switch (instance.getStatus()) {
        case COMPLETED -> {
            R finalResult = orchestrator.getFinalResult(instance, graph);
            notifyListeners(l -> l.onWorkflowCompleted(instance, finalResult));
        }
        case FAILED -> {
            Throwable error = orchestrator.createErrorFromInfo(instance.getErrorInfo());
            notifyListeners(l -> l.onWorkflowFailed(instance, error));
        }
        case SUSPENDED -> {
            log.debug("Workflow suspended: {} at step: {}",
                instance.getInstanceId(), instance.getCurrentStepId());
            notifyListeners(l -> l.onWorkflowSuspended(instance));
        }
        default -> {
            // Other statuses (e.g. still running under async) need no notification here.
        }
    }
}
/**
 * Handles asynchronous step execution.
 * Modified to treat async as suspend-like with updateable results.
 * Package-private to allow WorkflowOrchestrator to call it.
 *
 * <p>Two paths exist: if the task args carry {@code ASYNC_FUTURE}, the
 * CompletableFuture-based AsyncTaskManager takes over; otherwise a
 * traditional async handler is wrapped in a supplier and executed via the
 * progress tracker.</p>
 *
 * @param instance    the workflow instance owning the async step
 * @param graph       the workflow graph being executed
 * @param currentStep the step that produced the Async result
 * @param async       the Async step result (task ID, args, immediate data)
 * @param execution   handle whose future is completed exceptionally on failure
 */
<R> void handleAsyncStep(WorkflowInstance instance,
WorkflowGraph<?, R> graph,
StepNode currentStep,
StepResult.Async<?> async,
WorkflowExecution<R> execution) {
String asyncTaskId = async.taskId();
Object immediateData = async.immediateData();
// Setup async state and suspend workflow
final String messageId = setupAsyncState(instance, currentStep, async, asyncTaskId, immediateData);
// Track the execution with initial event
WorkflowEvent trackingEvent = WorkflowEvent.asyncStarted(asyncTaskId, "");
progressTracker.trackExecution(asyncTaskId, trackingEvent);
// Check if we should use AsyncTaskManager for CompletableFuture support
if (async.taskArgs().containsKey(WorkflowContext.Keys.ASYNC_FUTURE)) {
// Use AsyncTaskManager for CompletableFuture handling
asyncTaskManager.handleAsyncStep(instance, graph, currentStep.id(), async)
.whenComplete((result, error) -> {
if (error != null) {
log.error("Async task failed", error);
execution.future.completeExceptionally(error);
} else {
// Process the result
processAsyncResult(instance, graph, currentStep, result, execution);
}
});
return;
}
// Create async task supplier for traditional async handlers
Supplier<Object> asyncTask = createAsyncTaskSupplier(instance, graph, currentStep,
async, execution, asyncTaskId, messageId);
// Execute async task with progress tracking
CompletableFuture<Object> future = progressTracker.executeAsync(
asyncTaskId,
trackingEvent,
asyncTask
);
// Handle completion/error callbacks
future.whenComplete((result, error) -> {
// Reload instance to get latest state (it may have changed while the task ran)
WorkflowInstance latestInstance = stateRepository.load(instance.getInstanceId())
.orElse(instance);
if (error != null) {
handleAsyncTaskError(latestInstance, currentStep, asyncTaskId, error, execution);
} else {
log.debug("Async task {} completed successfully", asyncTaskId);
progressTracker.onComplete(asyncTaskId, result);
// Result is already handled in the async task itself
// Just save the state
stateRepository.save(latestInstance);
}
});
// Notify listeners about async start (workflow is now suspended)
notifyListeners(l -> l.onStepCompleted(instance, currentStep.id(), async));
notifyListeners(l -> l.onWorkflowSuspended(instance));
}
/**
 * Creates the async task supplier that executes the async handler.
 *
 * <p>The supplier runs on the progress tracker's executor: it resolves the
 * matching {@code @AsyncStep} handler, feeds it a repository-aware progress
 * reporter, and — if the handler returned a result — persists the completion
 * state and processes that result without recursing into handleAsyncStep.</p>
 *
 * @param instance    the instance captured at task-creation time (reloaded
 *                    before the result is processed)
 * @param asyncTaskId ID used to locate the handler and report progress
 * @param messageId   message ID keying the async state record
 * @return supplier yielding the handler's StepResult (may be null if no
 *         handler produced a result)
 */
private <R> Supplier<Object> createAsyncTaskSupplier(WorkflowInstance instance,
WorkflowGraph<?, R> graph,
StepNode currentStep,
StepResult.Async<?> async,
WorkflowExecution<R> execution,
String asyncTaskId,
String messageId) {
return () -> {
try {
log.debug("Executing async task {} for step {}", asyncTaskId, currentStep.id());
// Create progress reporter that also updates the repository
TaskProgressReporter progressReporter = createRepositoryAwareProgressReporter(asyncTaskId, messageId);
// Find the async step handler - try both taskId and stepId for compatibility
StepResult<?> handlerResult = asyncStepHandler.handleAsyncResult(
graph,
asyncTaskId, // First try task ID to match @AsyncStep value
currentStep.id(), // Fall back to step ID if not found
async.taskArgs(), // Pass taskArgs directly
instance.getContext(),
progressReporter
);
if (handlerResult != null) {
// Process the handler result
try {
// Reload instance to get latest state
WorkflowInstance latestInstance = stateRepository.load(instance.getInstanceId())
.orElse(instance);
// Update async state with completion
updateAsyncStateCompletion(messageId, asyncTaskId, handlerResult);
// Process async handler result WITHOUT recursion
processAsyncHandlerResult(latestInstance, graph, currentStep, handlerResult,
execution, asyncTaskId, messageId);
} catch (Exception e) {
log.error("Error processing async handler result for step {}", currentStep.id(), e);
instance.fail(e, currentStep.id());
stateRepository.save(instance);
execution.future.completeExceptionally(e);
}
}
return handlerResult;
} catch (Exception e) {
log.error("Async task {} failed for step {}", asyncTaskId, currentStep.id(), e);
throw new RuntimeException("Async execution failed", e);
}
};
}
/**
 * Process async result from CompletableFuture-based async operations.
 *
 * <p>Reloads the instance to get the latest persisted state, records the
 * completion in the async step state repository (located via suspension
 * data's message ID), then dispatches on the StepResult variant:
 * Continue routes to the next step and re-enters the execution loop;
 * Finish completes the execution future; Fail fails the instance and the
 * future; Suspend stores the prompt and notifies listeners; Branch routes
 * to the branch target.</p>
 *
 * @param instance    the instance captured when the async step started
 * @param currentStep the step whose async result is being processed
 * @param result      the StepResult produced by the async operation
 */
private <R> void processAsyncResult(WorkflowInstance instance,
WorkflowGraph<?, R> graph,
StepNode currentStep,
StepResult<?> result,
WorkflowExecution<R> execution) {
try {
// Reload instance to get latest state
WorkflowInstance latestInstance = stateRepository.load(instance.getInstanceId())
.orElse(instance);
// Update async state with completion
Optional<SuspensionData> completionSuspensionData = suspensionDataRepository.findByInstanceId(latestInstance.getInstanceId());
String completionMessageId = completionSuspensionData.map(SuspensionData::messageId).orElse(null);
if (completionMessageId != null) {
asyncStepStateRepository.findByMessageId(completionMessageId).ifPresent(state -> {
state.complete(result);
asyncStepStateRepository.save(state);
WorkflowEvent progressEvent = WorkflowEvent.withProgress(
state.getPercentComplete(),
state.getStatusMessage()
);
progressTracker.updateExecutionStatus(state.getTaskId(), progressEvent);
});
}
// Process the result
switch (result) {
case StepResult.Continue<?> cont -> {
// Store the async result
latestInstance.updateContext(currentStep.id(), cont.data());
latestInstance.resume();
// Find next step
String nextStepId = stepRouter.findNextStep(graph, currentStep.id(), cont.data());
if (nextStepId != null) {
latestInstance.setCurrentStepId(nextStepId);
stateRepository.save(latestInstance);
// Continue execution
if (!latestInstance.isTerminal()) {
executeWorkflow(latestInstance, graph, execution);
}
} else {
throw new IllegalStateException("No next step after async " + currentStep.id());
}
}
case StepResult.Finish<?> fin -> {
// Update final result and mark as completed
latestInstance.updateContext(WorkflowContext.Keys.FINAL_RESULT, fin.result());
latestInstance.updateStatus(WorkflowInstance.WorkflowStatus.COMPLETED);
stateRepository.save(latestInstance);
// Unchecked cast: fin.result() is assumed to be the workflow's result type R.
execution.future.complete((R) fin.result());
}
case StepResult.Fail<?> fail -> {
latestInstance.fail(fail.error(), currentStep.id());
stateRepository.save(latestInstance);
execution.future.completeExceptionally(fail.error());
}
case StepResult.Suspend<?> susp -> {
// Store suspension data
latestInstance.updateContext(WorkflowContext.Keys.USER_INPUT, susp.promptToUser());
stateRepository.save(latestInstance);
notifyListeners(l -> l.onWorkflowSuspended(latestInstance));
}
case StepResult.Branch<?> branch -> {
String branchTarget = stepRouter.findBranchTarget(graph, currentStep.id(), branch.event());
if (branchTarget != null) {
latestInstance.setCurrentStepId(branchTarget);
latestInstance.updateContext(currentStep.id(), branch.event());
latestInstance.resume();
stateRepository.save(latestInstance);
// Continue execution
if (!latestInstance.isTerminal()) {
executeWorkflow(latestInstance, graph, execution);
}
} else {
throw new IllegalStateException(
"No branch target found for event: " + branch.event().getClass()
);
}
}
default -> throw new IllegalStateException("Unknown step result type: " + result.getClass());
}
} catch (Exception e) {
log.error("Error processing async result for step {}", currentStep.id(), e);
// NOTE(review): this fails/saves the originally captured `instance`, not the
// freshly reloaded latestInstance used above — confirm this is intentional.
instance.fail(e, currentStep.id());
stateRepository.save(instance);
execution.future.completeExceptionally(e);
}
}
/**
 * Registers a workflow execution listener.
 *
 * @param listenerId unique key for the listener; re-using an ID replaces the
 *                   previously registered listener
 * @param listener   the listener to notify on workflow/step lifecycle events
 */
public void addListener(String listenerId, WorkflowExecutionListener listener) {
listeners.put(listenerId, listener);
}
/**
 * Removes a workflow execution listener.
 *
 * @param listenerId the key the listener was registered under; unknown IDs
 *                   are ignored silently
 */
public void removeListener(String listenerId) {
listeners.remove(listenerId);
}
/**
 * Dispatches the given action to every registered listener, isolating
 * failures so one misbehaving listener cannot prevent the others from
 * being notified.
 */
private void notifyListeners(Consumer<WorkflowExecutionListener> action) {
    for (WorkflowExecutionListener listener : listeners.values()) {
        try {
            action.accept(listener);
        } catch (Exception e) {
            log.error("Error notifying listener", e);
        }
    }
}
/**
 * Add an execution interceptor to the workflow engine.
 * Interceptors can observe and modify workflow execution.
 * Delegates to the underlying step executor.
 *
 * @param interceptor The interceptor to add
 */
public void addInterceptor(ExecutionInterceptor interceptor) {
workflowExecutor.addInterceptor(interceptor);
}
/**
 * Remove an execution interceptor from the workflow engine.
 * Delegates to the underlying step executor.
 *
 * @param interceptor The interceptor to remove
 */
public void removeInterceptor(ExecutionInterceptor interceptor) {
workflowExecutor.removeInterceptor(interceptor);
}
/**
 * Shuts down the workflow engine: stops accepting new tasks, waits up to
 * 60 seconds per pool for in-flight work to finish, then forces shutdown.
 */
public void shutdown() {
    log.info("Shutting down workflow engine...");
    shutdownAndAwait(executorService, "workflow-executor");
    shutdownAndAwait(scheduledExecutor, "workflow-scheduler");
    log.info("Workflow engine shut down");
}

/**
 * Orderly shutdown of one executor (previously duplicated inline for both
 * pools): shutdown, await up to 60s, force with shutdownNow on timeout or
 * interrupt, re-asserting the interrupt flag per the ExecutorService javadoc.
 */
private static void shutdownAndAwait(ExecutorService service, String name) {
    service.shutdown();
    try {
        if (!service.awaitTermination(60, TimeUnit.SECONDS)) {
            log.warn("Executor {} did not terminate within 60s; forcing shutdown", name);
            service.shutdownNow();
        }
    } catch (InterruptedException e) {
        service.shutdownNow();
        Thread.currentThread().interrupt();
    }
}
/**
 * Builds the main executor service from the configuration: core/max pool
 * sizes and queue capacity come from the config, excess threads idle out
 * after five minutes, and a saturated pool runs tasks on the submitting
 * thread (CallerRunsPolicy).
 */
private ExecutorService createExecutorService(WorkflowEngineConfig config) {
    BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(config.getQueueCapacity());
    return new ThreadPoolExecutor(
        config.getCoreThreads(),
        config.getMaxThreads(),
        300L,
        TimeUnit.SECONDS,
        workQueue,
        new NamedThreadFactory("workflow-executor"),
        new ThreadPoolExecutor.CallerRunsPolicy()
    );
}
/**
 * Handle for tracking workflow execution.
 * Wraps the completion future plus engine-backed status queries.
 */
@Getter
public static class WorkflowExecution<R> {
// Instance/run identifier of this execution.
private final String runId;
// ID of the workflow definition being executed.
private final String workflowId;
// Completed with the final result, or exceptionally on failure.
private final CompletableFuture<R> future;
// Back-reference used for live status lookups.
private final WorkflowEngine engine;
WorkflowExecution(String runId, String workflowId, CompletableFuture<R> future, WorkflowEngine engine) {
this.runId = runId;
this.workflowId = workflowId;
this.future = future;
this.engine = engine;
}
/** Blocks until the workflow finishes and returns its result. */
public R get() throws InterruptedException, ExecutionException {
return future.get();
}
/** Blocks up to the given timeout for the workflow result. */
public R get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return future.get(timeout, unit);
}
/** Alias for {@link #get()}. */
public R getResult() throws InterruptedException, ExecutionException {
return future.get();
}
/** True once the future has completed (normally or exceptionally). */
public boolean isDone() {
return future.isDone();
}
/** True if the underlying future was cancelled. */
public boolean isCancelled() {
return future.isCancelled();
}
// NOTE(review): "async" here means "not yet done"; confirm callers expect
// this rather than "was started asynchronously".
public boolean isAsync() {
return !future.isDone();
}
/**
 * Check if the workflow is currently suspended.
 */
public boolean isSuspended() {
return engine.getWorkflowInstance(runId)
.map(instance -> instance.getStatus() == WorkflowInstance.WorkflowStatus.SUSPENDED)
.orElse(false);
}
/**
 * Check if the workflow has completed successfully.
 */
public boolean isCompleted() {
return engine.getWorkflowInstance(runId)
.map(instance -> instance.getStatus() == WorkflowInstance.WorkflowStatus.COMPLETED)
.orElse(false);
}
/**
 * Get the current workflow instance state.
 */
public Optional<WorkflowInstance> getInstance() {
return engine.getWorkflowInstance(runId);
}
}
/**
 * Interface for workflow execution lifecycle events.
 * All methods default to no-ops so implementors override only what they need.
 * Listener exceptions are caught and logged by the engine (see notifyListeners).
 */
public interface WorkflowExecutionListener {
/** Fired when a workflow run begins executing. */
default void onWorkflowStarted(WorkflowInstance instance) {}
/** Fired when a workflow run completes with a final result. */
default void onWorkflowCompleted(WorkflowInstance instance, Object result) {}
/** Fired when a workflow run fails with an error. */
default void onWorkflowFailed(WorkflowInstance instance, Throwable error) {}
/** Fired when a workflow run suspends (HITL or async). */
default void onWorkflowSuspended(WorkflowInstance instance) {}
/** Fired before a step executes. */
default void onStepStarted(WorkflowInstance instance, String stepId) {}
/** Fired after a step produces a result. */
default void onStepCompleted(WorkflowInstance instance, String stepId, StepResult<?> result) {}
/** Fired when a step throws or otherwise fails. */
default void onStepFailed(WorkflowInstance instance, String stepId, Throwable error) {}
}
/**
 * Custom thread factory for named threads: delegates actual thread creation
 * to {@link Executors#defaultThreadFactory()} and renames each thread to
 * {@code "<prefix>-<n>"}.
 */
private static class NamedThreadFactory implements ThreadFactory {
    private final String prefix;
    private final ThreadFactory delegate = Executors.defaultThreadFactory();
    // AtomicInteger: newThread can be invoked concurrently (e.g. a pool growing
    // from multiple submitting threads); the previous plain int counter could
    // lose increments and produce duplicate thread names.
    private final AtomicInteger counter = new AtomicInteger();

    NamedThreadFactory(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public Thread newThread(Runnable r) {
        Thread thread = delegate.newThread(r);
        thread.setName(prefix + "-" + counter.getAndIncrement());
        return thread;
    }
}
/**
 * Exception thrown when a step times out.
 * Unchecked, so callers are not forced to handle it.
 */
public static class StepTimeoutException extends RuntimeException {
/** @param message description of which step timed out and after how long */
public StepTimeoutException(String message) {
super(message);
}
}
/**
 * Exception thrown during workflow execution.
 * Carries the structured {@link WorkflowInstance.ErrorInfo} captured from the
 * failed instance in addition to the message.
 */
public static class WorkflowExecutionException extends RuntimeException {
// Structured failure details from the workflow instance.
private final WorkflowInstance.ErrorInfo errorInfo;
/**
 * @param message   human-readable failure description
 * @param errorInfo structured error details from the failed instance
 */
public WorkflowExecutionException(String message, WorkflowInstance.ErrorInfo errorInfo) {
super(message);
this.errorInfo = errorInfo;
}
/** Returns the structured error details captured at failure time. */
public WorkflowInstance.ErrorInfo getErrorInfo() {
return errorInfo;
}
}
/**
 * Gets the workflow graph for a given workflow ID.
 * Public method for external access.
 *
 * @return the registered graph, or null if no workflow with that ID exists
 */
@SuppressWarnings("unchecked")
public <T, R> WorkflowGraph<T, R> getWorkflowGraph(String workflowId) {
    // Casting null is a no-op, so the explicit null branch is unnecessary.
    return (WorkflowGraph<T, R>) registeredWorkflows.get(workflowId);
}
/**
 * Returns a mutable snapshot of all registered workflow IDs; changes to the
 * returned set do not affect the engine's registry.
 */
public Set<String> getRegisteredWorkflows() {
    Set<String> workflowIds = new HashSet<>(registeredWorkflows.keySet());
    return workflowIds;
}
/**
 * Registers all input schemas for a workflow's steps.
 * This ensures that schemas are available for deserialization when resuming workflows.
 * Schema registration happens as a side effect of SchemaUtils.getSchemaFromClass.
 */
private void registerWorkflowSchemas(WorkflowGraph<?, ?> graph) {
    // Register all step input and output types
    for (StepNode step : graph.nodes().values()) {
        if (step.executor() == null) {
            continue;
        }
        Class<?> inputType = step.executor().getInputType();
        if (inputType != null && inputType != void.class && inputType != Void.class) {
            // This will register the schema in the internal registry
            SchemaUtils.getSchemaFromClass(inputType);
            // DEBUG (was INFO) for consistency with the output-type log below;
            // one INFO line per step made registration needlessly noisy.
            log.debug("Registered schema for step {} input type: {} (full name: {})",
                step.id(), inputType.getSimpleName(), inputType.getName());
        }
        // Also register output type if it's used as suspend data
        Class<?> outputType = step.executor().getOutputType();
        if (outputType != null && outputType != void.class && outputType != Void.class) {
            SchemaUtils.getSchemaFromClass(outputType);
            log.debug("Registered schema for step {} output type: {}",
                step.id(), outputType.getSimpleName());
        }
    }
}
/**
 * Gets the retry executor used by this engine (delegated from the step
 * executor). Useful for accessing metrics and configuring listeners.
 *
 * @return The retry executor
 */
public RetryExecutor getRetryExecutor() {
return workflowExecutor.getRetryExecutor();
}
/**
 * Gets the current result of a workflow execution.
 * For async steps, returns the latest progress update.
 * Resolution order: async step progress, then a non-async suspension prompt,
 * then a generic event reflecting the workflow status.
 *
 * @param instanceId The workflow instance ID
 * @return The current workflow result/event if available (empty when the
 *         instance does not exist)
 */
public Optional<WorkflowEvent> getCurrentResult(String instanceId) {
    return stateRepository.load(instanceId)
        .map(instance -> {
            // First check if current step has async state.
            // Async state is linked to the suspension record via its message ID.
            Optional<AsyncStepState> asyncState = Optional.empty();
            Optional<SuspensionData> suspensionData = suspensionDataRepository.findByInstanceId(instanceId);
            if (suspensionData.isPresent()) {
                asyncState = asyncStepStateRepository.findByMessageId(
                    suspensionData.get().messageId()
                );
            }
            if (asyncState.isPresent()) {
                AsyncStepState state = asyncState.get();
                // Get latest progress from progress tracker.
                // NOTE(review): this updates the in-memory state only; it is not
                // persisted here — presumably intentional for a read-only view.
                progressTracker.getProgress(state.getTaskId()).ifPresent(progress -> {
                    state.updateProgress(
                        progress.percentComplete(),
                        progress.message()
                    );
                });
                // Create event from current state
                return WorkflowEvent.withProgress(
                    state.getPercentComplete(),
                    state.getStatusMessage()
                );
            }
            // If suspended (non-async), return the suspension prompt as completed event
            if (instance.getStatus() == WorkflowStatus.SUSPENDED &&
                suspensionData.isPresent()) {
                SuspensionData suspension = suspensionData.get();
                // Suspension prompt is already a structured object, return it as-is
                // The promptToUser should be a proper domain object (e.g., WorkflowEvent)
                if (suspension.promptToUser() instanceof WorkflowEvent) {
                    return (WorkflowEvent) suspension.promptToUser();
                }
                // If not WorkflowEvent, wrap in a completed event
                Map<String, String> props = new HashMap<>();
                props.put("type", "suspension");
                props.put("waitingForInput", "true");
                return WorkflowEvent.completed(props);
            }
            // Fallback: report workflow status as a progress event
            // (100% for terminal states, 0% otherwise).
            return WorkflowEvent.withProgress(
                instance.isTerminal() ? 100 : 0,
                "Workflow " + instance.getStatus().toString().toLowerCase()
            );
        });
}
/**
 * Gets the workflow instance state.
 * Thin pass-through to the state repository.
 *
 * @param instanceId The workflow instance ID
 * @return The workflow instance if found
 */
public Optional<WorkflowInstance> getWorkflowInstance(String instanceId) {
    return stateRepository.load(instanceId);
}
/**
 * Cancels an async operation if it's running.
 * Marks the async state cancelled, fails the instance, removes the suspension
 * record, and notifies the progress tracker.
 *
 * @param instanceId The workflow instance ID
 * @return true if the operation was cancelled, false if not found or not async
 */
public boolean cancelAsyncOperation(String instanceId) {
    Optional<WorkflowInstance> instanceOpt = stateRepository.load(instanceId);
    if (instanceOpt.isEmpty()) {
        return false;
    }
    WorkflowInstance instance = instanceOpt.get();
    // Resolve the async state via the suspension record's message ID.
    Optional<AsyncStepState> asyncState = Optional.empty();
    Optional<SuspensionData> suspensionData = suspensionDataRepository.findByInstanceId(instanceId);
    if (suspensionData.isPresent()) {
        asyncState = asyncStepStateRepository.findByMessageId(
            suspensionData.get().messageId()
        );
    }
    if (asyncState.isPresent() && asyncState.get().isRunning()) {
        AsyncStepState state = asyncState.get();
        // Cancel the async operation
        state.cancel();
        // FIX: persist the cancelled state. Without this save, repository-based
        // isCancelled() checks (e.g. the repository-aware progress reporter)
        // would never observe the cancellation.
        asyncStepStateRepository.save(state);
        // Resume workflow to failed state
        instance.resume();
        instance.fail(new RuntimeException("Async operation cancelled"), instance.getCurrentStepId());
        suspensionDataRepository.deleteByInstanceId(instanceId);
        stateRepository.save(instance);
        // Notify progress tracker
        progressTracker.onError(state.getTaskId(), new RuntimeException("Cancelled"));
        return true;
    }
    return false;
}
/**
* Adapter that bridges ExecutionInterceptor to WorkflowExecutionListener.
*/
private class ListenerAdapterInterceptor implements ExecutionInterceptor {
@Override
public void beforeStep(WorkflowInstance instance, StepNode step, Object input) {
notifyListeners(l -> l.onStepStarted(instance, step.id()));
}
@Override
public void afterStep(WorkflowInstance instance, StepNode step, StepResult<?> result) {
notifyListeners(l -> l.onStepCompleted(instance, step.id(), result));
}
@Override
public void onStepError(WorkflowInstance instance, StepNode step, Exception error) {
notifyListeners(l -> l.onStepFailed(instance, step.id(), error));
}
}
/**
 * Sets up the initial async state for a workflow step.
 * Creates AsyncStepState and SuspensionData, then suspends the workflow.
 *
 * @return The messageId of the created async state
 */
private String setupAsyncState(WorkflowInstance instance, StepNode currentStep,
                               StepResult.Async<?> async, String asyncTaskId, Object immediateData) {
    // Persist the structured async state first; its message ID links everything.
    AsyncStepState startedState = AsyncStepState.started(asyncTaskId, immediateData);
    asyncStepStateRepository.save(startedState);
    String messageId = startedState.getMessageId();
    // Tie the suspension record to the async state via the shared message ID.
    // The immediate async data doubles as the prompt shown to the user.
    SuspensionData suspension = SuspensionData.createWithMessageId(
        messageId,
        immediateData,
        Map.of("async", true, "taskId", asyncTaskId),
        async,              // original async result
        currentStep.id(),
        null                // async steps have no next input class
    );
    // Mark the instance suspended and persist both records.
    instance.suspend();
    suspensionDataRepository.save(instance.getInstanceId(), suspension);
    stateRepository.save(instance);
    return messageId;
}
/**
 * Creates a task progress reporter that updates both the progress tracker and repository.
 * Mirroring every update into the repository keeps the persisted AsyncStepState
 * in sync, so status polls and cancellation checks see the latest progress.
 */
private TaskProgressReporter createRepositoryAwareProgressReporter(String asyncTaskId, String messageId) {
    TaskProgressReporter baseReporter = progressTracker.createReporter(asyncTaskId);
    return new TaskProgressReporter() {
        @Override
        public void updateProgress(int percentComplete, String message) {
            // Update base reporter
            baseReporter.updateProgress(percentComplete, message);
            // Also update async state in repository
            if (percentComplete >= 0) {
                asyncStepStateRepository.updateProgress(messageId, percentComplete, message);
            } else {
                // Negative percent means "message only": read the stored percentage
                // first so it is preserved when writing the new message.
                asyncStepStateRepository.findByMessageId(messageId).ifPresent(state -> {
                    asyncStepStateRepository.updateProgress(messageId, state.getPercentComplete(), message);
                });
            }
        }
        @Override
        public void updateProgress(int percentComplete) {
            baseReporter.updateProgress(percentComplete);
            // Synthesize a generic status message when only a percentage is given.
            asyncStepStateRepository.updateProgress(messageId, percentComplete,
                "Processing... " + percentComplete + "%");
        }
        @Override
        public void updateMessage(String message) {
            baseReporter.updateMessage(message);
            // Message-only update: keep the currently stored percentage.
            asyncStepStateRepository.findByMessageId(messageId).ifPresent(state -> {
                asyncStepStateRepository.updateProgress(messageId, state.getPercentComplete(), message);
            });
        }
        @Override
        public boolean isCancelled() {
            // The repository is consulted first so cancellations performed
            // elsewhere are observed; fall back to the in-memory reporter.
            return asyncStepStateRepository.findByMessageId(messageId)
                .map(state -> state.getStatus() == AsyncStepState.AsyncStatus.CANCELLED)
                .orElse(baseReporter.isCancelled());
        }
    };
}
/**
 * Processes the result of an async handler after it completes.
 * Dispatches on the concrete StepResult subtype; the switch has no default
 * branch, so it must cover every result variant.
 *
 * @param latestInstance freshly loaded workflow instance
 * @param graph the workflow graph being executed
 * @param currentStep the step whose async handler finished
 * @param handlerResult the result produced by the async handler
 * @param execution execution handle used to complete or fail the future
 * @param asyncTaskId ID of the async task (for tracking)
 * @param messageId message ID linking the async state records
 */
private <R> void processAsyncHandlerResult(WorkflowInstance latestInstance, WorkflowGraph<?, R> graph,
                                           StepNode currentStep, StepResult<?> handlerResult,
                                           WorkflowExecution<R> execution, String asyncTaskId, String messageId) {
    switch (handlerResult) {
        case StepResult.Continue<?> cont ->
            handleAsyncContinue(latestInstance, graph, currentStep, cont, execution);
        case StepResult.Finish<?> finish ->
            handleAsyncFinish(latestInstance, currentStep, finish, execution);
        case StepResult.Fail<?> fail ->
            handleAsyncFail(latestInstance, currentStep, fail, execution);
        // Nested async is unsupported: the handler already ran asynchronously.
        case StepResult.Async<?> asyncResult ->
            throw new IllegalStateException(
                "Async handler cannot return another Async result: " + currentStep.id());
        case StepResult.Suspend<?> susp ->
            handleAsyncSuspend(latestInstance, currentStep, susp);
        case StepResult.Branch<?> branch ->
            handleAsyncBranch(latestInstance, graph, currentStep, branch, execution);
    }
}
/**
 * Handles a Continue result produced by an async handler: records the step
 * output, resumes the instance, and either advances to the next step or
 * fails the workflow when no outgoing edge matches.
 */
private <R> void handleAsyncContinue(WorkflowInstance instance, WorkflowGraph<?, R> graph,
                                     StepNode currentStep, StepResult.Continue<?> cont,
                                     WorkflowExecution<R> execution) {
    // Record the async step's output and leave the suspended state.
    instance.updateContext(currentStep.id(), cont.data());
    instance.resume();
    String nextStepId = stepRouter.findNextStep(graph, currentStep.id(), cont.data());
    if (nextStepId == null) {
        // Dead end: no edge accepts the produced data.
        log.warn("No next step found after async handler for: {}", currentStep.id());
        instance.fail(new IllegalStateException("No next step after async " + currentStep.id()),
            currentStep.id());
        stateRepository.save(instance);
        execution.future.completeExceptionally(
            new IllegalStateException("No next step after async " + currentStep.id()));
        return;
    }
    instance.setCurrentStepId(nextStepId);
    stateRepository.save(instance);
    // Re-enter the main execution loop unless a terminal state was reached.
    if (!instance.isTerminal()) {
        executeWorkflow(instance, graph, execution);
    }
}
/**
 * Handles a Finish result from an async handler: records the final result,
 * marks the workflow completed, and fulfils the execution future.
 */
@SuppressWarnings("unchecked")
private <R> void handleAsyncFinish(WorkflowInstance instance, StepNode currentStep,
                                   StepResult.Finish<?> finish, WorkflowExecution<R> execution) {
    Object finalValue = finish.result();
    // Record the step output, leave the suspended state, store the final result.
    instance.updateContext(currentStep.id(), finalValue);
    instance.resume();
    instance.updateContext(WorkflowContext.Keys.FINAL_RESULT, finalValue);
    instance.updateStatus(WorkflowStatus.COMPLETED);
    stateRepository.save(instance);
    // Unchecked cast: the finish value is the workflow's output by construction.
    execution.future.complete((R) finalValue);
}
/**
 * Handles a Fail result from an async handler: leaves the suspended state,
 * records the failure on the instance, and fails the execution future.
 */
private void handleAsyncFail(WorkflowInstance instance, StepNode currentStep,
                             StepResult.Fail<?> fail, WorkflowExecution<?> execution) {
    Throwable cause = fail.error();
    instance.resume();
    instance.fail(cause, currentStep.id());
    stateRepository.save(instance);
    execution.future.completeExceptionally(cause);
}
/**
 * Handles a Suspend result from an async handler: stores fresh suspension
 * data and puts the workflow back into the suspended state.
 */
private void handleAsyncSuspend(WorkflowInstance instance, StepNode currentStep,
                                StepResult.Suspend<?> susp) {
    // Build the suspension record for the new user prompt.
    SuspensionData suspension = SuspensionData.create(
        susp.promptToUser(),
        susp.metadata(),
        null,
        currentStep.id(),
        susp.nextInputClass()
    );
    instance.suspend();
    suspensionDataRepository.save(instance.getInstanceId(), suspension);
    stateRepository.save(instance);
}
/**
 * Handles a Branch result from an async handler: routes to the matching
 * branch target and resumes execution, or throws when no target matches
 * the event type.
 */
private <R> void handleAsyncBranch(WorkflowInstance instance, WorkflowGraph<?, R> graph,
                                   StepNode currentStep, StepResult.Branch<?> branch,
                                   WorkflowExecution<R> execution) {
    instance.resume();
    String targetStepId = stepRouter.findBranchTarget(graph, currentStep.id(), branch.event());
    if (targetStepId == null) {
        throw new IllegalStateException(
            "No branch target found for event: " + branch.event().getClass()
        );
    }
    instance.setCurrentStepId(targetStepId);
    instance.updateContext(currentStep.id(), branch.event());
    stateRepository.save(instance);
    // Re-enter the main execution loop unless a terminal state was reached.
    if (!instance.isTerminal()) {
        executeWorkflow(instance, graph, execution);
    }
}
/**
 * Updates async state with completion information and publishes a
 * "completed" event to the progress tracker.
 */
private void updateAsyncStateCompletion(String messageId, String asyncTaskId, StepResult<?> handlerResult) {
    asyncStepStateRepository.findByMessageId(messageId).ifPresent(asyncState -> {
        // Persist the handler result on the async state record.
        asyncState.complete(handlerResult);
        asyncStepStateRepository.save(asyncState);
        Map<String, String> props = Map.of(
            "taskId", asyncTaskId,
            "status", "completed"
        );
        progressTracker.updateExecutionStatus(asyncTaskId, WorkflowEvent.completed(props));
    });
}
/**
 * Handles async task execution errors: records the failure on both the async
 * state and the instance, fails the execution future, and notifies the
 * progress tracker and listeners.
 */
private void handleAsyncTaskError(WorkflowInstance instance, StepNode currentStep,
                                  String asyncTaskId, Throwable error, WorkflowExecution<?> execution) {
    log.error("Async task {} completed with error", asyncTaskId, error);
    progressTracker.onError(asyncTaskId, error);
    // Mark the async state (looked up via the suspension record) as failed.
    suspensionDataRepository.findByInstanceId(instance.getInstanceId())
        .map(SuspensionData::messageId)
        .flatMap(asyncStepStateRepository::findByMessageId)
        .ifPresent(state -> {
            state.fail(error);
            asyncStepStateRepository.save(state);
        });
    // Persist the failed instance state.
    instance.fail(error, currentStep.id());
    stateRepository.save(instance);
    // Propagate the error to the caller and to the listeners.
    execution.future.completeExceptionally(error);
    notifyListeners(l -> l.onStepFailed(instance, currentStep.id(), error));
}
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/WorkflowExecutionListener.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
/**
 * Listener interface for workflow execution events.
 * Implementations can react to various workflow lifecycle events.
 * Every method has a no-op default, so implementations only need to override
 * the callbacks they are interested in.
 */
public interface WorkflowExecutionListener {
    /**
     * Called when a workflow starts execution.
     *
     * @param instance The workflow instance that started
     */
    default void onWorkflowStarted(WorkflowInstance instance) {
        // Default no-op implementation
    }
    /**
     * Called when a workflow completes successfully.
     *
     * @param instance The completed workflow instance
     */
    default void onWorkflowCompleted(WorkflowInstance instance) {
        // Default no-op implementation
    }
    /**
     * Called when a workflow fails with an error.
     *
     * @param instance The failed workflow instance
     * @param error The error that caused the failure
     */
    default void onWorkflowFailed(WorkflowInstance instance, Throwable error) {
        // Default no-op implementation
    }
    /**
     * Called when a suspended workflow is resumed.
     *
     * @param instance The resumed workflow instance
     */
    default void onWorkflowResumed(WorkflowInstance instance) {
        // Default no-op implementation
    }
    /**
     * Called when a workflow is cancelled.
     *
     * @param instance The cancelled workflow instance
     */
    default void onWorkflowCancelled(WorkflowInstance instance) {
        // Default no-op implementation
    }
    /**
     * Called before a step executes.
     *
     * @param instance The workflow instance
     * @param stepId The ID of the step about to execute
     */
    default void onStepStarted(WorkflowInstance instance, String stepId) {
        // Default no-op implementation
    }
    /**
     * Called after a step completes.
     *
     * @param instance The workflow instance
     * @param stepId The ID of the completed step
     * @param result The step result
     */
    default void onStepCompleted(WorkflowInstance instance, String stepId, StepResult<?> result) {
        // Default no-op implementation
    }
    /**
     * Called when a step fails.
     *
     * @param instance The workflow instance
     * @param stepId The ID of the failed step
     * @param error The error that occurred
     */
    default void onStepFailed(WorkflowInstance instance, String stepId, Throwable error) {
        // Default no-op implementation
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/WorkflowExecutor.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.common.service.ChatStore;
import ai.driftkit.workflow.engine.async.ProgressTracker;
import ai.driftkit.workflow.engine.domain.WorkflowEngineConfig;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
 * Handles the execution of individual workflow steps.
 * This component is responsible for preparing step inputs, invoking step logic,
 * and recording execution results. Registered {@link ExecutionInterceptor}s
 * provide cross-cutting hooks; a failing interceptor is logged and skipped so
 * it can never break step execution.
 */
@Slf4j
public class WorkflowExecutor {
    private final WorkflowEngineConfig config;
    private final ProgressTracker progressTracker;
    private final InputPreparer inputPreparer;
    private final List<ExecutionInterceptor> interceptors;
    private final RetryExecutor retryExecutor;

    /**
     * Creates a workflow executor.
     *
     * @param config engine configuration; may provide a custom retry executor
     * @param progressTracker tracker used for async progress reporting
     * @param chatStore optional chat store; when non-null, a chat tracking
     *                  interceptor is installed automatically
     */
    public WorkflowExecutor(WorkflowEngineConfig config, ProgressTracker progressTracker, ChatStore chatStore) {
        this.config = config;
        this.progressTracker = progressTracker;
        this.inputPreparer = new InputPreparer();
        this.interceptors = new ArrayList<>();
        // Fall back to a default RetryExecutor when none is configured.
        this.retryExecutor = config.getRetryExecutor() != null ?
            config.getRetryExecutor() : new RetryExecutor();
        // Add chat tracking interceptor if ChatStore is available
        if (chatStore != null) {
            this.interceptors.add(new ChatTrackingInterceptor(chatStore));
        }
    }

    /**
     * Executes a single workflow step, applying the configured retry policy.
     *
     * @param instance The workflow instance
     * @param step The step to execute
     * @param graph The workflow graph
     * @return The step result
     * @throws Exception if execution fails (after the retry policy is exhausted)
     */
    public StepResult<?> executeStep(WorkflowInstance instance,
                                     StepNode step,
                                     WorkflowGraph<?, ?> graph) throws Exception {
        // Delegate to retry executor; it re-invokes the internal logic on retryable failures.
        return retryExecutor.executeWithRetry(instance, step,
            (inst, stp) -> executeStepInternal(inst, stp, graph));
    }

    /**
     * Internal step execution logic without retry: prepares the input, lets
     * interceptors optionally override execution, runs the step, and records
     * the outcome (success or failure) on the instance.
     */
    private StepResult<?> executeStepInternal(WorkflowInstance instance,
                                              StepNode step,
                                              WorkflowGraph<?, ?> graph) throws Exception {
        String stepId = step.id();
        log.debug("Executing step: {} (instance: {})", stepId, instance.getInstanceId());
        long startTime = System.currentTimeMillis();
        Object input = null;
        try {
            // Prepare input for the step
            input = inputPreparer.prepareStepInput(instance, step);
            log.debug("Step {} expects input type: {}, prepared input: {} (type: {})",
                stepId,
                step.executor().getInputType() != null ? step.executor().getInputType().getName() : "any",
                input,
                input != null ? input.getClass().getName() : "null");
            // Call interceptors before execution
            notifyBeforeStep(instance, step, input);
            // Check if any interceptor wants to override the execution
            Optional<StepResult<?>> interceptedResult = checkInterceptors(instance, step, input);
            StepResult<?> stepResult;
            if (interceptedResult.isPresent()) {
                // Use the intercepted result instead of executing the step
                stepResult = interceptedResult.get();
                log.debug("Step execution intercepted for: {} with result type: {}",
                    stepId, stepResult.getClass().getSimpleName());
            } else {
                // Execute the step normally
                Object result = step.executor().execute(input, instance.getContext());
                // Auto-wrap non-StepResult values
                if (result instanceof StepResult) {
                    stepResult = (StepResult<?>) result;
                } else {
                    // Automatically wrap plain values in StepResult.continueWith()
                    log.debug("Auto-wrapping step result of type {} in StepResult.continueWith()",
                        result != null ? result.getClass().getSimpleName() : "null");
                    stepResult = StepResult.continueWith(result);
                }
            }
            long duration = System.currentTimeMillis() - startTime;
            // Record execution
            instance.recordStepExecution(stepId, input, stepResult, duration, true);
            // Call interceptors after execution
            notifyAfterStep(instance, step, stepResult);
            log.debug("Step completed: {} in {}ms", stepId, duration);
            return stepResult;
        } catch (Exception e) {
            long duration = System.currentTimeMillis() - startTime;
            // Record the failed attempt before notifying interceptors and rethrowing.
            instance.recordStepExecution(stepId, input, null, duration, false);
            notifyOnError(instance, step, e);
            throw e;
        }
    }

    /**
     * Adds an execution interceptor. Null interceptors are ignored.
     */
    public void addInterceptor(ExecutionInterceptor interceptor) {
        if (interceptor != null) {
            interceptors.add(interceptor);
        }
    }

    /**
     * Gets the retry executor used by this workflow executor.
     *
     * @return The retry executor
     */
    public RetryExecutor getRetryExecutor() {
        return retryExecutor;
    }

    /**
     * Removes an execution interceptor.
     */
    public void removeInterceptor(ExecutionInterceptor interceptor) {
        interceptors.remove(interceptor);
    }

    private void notifyBeforeStep(WorkflowInstance instance, StepNode step, Object input) {
        for (ExecutionInterceptor interceptor : interceptors) {
            runQuietly(interceptor, "beforeStep", () -> interceptor.beforeStep(instance, step, input));
        }
    }

    private void notifyAfterStep(WorkflowInstance instance, StepNode step, StepResult<?> result) {
        for (ExecutionInterceptor interceptor : interceptors) {
            runQuietly(interceptor, "afterStep", () -> interceptor.afterStep(instance, step, result));
        }
    }

    private void notifyOnError(WorkflowInstance instance, StepNode step, Exception error) {
        for (ExecutionInterceptor interceptor : interceptors) {
            runQuietly(interceptor, "onStepError", () -> interceptor.onStepError(instance, step, error));
        }
    }

    /**
     * Invokes one interceptor callback, logging and swallowing any exception
     * so a misbehaving interceptor cannot break step execution. Deduplicates
     * the previously repeated try/catch loops; the rendered log message is
     * unchanged ("Interceptor X failed in <phase>").
     */
    private static void runQuietly(ExecutionInterceptor interceptor, String phase, Runnable callback) {
        try {
            callback.run();
        } catch (Exception e) {
            log.warn("Interceptor {} failed in {}", interceptor.getClass().getSimpleName(), phase, e);
        }
    }

    /**
     * Asks interceptors, in registration order, whether any wants to override
     * step execution; the first non-empty result wins.
     */
    private Optional<StepResult<?>> checkInterceptors(WorkflowInstance instance, StepNode step, Object input) {
        if (CollectionUtils.isEmpty(interceptors)) {
            return Optional.empty();
        }
        for (ExecutionInterceptor interceptor : interceptors) {
            try {
                Optional<StepResult<?>> result = interceptor.interceptExecution(instance, step, input);
                if (result.isPresent()) {
                    return result;
                }
            } catch (Exception e) {
                log.warn("Interceptor {} failed in interceptExecution", interceptor.getClass().getSimpleName(), e);
            }
        }
        return Optional.empty();
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/WorkflowOrchestrator.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.builder.InternalRoutingMarker;
import ai.driftkit.workflow.engine.domain.SuspensionData;
import ai.driftkit.workflow.engine.domain.WorkflowException;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
import ai.driftkit.workflow.engine.persistence.SuspensionDataRepository;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance.WorkflowStatus;
import ai.driftkit.workflow.engine.schema.SchemaUtils;
import ai.driftkit.common.service.ChatStore;
import ai.driftkit.common.domain.chat.ChatMessage.MessageType;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.Map;
/**
 * Orchestrates the execution of workflows by coordinating between various components.
 * This class contains the main workflow execution logic that was previously in WorkflowEngine.
 */
@Slf4j
@RequiredArgsConstructor
public class WorkflowOrchestrator {
    private final WorkflowStateManager stateManager;
    private final WorkflowExecutor executor;
    private final StepRouter router;
    private final InputPreparer inputPreparer;
    private final SuspensionDataRepository suspensionDataRepository;
    private final ChatStore chatStore;

    /**
     * Orchestrates the execution of a workflow instance.
     * Runs steps in a loop until the workflow terminates or suspends, then
     * completes the execution future according to the terminal state.
     *
     * @param instance The workflow instance to execute
     * @param graph The workflow graph definition
     * @param execution The execution handle for completion notification
     * @param engine The workflow engine reference for async callbacks
     * @param <R> The return type of the workflow
     */
    public <R> void orchestrateExecution(WorkflowInstance instance,
                                         WorkflowGraph<?, R> graph,
                                         WorkflowEngine.WorkflowExecution<R> execution,
                                         WorkflowEngine engine) {
        log.debug("Starting workflow execution: {} (instance: {})",
            graph.id(), instance.getInstanceId());
        try {
            // Main loop: run steps until the workflow terminates or suspends.
            while (!instance.isTerminal() &&
                   instance.getStatus() != WorkflowStatus.SUSPENDED) {
                String currentStepId = instance.getCurrentStepId();
                StepNode currentStep = graph.getNode(currentStepId)
                    .orElseThrow(() -> new IllegalStateException(
                        "Step not found in graph: " + currentStepId
                    ));
                // Execute the current step
                StepResult<?> result = executor.executeStep(instance, currentStep, graph);
                // Process the result
                processStepResult(instance, graph, currentStep, result, execution, engine);
                // Check if workflow was suspended during result processing
                if (instance.getStatus() == WorkflowStatus.SUSPENDED) {
                    log.debug("Workflow suspended after processing step result, exiting execution loop");
                    break;
                }
            }
            // Handle terminal states
            if (instance.getStatus() == WorkflowStatus.COMPLETED) {
                WorkflowContext ctx = instance.getContext();
                log.debug("Workflow completed. Context has {} step results, contains __final__: {}",
                    ctx.getStepCount(), ctx.hasStepResult(WorkflowContext.Keys.FINAL_RESULT));
                if (ctx.hasStepResult(WorkflowContext.Keys.FINAL_RESULT)) {
                    R result = ctx.getStepResult(WorkflowContext.Keys.FINAL_RESULT, graph.outputType());
                    log.debug("Completing future with final result: {}", result);
                    execution.getFuture().complete(result);
                } else {
                    log.warn("Workflow completed but no final result set. Completing with null.");
                    execution.getFuture().complete(null);
                }
            }
            if (instance.getStatus() == WorkflowStatus.FAILED) {
                // Reuse the shared ErrorInfo -> Throwable conversion instead of
                // duplicating it inline, so the error shape stays consistent.
                execution.getFuture().completeExceptionally(
                    createErrorFromInfo(instance.getErrorInfo()));
            }
            // Suspended workflows don't complete the future - they wait for resume
        } catch (Exception e) {
            log.error("Workflow execution failed for instance: {}", instance.getInstanceId(), e);
            stateManager.failInstance(instance, e, instance.getCurrentStepId());
            execution.getFuture().completeExceptionally(e);
        }
    }

    /**
     * Processes the result of a step execution, mutating and persisting the
     * instance according to the result variant.
     *
     * @param instance The workflow instance
     * @param graph The workflow graph
     * @param currentStep The step that was just executed
     * @param result The result of the step execution
     * @param execution The execution handle
     * @param engine The workflow engine reference for async handling
     * @param <R> The return type of the workflow
     */
    public <R> void processStepResult(WorkflowInstance instance,
                                      WorkflowGraph<?, R> graph,
                                      StepNode currentStep,
                                      StepResult<?> result,
                                      WorkflowEngine.WorkflowExecution<R> execution,
                                      WorkflowEngine engine) {
        switch (result) {
            case StepResult.Continue<?> cont -> {
                // Store the output
                instance.updateContext(currentStep.id(), cont.data());
                // Find next step using enhanced type-based resolution
                String nextStepId = router.findNextStep(graph, currentStep.id(), cont.data());
                if (nextStepId != null) {
                    instance.setCurrentStepId(nextStepId);
                    stateManager.saveInstance(instance);
                } else {
                    // No edge accepts the produced data type: fail the workflow.
                    log.warn("No next step found for Continue from: {} (data type: {})",
                        currentStep.id(),
                        cont.data() != null ? cont.data().getClass().getSimpleName() : "null");
                    instance.fail(new IllegalStateException(
                        "No next step found after " + currentStep.id() +
                        " for data type: " + (cont.data() != null ? cont.data().getClass().getName() : "null")),
                        currentStep.id());
                    stateManager.saveInstance(instance);
                }
            }
            case StepResult.Suspend<?> susp -> {
                // Get the step input that was passed to this step
                Object stepInput = inputPreparer.prepareStepInput(instance, currentStep);
                // Generate schema for next input class to register it
                if (susp.nextInputClass() != null) {
                    SchemaUtils.getSchemaFromClass(susp.nextInputClass());
                    String schemaId = SchemaUtils.getSchemaId(susp.nextInputClass());
                    log.debug("Registered schema for class {} with ID: {}",
                        susp.nextInputClass().getName(), schemaId);
                }
                // Create suspension data with type preservation
                SuspensionData suspensionData = SuspensionData.create(
                    susp.promptToUser(),
                    susp.metadata(),
                    stepInput,
                    currentStep.id(),
                    susp.nextInputClass()
                );
                // Suspend the workflow
                instance.suspend();
                // Save suspension data to repository
                suspensionDataRepository.save(instance.getInstanceId(), suspensionData);
                // Store the result from this step as it may contain data to return to user
                instance.updateContext(currentStep.id(), susp.promptToUser());
                // Auto-track suspended message in ChatStore if available
                if (chatStore != null && instance.getInstanceId() != null) {
                    Object promptData = susp.promptToUser();
                    if (promptData != null) {
                        Map<String, String> properties = SchemaUtils.extractProperties(promptData);
                        chatStore.add(instance.getInstanceId(), properties, MessageType.AI);
                        log.info("Auto-tracked suspended message to ChatStore for instance: {} with properties: {}",
                            instance.getInstanceId(), properties);
                    } else {
                        log.warn("No prompt data to track for suspended workflow: {}", instance.getInstanceId());
                    }
                } else {
                    log.debug("ChatStore not available or instance ID is null. ChatStore: {}, InstanceId: {}",
                        chatStore, instance.getInstanceId());
                }
                stateManager.saveInstance(instance);
            }
            case StepResult.Branch<?> branch -> {
                // Find next step based on event type
                String nextStepId = router.findBranchTarget(graph, currentStep.id(), branch.event());
                if (nextStepId != null) {
                    instance.setCurrentStepId(nextStepId);
                    // Only store the branch event if it's not an internal routing marker
                    Object branchEvent = branch.event();
                    if (!(branchEvent instanceof InternalRoutingMarker)) {
                        // This is actual data to be used by subsequent steps
                        instance.updateContext(currentStep.id(), branchEvent);
                    }
                    // For internal routing objects, don't store them - let InputPreparer
                    // find the previous suitable output
                    stateManager.saveInstance(instance);
                } else {
                    throw new IllegalStateException(
                        "No branch target found for event type: " +
                        branch.event().getClass().getName()
                    );
                }
            }
            case StepResult.Finish<?> finish -> {
                // Workflow completed successfully
                log.debug("Processing Finish result with value: {}", finish.result());
                instance.updateContext(WorkflowContext.Keys.FINAL_RESULT, finish.result());
                // Auto-track finish message in ChatStore if available
                if (chatStore != null && instance.getInstanceId() != null) {
                    Object finishData = finish.result();
                    if (finishData != null) {
                        chatStore.add(instance.getInstanceId(),
                            SchemaUtils.extractProperties(finishData),
                            MessageType.AI);
                        log.debug("Auto-tracked finish message to ChatStore for instance: {}",
                            instance.getInstanceId());
                    }
                }
                instance.updateStatus(WorkflowStatus.COMPLETED);
                stateManager.saveInstance(instance);
                log.debug("Workflow completed, final result stored under __final__");
            }
            case StepResult.Fail<?> fail -> {
                // Workflow failed
                instance.fail(fail.error(), currentStep.id());
                stateManager.saveInstance(instance);
            }
            case StepResult.Async<?> async -> {
                // Delegate to engine's async handling method
                engine.handleAsyncStep(instance, graph, currentStep, async, execution);
            }
        }
    }

    /**
     * Gets the final result from a completed workflow instance.
     *
     * @param instance The workflow instance
     * @param graph The workflow graph
     * @return The final result or null if not available
     */
    public <R> R getFinalResult(WorkflowInstance instance, WorkflowGraph<?, R> graph) {
        if (instance.getStatus() == WorkflowStatus.COMPLETED) {
            WorkflowContext ctx = instance.getContext();
            if (ctx.hasStepResult(WorkflowContext.Keys.FINAL_RESULT)) {
                return ctx.getStepResult(WorkflowContext.Keys.FINAL_RESULT, graph.outputType());
            }
        }
        return null;
    }

    /**
     * Creates a Throwable from workflow error info.
     *
     * @param errorInfo The error info from workflow instance, may be null
     * @return A Throwable representing the error
     */
    public Throwable createErrorFromInfo(WorkflowInstance.ErrorInfo errorInfo) {
        if (errorInfo != null) {
            return new WorkflowException(
                errorInfo.errorMessage(),
                new Exception(errorInfo.errorType())
            );
        } else {
            return new WorkflowException("Workflow failed without specific error");
        }
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/WorkflowStateManager.java
|
package ai.driftkit.workflow.engine.core;
import ai.driftkit.workflow.engine.graph.StepNode;
import ai.driftkit.workflow.engine.graph.WorkflowGraph;
import ai.driftkit.workflow.engine.persistence.WorkflowInstance;
import ai.driftkit.workflow.engine.persistence.WorkflowStateRepository;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.Map;
import java.util.Optional;
/**
 * Manages workflow instance state and persistence.
 * This component handles creation, loading, saving, and state transitions
 * of workflow instances.
 *
 * <p>All persistence goes through {@link #saveInstance(WorkflowInstance)} so there is a
 * single choke point for save behavior (e.g. future hooks, metrics, or validation).
 */
@Slf4j
@RequiredArgsConstructor
public class WorkflowStateManager {

    /** Backing store for workflow instances; injected via the Lombok-generated constructor. */
    private final WorkflowStateRepository stateRepository;

    /**
     * Creates a new workflow instance for a fresh run and persists it immediately.
     *
     * @param workflowId     logical workflow identifier
     * @param version        workflow definition version
     * @param input          the initial workflow input (stored in the new context)
     * @param initialContext optional pre-seeded step outputs; may be null
     * @param initialStepId  the step the workflow should start at
     * @return the persisted, RUNNING instance
     */
    public WorkflowInstance createInstance(String workflowId,
                                           String version,
                                           Object input,
                                           Map<String, Object> initialContext,
                                           String initialStepId) {
        // Create a temporary context with initial data
        WorkflowContext context = WorkflowContext.newRun(input);
        if (initialContext != null) {
            context.setStepOutputs(initialContext);
        }

        // Create instance manually since we don't have the full graph
        long now = System.currentTimeMillis();
        WorkflowInstance instance = WorkflowInstance.builder()
            .instanceId(context.getRunId())
            .workflowId(workflowId)
            .workflowVersion(version)
            .context(context)
            .status(WorkflowInstance.WorkflowStatus.RUNNING)
            .currentStepId(initialStepId)
            .createdAt(now)
            .updatedAt(now)
            .build();

        // Route through saveInstance for consistency with every other mutation path.
        saveInstance(instance);
        log.debug("Created new workflow instance: {} for workflow: {}",
            instance.getInstanceId(), workflowId);
        return instance;
    }

    /**
     * Loads an existing workflow instance.
     *
     * @param runId the run identifier (also the instance id)
     * @return the instance, or empty if not found
     */
    public Optional<WorkflowInstance> loadInstance(String runId) {
        return stateRepository.load(runId);
    }

    /**
     * Saves the current state of a workflow instance.
     */
    public void saveInstance(WorkflowInstance instance) {
        stateRepository.save(instance);
    }

    /**
     * Updates instance after successful step execution and persists the result.
     * Only result types that carry data for the context are recorded.
     */
    public void updateAfterStepExecution(WorkflowInstance instance,
                                         StepNode step,
                                         StepResult<?> result) {
        // Update context with step output depending on the concrete result type.
        if (result instanceof StepResult.Continue<?> cont) {
            instance.updateContext(step.id(), cont.data());
        } else if (result instanceof StepResult.Suspend<?> susp) {
            // Store the suspend prompt as the step's result.
            instance.updateContext(step.id(), susp.promptToUser());
        } else if (result instanceof StepResult.Branch<?> branch) {
            // Store the branch event.
            instance.updateContext(step.id(), branch.event());
        }
        // Save state after update
        saveInstance(instance);
    }

    /**
     * Suspends a workflow instance and persists the new state.
     */
    public void suspendInstance(WorkflowInstance instance) {
        instance.suspend();
        saveInstance(instance);
        log.debug("Workflow suspended: {} at step: {}",
            instance.getInstanceId(), instance.getCurrentStepId());
    }

    /**
     * Resumes a suspended workflow instance with optional user input.
     *
     * @throws IllegalStateException if the instance is not currently SUSPENDED
     */
    public void resumeInstance(WorkflowInstance instance, Object userInput) {
        if (instance.getStatus() != WorkflowInstance.WorkflowStatus.SUSPENDED) {
            throw new IllegalStateException(
                "Cannot resume workflow that is not suspended: " + instance.getInstanceId()
            );
        }
        // Store user input (and its concrete type, for later deserialization) in context.
        if (userInput != null) {
            instance.updateContext(WorkflowContext.Keys.USER_INPUT, userInput);
            instance.updateContext(WorkflowContext.Keys.USER_INPUT_TYPE,
                userInput.getClass().getName());
        }
        instance.resume();
        saveInstance(instance);
        log.debug("Workflow resumed: {}", instance.getInstanceId());
    }

    /**
     * Marks workflow as completed, recording the final result in the context.
     */
    public void completeInstance(WorkflowInstance instance, Object finalResult) {
        instance.updateContext(WorkflowContext.Keys.FINAL_RESULT, finalResult);
        instance.updateStatus(WorkflowInstance.WorkflowStatus.COMPLETED);
        saveInstance(instance);
        log.debug("Workflow completed: {} with result type: {}",
            instance.getInstanceId(),
            finalResult != null ? finalResult.getClass().getSimpleName() : "null");
    }

    /**
     * Marks workflow as failed at the given step and persists the state.
     */
    public void failInstance(WorkflowInstance instance, Throwable error, String stepId) {
        instance.fail(error, stepId);
        saveInstance(instance);
        log.error("Workflow failed: {} at step: {}", instance.getInstanceId(), stepId, error);
    }

    /**
     * Updates the current step of the workflow and persists the change.
     */
    public void updateCurrentStep(WorkflowInstance instance, String stepId) {
        instance.setCurrentStepId(stepId);
        saveInstance(instance);
    }

    /**
     * Finds a step that can handle the given input type after suspension.
     * Direct outgoing edges from the current step are preferred; if none match,
     * all nodes in the graph are scanned as a fallback.
     *
     * @return the id of a step able to accept {@code inputType}, or empty if none found
     */
    public Optional<String> findStepForResumeInput(WorkflowInstance instance,
                                                   Class<?> inputType,
                                                   WorkflowGraph<?, ?> graph) {
        String currentStepId = instance.getCurrentStepId();

        // Check outgoing edges first — they represent the intended flow.
        var edges = graph.getOutgoingEdges(currentStepId);
        for (var edge : edges) {
            var targetStep = graph.getNode(edge.toStepId());
            if (targetStep.isPresent() && targetStep.get().canAcceptInput(inputType)) {
                return Optional.of(edge.toStepId());
            }
        }

        // Search all nodes if no direct edge found (excluding the current step itself).
        for (var node : graph.nodes().values()) {
            if (!node.id().equals(currentStepId) && node.canAcceptInput(inputType)) {
                return Optional.of(node.id());
            }
        }
        return Optional.empty();
    }

    /**
     * Marks workflow as failed.
     * Alias for failInstance for consistency with WorkflowEngine.
     */
    public void markFailed(WorkflowInstance instance, Throwable error, String stepId) {
        failInstance(instance, error, stepId);
    }

    /**
     * Cancels a workflow instance.
     *
     * @param instance The workflow instance to cancel
     */
    public void cancelInstance(WorkflowInstance instance) {
        instance.updateStatus(WorkflowInstance.WorkflowStatus.CANCELLED);
        // Route through saveInstance for consistency with the other state transitions.
        saveInstance(instance);
        log.info("Cancelled workflow instance: {}", instance.getInstanceId());
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/core/WorkflowTransitionResult.java
|
package ai.driftkit.workflow.engine.core;
import java.util.Map;
/**
 * Special result type for transitioning from one workflow to another.
 * This allows workflows to trigger other workflows, similar to WorkflowTransitionEvent
 * in the old chat-framework.
 *
 * Usage example:
 * <pre>
 * return StepResult.finish(new WorkflowTransitionResult(
 *     "target-workflow-id",
 *     Map.of("key", "value")
 * ));
 * </pre>
 *
 * The workflow engine or controller should check if the finish result is of this type
 * and handle the transition accordingly.
 *
 * <p>The {@code transitionData} map is defensively copied, so instances are fully
 * immutable regardless of what the caller passes in. Note that {@link Map#copyOf}
 * rejects null keys and values.
 */
public record WorkflowTransitionResult(
    String targetWorkflowId,
    Map<String, Object> transitionData
) {
    public WorkflowTransitionResult {
        if (targetWorkflowId == null || targetWorkflowId.isBlank()) {
            throw new IllegalArgumentException("targetWorkflowId cannot be null or blank");
        }
        // Defensive copy keeps the record immutable even when given a mutable map;
        // null is normalized to an empty map.
        transitionData = (transitionData == null) ? Map.of() : Map.copyOf(transitionData);
    }

    /**
     * Creates a transition result with empty transition data.
     */
    public WorkflowTransitionResult(String targetWorkflowId) {
        this(targetWorkflowId, Map.of());
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/domain/AsyncStepState.java
|
package ai.driftkit.workflow.engine.domain;
import ai.driftkit.workflow.engine.core.StepResult;
import lombok.Builder;
import lombok.Data;
import java.util.UUID;
/**
 * Structured representation of an asynchronous step's execution state.
 * This class encapsulates all state related to an async operation,
 * eliminating the need for string-based keys in the context.
 *
 * <p>NOTE(review): this class is mutable and not synchronized; callers appear to
 * confine each instance to a single async execution — confirm before sharing
 * instances across threads.
 */
@Data
@Builder
public class AsyncStepState {
    /**
     * The task ID - this is the @AsyncStep method ID (e.g., "executeTask").
     * NOT unique per execution - same for all concurrent executions of the same async step.
     */
    private final String taskId;

    /**
     * The unique message ID generated for external tracking.
     * This is unique per execution and used to track specific async operations.
     */
    private final String messageId;

    /**
     * The initial data object returned immediately to the user.
     */
    private final Object initialData;

    /**
     * The current data representing the async operation's state.
     * This is updated as the async operation progresses.
     */
    private Object currentData;

    /**
     * The percentage of completion (0-100).
     */
    private int percentComplete;

    /**
     * The current status message.
     */
    private String statusMessage;

    /**
     * When the async operation started (timestamp in milliseconds).
     */
    private final long startTime;

    /**
     * When the async operation completed (timestamp in milliseconds, 0 if not completed).
     */
    private long completionTime;

    /**
     * The result data once the async operation completes.
     */
    private Object resultData;

    /**
     * The final StepResult once the async operation completes.
     */
    private StepResult<?> finalResult;

    /**
     * Error information if the async operation failed.
     */
    private Throwable error;

    /**
     * The status of the async operation.
     */
    private AsyncStatus status;

    public enum AsyncStatus {
        PENDING,
        IN_PROGRESS,
        COMPLETED,
        FAILED,
        CANCELLED
    }

    /**
     * Creates a new AsyncStepState for a starting async operation.
     * Generates a unique messageId for tracking this specific execution.
     */
    public static AsyncStepState started(String taskId, Object initialData) {
        // Generate unique message ID for this execution
        String messageId = UUID.randomUUID().toString();
        return AsyncStepState.builder()
            .taskId(taskId)
            .messageId(messageId)
            .initialData(initialData)
            .currentData(initialData)
            .percentComplete(0)
            .statusMessage("Started")
            .startTime(System.currentTimeMillis())
            .status(AsyncStatus.IN_PROGRESS)
            .build();
    }

    /**
     * Updates the progress of this async operation.
     * Out-of-range percentages are clamped to the documented 0-100 range.
     */
    public void updateProgress(int percentComplete, Object progressData) {
        // Clamp so callers reporting e.g. 105 or -1 cannot corrupt the 0-100 invariant.
        this.percentComplete = Math.min(100, Math.max(0, percentComplete));
        this.currentData = progressData;
        // Extract status message if progressData is a string
        if (progressData instanceof String) {
            this.statusMessage = (String) progressData;
        }
    }

    /**
     * Marks this async operation as completed.
     */
    public void complete(Object resultData) {
        this.status = AsyncStatus.COMPLETED;
        this.percentComplete = 100;
        this.statusMessage = "Completed";
        this.resultData = resultData;
        this.currentData = resultData;
        this.completionTime = System.currentTimeMillis();
    }

    /**
     * Marks this async operation as failed.
     */
    public void fail(Throwable error) {
        this.status = AsyncStatus.FAILED;
        this.error = error;
        this.completionTime = System.currentTimeMillis();
        // Fall back to the exception class name so we never render "Failed: null".
        String message = error.getMessage();
        this.statusMessage = "Failed: " + (message != null ? message : error.getClass().getSimpleName());
    }

    /**
     * Marks this async operation as cancelled.
     */
    public void cancel() {
        this.status = AsyncStatus.CANCELLED;
        this.completionTime = System.currentTimeMillis();
        this.statusMessage = "Cancelled";
    }

    /**
     * Gets the duration of this async operation in milliseconds.
     * For running operations this is the elapsed time so far.
     */
    public long getDurationMs() {
        long endTime = completionTime > 0 ? completionTime : System.currentTimeMillis();
        return endTime - startTime;
    }

    /**
     * Checks if this async operation is still running.
     */
    public boolean isRunning() {
        return status == AsyncStatus.IN_PROGRESS;
    }

    /**
     * Checks if this async operation has completed (successfully or not).
     */
    public boolean isCompleted() {
        return status == AsyncStatus.COMPLETED ||
               status == AsyncStatus.FAILED ||
               status == AsyncStatus.CANCELLED;
    }

    /**
     * Gets the current data for conversion to ChatResponse.
     * Returns currentData if set, otherwise returns initialData.
     */
    public Object getCurrentData() {
        return currentData != null ? currentData : initialData;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/domain/ChatSession.java
|
package ai.driftkit.workflow.engine.domain;
import ai.driftkit.workflow.engine.chat.ChatContextHelper;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.HashMap;
import java.util.Map;
/**
 * A chat session owned by a user, grouping related messages.
 * Timestamps are epoch milliseconds.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class ChatSession {
    private String chatId;
    private String userId;
    private String name;
    private String description;
    private long createdAt;
    private long lastMessageTime;
    // Arbitrary session metadata; defaults to an empty mutable map.
    @Builder.Default
    private Map<String, Object> metadata = new HashMap<>();
    @Builder.Default
    private boolean archived = false;

    /**
     * Creates a new session with createdAt and lastMessageTime set to now.
     */
    public static ChatSession create(String chatId, String userId, String name) {
        long now = System.currentTimeMillis();
        return ChatSession.builder()
            .chatId(chatId)
            .userId(userId)
            .name(name)
            .createdAt(now)
            .lastMessageTime(now)
            .build();
    }

    /**
     * Sets the last-message timestamp and returns this session for chaining.
     *
     * @param time epoch milliseconds; must not be null
     * @throws NullPointerException if {@code time} is null (previously this surfaced
     *         as an unhelpful auto-unboxing NPE)
     */
    public ChatSession withLastMessageTime(Long time) {
        if (time == null) {
            throw new NullPointerException("lastMessageTime must not be null");
        }
        this.lastMessageTime = time;
        return this;
    }

    /**
     * Marks this session as archived and returns it for chaining.
     */
    public ChatSession archive() {
        this.archived = true;
        return this;
    }
}
|
0
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine
|
java-sources/ai/driftkit/driftkit-workflow-engine-core/0.8.1/ai/driftkit/workflow/engine/domain/PageRequest.java
|
package ai.driftkit.workflow.engine.domain;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * Simple page request abstraction without Spring dependencies.
 * Represents pagination parameters: zero-based page number, page size,
 * and an optional sort field with direction.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class PageRequest {
    @Builder.Default
    private int pageNumber = 0;
    @Builder.Default
    private int pageSize = 20;
    @Builder.Default
    private String sortBy = "id";
    @Builder.Default
    private SortDirection sortDirection = SortDirection.ASC;

    /**
     * Computes the element offset of this page (pageNumber * pageSize),
     * widened to long to avoid int overflow for large pages.
     */
    public long getOffset() {
        long page = pageNumber;
        return page * pageSize;
    }

    /**
     * Creates a page request with default sorting (by "id", ascending).
     */
    public static PageRequest of(int page, int size) {
        return PageRequest.builder()
            .pageNumber(page)
            .pageSize(size)
            .build();
    }

    /**
     * Creates a fully specified page request.
     */
    public static PageRequest of(int page, int size, String field, SortDirection direction) {
        return PageRequest.builder()
            .pageNumber(page)
            .pageSize(size)
            .sortBy(field)
            .sortDirection(direction)
            .build();
    }

    /** Sort order for the {@code sortBy} field. */
    public enum SortDirection {
        ASC, DESC
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.