| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/connector
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/connector/sql/StorageObject.java
|
package ai.knowly.langtorch.connector.sql;
/** Shared interface for objects read with the SQL connector. */
public interface StorageObject {}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/example/ExampleUtils.java
|
package ai.knowly.langtorch.example;
import ai.knowly.langtorch.capability.Capability;
import com.google.common.flogger.FluentLogger;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
public class ExampleUtils {
private ExampleUtils() {}
static void readInputUntilEXIT(FluentLogger logger, Capability<String, String> capability)
throws IOException {
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
String input;
final String sentinel = "EXIT"; // Define a sentinel value to exit the loop
logger.atInfo().log("Type '%s' and press Enter to exit the application.%n", sentinel);
while (true) {
input = reader.readLine();
if (input == null || sentinel.equalsIgnoreCase(input)) {
break; // Exit the loop if the user types the sentinel value
}
logger.atInfo().log("User: " + input);
String assistantMsg = capability.run(input);
logger.atInfo().log("Assistant: " + assistantMsg);
}
logger.atInfo().log("Exiting the application.");
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/example/SimpleChatBotWithExplicitKey.java
|
package ai.knowly.langtorch.example;
import static ai.knowly.langtorch.example.ExampleUtils.readInputUntilEXIT;
import ai.knowly.langtorch.capability.integration.openai.SimpleChatCapability;
import ai.knowly.langtorch.hub.LangtorchHub;
import ai.knowly.langtorch.hub.LangtorchHubModuleRegistry;
import ai.knowly.langtorch.hub.module.token.TokenUsage;
import ai.knowly.langtorch.hub.schema.OpenAIKeyConfig;
import com.google.common.flogger.FluentLogger;
import java.io.IOException;
public class SimpleChatBotWithExplicitKey {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
public static void main(String[] args) throws IOException {
String openAIKey = "YOUR_OPENAI_API_KEY";
LangtorchHubModuleRegistry registry = LangtorchHubModuleRegistry.create();
registry.registerOpenAiModule(OpenAIKeyConfig.createOpenConfigWithApiKey(openAIKey));
LangtorchHub langtorchHub = new LangtorchHub(registry);
SimpleChatCapability chatBot = langtorchHub.getInstance(SimpleChatCapability.class);
readInputUntilEXIT(logger, chatBot);
TokenUsage tokenUsage = langtorchHub.getTokenUsage();
logger.atInfo().log(
"Prompt token usage: %s, Completion token usage: %s",
tokenUsage.getPromptTokenUsage(), tokenUsage.getCompletionTokenUsage());
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/example/SimpleChatBotWithImplicitKey.java
|
package ai.knowly.langtorch.example;
import static ai.knowly.langtorch.example.ExampleUtils.readInputUntilEXIT;
import ai.knowly.langtorch.capability.integration.openai.SimpleChatCapability;
import ai.knowly.langtorch.hub.LangtorchHub;
import ai.knowly.langtorch.hub.LangtorchHubModuleRegistry;
import ai.knowly.langtorch.hub.module.token.TokenUsage;
import ai.knowly.langtorch.hub.schema.OpenAIKeyConfig;
import com.google.common.flogger.FluentLogger;
import java.io.IOException;
public class SimpleChatBotWithImplicitKey {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
public static void main(String[] args) throws IOException {
LangtorchHubModuleRegistry registry = LangtorchHubModuleRegistry.create();
registry.registerOpenAiModule(OpenAIKeyConfig.createOpenConfigReadFromEnv());
LangtorchHub langtorchHub = new LangtorchHub(registry);
SimpleChatCapability chatBot = langtorchHub.getInstance(SimpleChatCapability.class);
readInputUntilEXIT(logger, chatBot);
TokenUsage tokenUsage = langtorchHub.getTokenUsage();
logger.atInfo().log(
"Prompt token usage: %s, Completion token usage: %s",
tokenUsage.getPromptTokenUsage(), tokenUsage.getCompletionTokenUsage());
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/LangtorchHub.java
|
package ai.knowly.langtorch.hub;
import ai.knowly.langtorch.hub.module.token.TokenUsage;
import com.google.inject.Guice;
import com.google.inject.Injector;
import javax.inject.Inject;
/** LangtorchHub is the entry point for the Langtorch library. */
public class LangtorchHub {
private final Injector injector;
@Inject
public LangtorchHub(LangtorchHubModuleRegistry registry) {
this.injector = Guice.createInjector(registry.getModules());
}
public <T> T getInstance(Class<T> clazz) {
return injector.getInstance(clazz);
}
public TokenUsage getTokenUsage() {
return injector.getInstance(TokenUsage.class);
}
}
|
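A minimal usage sketch (not part of the dump): LangtorchHub is a thin wrapper over a Guice Injector, so any binding contributed by the registered modules can be resolved through it, not just chat capabilities.

```java
// Sketch, assuming a registry prepared as in the SimpleChatBot examples above.
LangtorchHubModuleRegistry registry = LangtorchHubModuleRegistry.create();
registry.registerOpenAiModule(OpenAIKeyConfig.createOpenConfigReadFromEnv());
LangtorchHub hub = new LangtorchHub(registry);

// getTokenUsage() is shorthand for getInstance(TokenUsage.class); both return
// the @Singleton instance provided by OpenAITokenModule.
TokenUsage usage = hub.getInstance(TokenUsage.class);

// ConversationMemory is bound with toInstance(...) inside registerOpenAiModule.
ConversationMemory memory = hub.getInstance(ConversationMemory.class);
```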
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/LangtorchHubModuleRegistry.java
|
package ai.knowly.langtorch.hub;
import ai.knowly.langtorch.hub.module.token.OpenAITokenModule;
import ai.knowly.langtorch.hub.schema.OpenAIKeyConfig;
import ai.knowly.langtorch.llm.openai.modules.key.OpenAIServiceConfigWithExplicitAPIKeyModule;
import ai.knowly.langtorch.llm.openai.modules.key.OpenAIServiceConfigWithImplicitAPIKeyModule;
import ai.knowly.langtorch.processor.openai.chat.OpenAIChatProcessorConfig;
import ai.knowly.langtorch.store.memory.conversation.ConversationMemory;
import com.google.inject.AbstractModule;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
public final class LangtorchHubModuleRegistry extends AbstractModule {
private final List<AbstractModule> modules;
public static LangtorchHubModuleRegistry create() {
// TODO: Pass in args here and process them.
return new LangtorchHubModuleRegistry();
}
public List<AbstractModule> getModules() {
return modules;
}
/** Registers OpenAI-related modules in the Langtorch hub. */
public void registerOpenAiModule(OpenAIKeyConfig config) {
modules.add(new OpenAITokenModule());
modules.add(getOpenAIModule(config));
modules.add(
new AbstractModule() {
@Override
protected void configure() {
bind(ConversationMemory.class).toInstance(ConversationMemory.getDefaultInstance());
bind(OpenAIChatProcessorConfig.class)
.toInstance(OpenAIChatProcessorConfig.getDefaultInstance());
}
});
}
private AbstractModule getOpenAIModule(OpenAIKeyConfig openAIKeyConfig) {
Optional<String> config = openAIKeyConfig.getOpenAiApiKey();
if (openAIKeyConfig.isReadFromEnvFile()) {
return new OpenAIServiceConfigWithImplicitAPIKeyModule();
}
if (config.isPresent()) {
return new OpenAIServiceConfigWithExplicitAPIKeyModule(config.get());
}
throw new IllegalArgumentException(
"OpenAI API key is not present. Please provide the API key in the config.");
}
private LangtorchHubModuleRegistry() {
this.modules = new ArrayList<>();
}
}
|
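A fragment sketching how the two public OpenAIKeyConfig factories (defined in a later row of this dump) select the key module inside getOpenAIModule:

```java
LangtorchHubModuleRegistry registry = LangtorchHubModuleRegistry.create();

// readFromEnvFile == true -> OpenAIServiceConfigWithImplicitAPIKeyModule.
registry.registerOpenAiModule(OpenAIKeyConfig.createOpenConfigReadFromEnv());

// Key present -> OpenAIServiceConfigWithExplicitAPIKeyModule wrapping that key.
registry.registerOpenAiModule(OpenAIKeyConfig.createOpenConfigWithApiKey("sk-..."));

// createOpenConfigWithApiKey(null) satisfies neither branch, so registering it
// throws the IllegalArgumentException above.
```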
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/module
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/module/token/EnableOpenAITokenRecord.java
|
package ai.knowly.langtorch.hub.module.token;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface EnableOpenAITokenRecord {}
|
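A hypothetical client showing where the annotation goes; HuggingFaceService later in this dump uses it on its async text-generation method. RecordedChatClient and ChatBackend are illustrative names, not part of the library, and Guice AOP only intercepts non-private, non-final methods on instances the injector itself constructs.

```java
import ai.knowly.langtorch.hub.module.token.EnableOpenAITokenRecord;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.chat.ChatCompletionResult;
import com.google.common.util.concurrent.ListenableFuture;
import javax.inject.Inject;

public class RecordedChatClient {
  /** Hypothetical async backend abstraction. */
  public interface ChatBackend {
    ListenableFuture<ChatCompletionResult> chatAsync(String prompt);
  }

  private final ChatBackend backend;

  @Inject
  RecordedChatClient(ChatBackend backend) {
    this.backend = backend;
  }

  // The interceptor bound in OpenAITokenModule wraps the returned future and
  // adds the usage counts from ChatCompletionResult to the TokenUsage singleton.
  @EnableOpenAITokenRecord
  public ListenableFuture<ChatCompletionResult> chatAsync(String prompt) {
    return backend.chatAsync(prompt);
  }
}
```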
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/module
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/module/token/OpenAITokenModule.java
|
package ai.knowly.langtorch.hub.module.token;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.google.inject.Singleton;
import com.google.inject.matcher.Matchers;
import java.util.concurrent.atomic.AtomicLong;
public class OpenAITokenModule extends AbstractModule {
@Provides
@Singleton
public static TokenUsage provideTokenUsageContainer() {
return TokenUsage.builder()
.setPromptTokenUsage(new AtomicLong(0))
.setCompletionTokenUsage(new AtomicLong(0))
.build();
}
@Override
protected void configure() {
bindInterceptor(
Matchers.any(),
Matchers.annotatedWith(EnableOpenAITokenRecord.class),
new OpenAITokenUsageInterceptor(getProvider(TokenUsage.class)));
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/module
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/module/token/OpenAITokenUsageInterceptor.java
|
package ai.knowly.langtorch.hub.module.token;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.CompletionResult;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.chat.ChatCompletionResult;
import com.google.common.flogger.FluentLogger;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import com.google.inject.Inject;
import com.google.inject.Provider;
import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
public class OpenAITokenUsageInterceptor implements MethodInterceptor {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private final Provider<TokenUsage> tokenUsage;
@Inject
public OpenAITokenUsageInterceptor(Provider<TokenUsage> tokenUsage) {
this.tokenUsage = tokenUsage;
}
@Override
public Object invoke(MethodInvocation invocation) throws Throwable {
Object result = invocation.proceed();
if (result instanceof ListenableFuture) {
// Wrap the original future so usage is recorded before the caller observes the result.
SettableFuture<Object> newFuture = SettableFuture.create();
@SuppressWarnings("unchecked")
ListenableFuture<Object> originalFuture = (ListenableFuture<Object>) result;
Futures.addCallback(
originalFuture,
new FutureCallback<Object>() {
@Override
public void onSuccess(Object result) {
if (result instanceof ChatCompletionResult) {
ChatCompletionResult chatCompletionResult = (ChatCompletionResult) result;
tokenUsage
.get()
.getPromptTokenUsage()
.getAndAdd(chatCompletionResult.getUsage().getPromptTokens());
tokenUsage
.get()
.getCompletionTokenUsage()
.addAndGet(chatCompletionResult.getUsage().getCompletionTokens());
}
if (result instanceof CompletionResult) {
CompletionResult completionResult = (CompletionResult) result;
tokenUsage
.get()
.getPromptTokenUsage()
.getAndAdd(completionResult.getUsage().getPromptTokens());
tokenUsage
.get()
.getCompletionTokenUsage()
.addAndGet(completionResult.getUsage().getCompletionTokens());
}
newFuture.set(result);
}
@Override
public void onFailure(Throwable thrown) {
logger.atWarning().withCause(thrown).log(
"Intercepted future failed in OpenAITokenUsageInterceptor");
newFuture.setException(thrown);
}
},
// Run the lightweight accounting callback inline rather than creating a new thread pool per call.
MoreExecutors.directExecutor());
// Return newFuture instead of the original one
return newFuture;
}
return result;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/module
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/module/token/TokenUsage.java
|
package ai.knowly.langtorch.hub.module.token;
import java.util.concurrent.atomic.AtomicLong;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public class TokenUsage {
@Builder.Default private AtomicLong promptTokenUsage = new AtomicLong(0);
@Builder.Default private AtomicLong completionTokenUsage = new AtomicLong(0);
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/hub/schema/OpenAIKeyConfig.java
|
package ai.knowly.langtorch.hub.schema;
import java.util.Optional;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Data;
@Data
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public class OpenAIKeyConfig {
private String openAiApiKey;
// Read the OpenAI API key from the .env file.
// If set, no need to set the openAiApiKey explicitly.
private boolean readFromEnvFile;
public Optional<String> getOpenAiApiKey() {
return Optional.ofNullable(openAiApiKey);
}
public static OpenAIKeyConfig createOpenConfigReadFromEnv() {
return new OpenAIKeyConfig(null, true);
}
public static OpenAIKeyConfig createOpenConfigWithApiKey(String apiKey) {
return new OpenAIKeyConfig(apiKey, false);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/CohereAIApi.java
|
package ai.knowly.langtorch.llm.cohere;
import ai.knowly.langtorch.llm.cohere.schema.CohereGenerateRequest;
import ai.knowly.langtorch.llm.cohere.schema.CohereGenerateResponse;
import com.google.common.util.concurrent.ListenableFuture;
import retrofit2.http.Body;
import retrofit2.http.POST;
public interface CohereAIApi {
@POST("/v1/generate")
ListenableFuture<CohereGenerateResponse> generate(@Body CohereGenerateRequest request);
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/CohereAIService.java
|
package ai.knowly.langtorch.llm.cohere;
import ai.knowly.langtorch.llm.cohere.schema.CohereExecutionException;
import ai.knowly.langtorch.llm.cohere.schema.CohereGenerateRequest;
import ai.knowly.langtorch.llm.cohere.schema.CohereGenerateResponse;
import ai.knowly.langtorch.llm.cohere.schema.CohereHttpException;
import ai.knowly.langtorch.llm.cohere.schema.CohereInterruptedException;
import ai.knowly.langtorch.llm.cohere.schema.config.CohereAIServiceConfig;
import ai.knowly.langtorch.llm.cohere.serialization.CohereGenerateRequestAdapter;
import ai.knowly.langtorch.utils.future.retry.FutureRetrier;
import com.google.common.flogger.FluentLogger;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.inject.Inject;
import java.io.IOException;
import java.time.Duration;
import java.util.Objects;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import okhttp3.ConnectionPool;
import okhttp3.OkHttpClient;
import retrofit2.HttpException;
import retrofit2.Retrofit;
import retrofit2.adapter.guava.GuavaCallAdapterFactory;
import retrofit2.converter.gson.GsonConverterFactory;
public class CohereAIService {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private static final String BASE_URL = "https://api.cohere.ai/";
private static final Gson gson =
new GsonBuilder()
.registerTypeAdapter(CohereGenerateRequest.class, new CohereGenerateRequestAdapter())
.create();
private final CohereAIApi api;
private final FutureRetrier futureRetrier;
private final ScheduledExecutorService scheduledExecutor;
/** Creates a new CohereAIService that wraps CohereAIApi. */
@Inject
public CohereAIService(CohereAIServiceConfig config) {
this.api = buildApi(config);
scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
this.futureRetrier =
new FutureRetrier(scheduledExecutor, config.backoffStrategy(), config.retryConfig());
}
public static <T> T execute(ListenableFuture<T> apiCall) {
try {
return apiCall.get();
} catch (InterruptedException e) {
// Restore the interrupt status
Thread.currentThread().interrupt();
// Optionally, log or handle the exception here.
logger.atSevere().withCause(e).log("Thread was interrupted during API call.");
throw new CohereInterruptedException(e);
} catch (ExecutionException e) {
if (e.getCause() instanceof HttpException) {
HttpException httpException = (HttpException) e.getCause();
try {
String errorBody = httpException.response().errorBody().string();
logger.atSevere().log("HTTP Error: %s", errorBody);
throw new CohereHttpException(errorBody, httpException);
} catch (IOException ioException) {
logger.atSevere().withCause(ioException).log("Error while reading errorBody");
}
}
throw new CohereExecutionException(e);
}
}
public static CohereAIApi buildApi(CohereAIServiceConfig config) {
Objects.requireNonNull(config.apiKey(), "Cohere token required");
OkHttpClient client = defaultClient(config.apiKey(), config.timeoutDuration());
Retrofit retrofit = defaultRetrofit(client, gson);
return retrofit.create(CohereAIApi.class);
}
public static OkHttpClient defaultClient(String token, Duration timeout) {
return new OkHttpClient.Builder()
.addInterceptor(new CohereAuthenticationInterceptor(token))
.connectionPool(new ConnectionPool(5, 1, TimeUnit.SECONDS))
.readTimeout(timeout.toMillis(), TimeUnit.MILLISECONDS)
.build();
}
public static Retrofit defaultRetrofit(OkHttpClient client, Gson gson) {
return new Retrofit.Builder()
.baseUrl(BASE_URL)
.client(client)
.addConverterFactory(GsonConverterFactory.create(gson))
.addCallAdapterFactory(GuavaCallAdapterFactory.create())
.build();
}
public CohereGenerateResponse generate(CohereGenerateRequest request) {
return execute(futureRetrier.runWithRetries(() -> api.generate(request), response -> true));
}
public ListenableFuture<CohereGenerateResponse> generateAsync(CohereGenerateRequest request) {
return futureRetrier.runWithRetries(() -> api.generate(request), response -> true);
}
}
|
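A minimal end-to-end sketch built only from the classes in this dump; the COHERE_API_KEY environment variable, class name, and prompt are illustrative.

```java
import ai.knowly.langtorch.llm.cohere.CohereAIService;
import ai.knowly.langtorch.llm.cohere.schema.CohereGenerateRequest;
import ai.knowly.langtorch.llm.cohere.schema.CohereGenerateResponse;
import ai.knowly.langtorch.llm.cohere.schema.config.CohereAIServiceConfig;

public class CohereGenerateSketch {
  public static void main(String[] args) {
    CohereAIServiceConfig config =
        CohereAIServiceConfig.builder().setApiKey(System.getenv("COHERE_API_KEY")).build();
    CohereAIService service = new CohereAIService(config);
    CohereGenerateRequest request =
        CohereGenerateRequest.builder().prompt("Write a haiku about Java.").maxTokens(40).build();
    CohereGenerateResponse response = service.generate(request);
    System.out.println(response.getGenerations().get(0).getText());
  }
}
```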
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/CohereAuthenticationInterceptor.java
|
package ai.knowly.langtorch.llm.cohere;
import java.io.IOException;
import okhttp3.Interceptor;
import okhttp3.Request;
import okhttp3.Response;
public class CohereAuthenticationInterceptor implements Interceptor {
private final String token;
CohereAuthenticationInterceptor(String token) {
this.token = token;
}
@Override
public Response intercept(Chain chain) throws IOException {
Request request =
chain
.request()
.newBuilder()
.header("accept", "application/json")
.header("content-type", "application/json")
.header("authorization", "Bearer " + token)
.build();
return chain.proceed(request);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/schema/CohereError.java
|
package ai.knowly.langtorch.llm.cohere.schema;
import com.google.auto.value.AutoValue;
@AutoValue
public abstract class CohereError {
public static Builder builder() {
return new AutoValue_CohereError.Builder();
}
public abstract Integer code();
public abstract String message();
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder setCode(Integer code);
public abstract Builder setMessage(String message);
public abstract CohereError build();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/schema/CohereExecutionException.java
|
package ai.knowly.langtorch.llm.cohere.schema;
import java.util.concurrent.ExecutionException;
public class CohereExecutionException extends RuntimeException {
public CohereExecutionException(ExecutionException e) {
super(e);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/schema/CohereGenerateRequest.java
|
package ai.knowly.langtorch.llm.cohere.schema;
import com.google.auto.value.AutoValue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@AutoValue
public abstract class CohereGenerateRequest {
private static final String DEFAULT_MODEL = "command";
public static Builder builder() {
return new AutoValue_CohereGenerateRequest.Builder()
.model(DEFAULT_MODEL)
.numGenerations(1)
.maxTokens(20)
.preset("")
.temperature(0.0)
.k(0)
.p(0.0)
.frequencyPenalty(0.0)
.presencePenalty(0.0)
.endSequences(new ArrayList<>())
.stopSequences(new ArrayList<>())
.logitBias(new HashMap<>())
.returnLikelihoods("NONE")
.truncate("END");
}
// Represents the prompt or text to be completed. Trailing whitespaces will be trimmed.
public abstract String prompt();
// The model to generate with. Currently available models are command (default),
// command-nightly (experimental), command-light, and command-light-nightly (experimental).
// Smaller, "light" models are faster, while larger models will perform better. Custom models can
// also be supplied with their full ID.
public abstract String model();
// Defaults to 1, min value of 1, max value of 5. Denotes the maximum number of generations that
// will be returned.
public abstract Integer numGenerations();
// Denotes the number of tokens to predict per generation, defaults to 20. See BPE
// Tokens[https://docs.cohere.com/docs/tokens] for more details.
// Can only be set to 0 if return_likelihoods is set to ALL to get the likelihood of the prompt.
public abstract Integer maxTokens();
// The ID of a custom playground preset. You can create presets in the playground. If you use a
// preset, the prompt parameter becomes optional, and any included parameters will override the
// preset's parameters.
public abstract String preset();
// Min value of 0.0, max value of 5.0. A non-negative float that tunes the
// degree of randomness in generation. Lower temperatures mean less random generations. See
// Temperature for more details.
public abstract Double temperature();
// Defaults to 0 (disabled), which is the minimum. Maximum value is 500. Ensures only the top k
// most likely tokens are considered for generation at each step.
public abstract Integer k();
// Set to 1.0 or 0 to disable. If set to a probability 0.0 < p < 1.0, it ensures
// that only the most likely tokens, with total probability mass of p, are considered for
// generation at each step. If both k and p are enabled, p acts after k.
public abstract Double p();
// Defaults to 0.0, min value of 0.0, max value of 1.0. Can be used to reduce repetitiveness of
// generated tokens. The higher the value, the stronger a penalty is applied to previously present
// tokens, proportional to how many times they have already appeared in the prompt or prior
// generation.
public abstract Double frequencyPenalty();
// Defaults to 0.0, min value of 0.0, max value of 1.0. Can be used to reduce repetitiveness of
// generated tokens. Similar to frequency_penalty, except that this penalty is applied equally to
// all tokens that have already appeared, regardless of their exact frequencies.
public abstract Double presencePenalty();
// The generated text will be cut at the beginning of the earliest occurrence of an end sequence.
// The sequence will be excluded from the text.
public abstract ImmutableList<String> endSequences();
// The generated text will be cut at the end of the earliest occurrence of a stop sequence. The
// sequence will be included in the text.
public abstract ImmutableList<String> stopSequences();
// One of GENERATION|ALL|NONE to specify how and if the token likelihoods are returned with the
// response. Defaults to NONE.
//
// If GENERATION is selected, the token likelihoods will only be provided for generated text.
//
// If ALL is selected, the token likelihoods will be provided both for the prompt and the
// generated text.
public abstract String returnLikelihoods();
// Used to prevent the model from generating unwanted tokens or to incentivize it to include
// desired tokens. The format is {token_id: bias} where bias is a float between -10 and 10. Tokens
// can be obtained from text using Tokenize.
//
// For example, if the value {'11': -10} is provided, the model will be very unlikely to include
// the token 11 ("\n", the newline character) anywhere in the generated text. In contrast {'11':
// 10} will result in generations that nearly only contain that token. Values between -10 and 10
// will proportionally affect the likelihood of the token appearing in the generated text.
//
// Note: logit bias may not be supported for all custom models.
public abstract ImmutableMap<String, Float> logitBias();
// One of NONE|START|END to specify how the API will handle inputs longer than the maximum token
// length.
//
// Passing START will discard the start of the input. END will discard the end of the input. In
// both cases, input is discarded until the remaining input is exactly the maximum input token
// length for the model.
//
// If NONE is selected, when the input exceeds the maximum input token length an error will be
// returned.
public abstract String truncate();
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder prompt(String prompt);
public abstract Builder model(String model);
public abstract Builder numGenerations(Integer numGenerations);
public abstract Builder maxTokens(Integer maxTokens);
public abstract Builder preset(String preset);
public abstract Builder temperature(Double temperature);
public abstract Builder k(Integer k);
public abstract Builder p(Double p);
public abstract Builder frequencyPenalty(Double frequencyPenalty);
public abstract Builder presencePenalty(Double presencePenalty);
public abstract Builder endSequences(List<String> endSequences);
public abstract Builder stopSequences(List<String> stopSequences);
public abstract Builder returnLikelihoods(String returnLikelihoods);
public abstract Builder logitBias(Map<String, Float> logitBias);
public abstract Builder truncate(String truncate);
public abstract CohereGenerateRequest build();
}
}
|
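A fragment showing the builder in use: builder() pre-fills every field with the defaults listed above, so prompt() is the only required call. ImmutableList and ImmutableMap are the Guava types this file already imports; the values are illustrative and restate the documented ranges.

```java
CohereGenerateRequest request =
    CohereGenerateRequest.builder()
        .prompt("Summarize the plot of Hamlet.")
        .temperature(0.7)                         // 0.0..5.0
        .k(40)                                    // top-k; 0 disables
        .stopSequences(ImmutableList.of("\n\n"))  // cut at, and keep, this sequence
        .logitBias(ImmutableMap.of("11", -10.0f)) // strongly discourage token 11 ("\n")
        .build();
```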
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/schema/CohereGenerateResponse.java
|
package ai.knowly.langtorch.llm.cohere.schema;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class CohereGenerateResponse {
private String id;
private List<Generation> generations;
private List<String> warnings;
private Map<String, Object> dynamicFields;
// common fields getters and setters
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public List<Generation> getGenerations() {
return generations;
}
public void setGenerations(List<Generation> generations) {
this.generations = generations;
}
public List<String> getWarnings() {
return warnings;
}
public void setWarnings(List<String> warnings) {
this.warnings = warnings;
}
// dynamic fields getters and setters
public Object getField(String key) {
if (dynamicFields != null) {
return dynamicFields.get(key);
}
return null;
}
public void setField(String key, Object value) {
if (dynamicFields == null) {
dynamicFields = new HashMap<>();
}
dynamicFields.put(key, value);
}
}
|
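A fragment illustrating the dynamic-fields escape hatch: nothing populates dynamicFields during deserialization, so callers stash and read extra values themselves. Key and value are illustrative.

```java
CohereGenerateResponse response = new CohereGenerateResponse();
response.setField("api_version", "2022-12-06");
Object version = response.getField("api_version"); // null if never set
```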
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/schema/CohereHttpException.java
|
package ai.knowly.langtorch.llm.cohere.schema;
public class CohereHttpException extends RuntimeException {
public CohereHttpException(String msg, Exception parent) {
super(msg, parent);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/schema/CohereInterruptedException.java
|
package ai.knowly.langtorch.llm.cohere.schema;
public class CohereInterruptedException extends RuntimeException {
public CohereInterruptedException(InterruptedException e) {
super(e);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/schema/Generation.java
|
package ai.knowly.langtorch.llm.cohere.schema;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class Generation {
private String id;
private String text;
private List<TokenLikelihood> tokenLikelihoods;
private Map<String, Object> dynamicFields;
// common fields getters and setters
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getText() {
return text;
}
public void setText(String text) {
this.text = text;
}
public List<TokenLikelihood> getTokenLikelihoods() {
return tokenLikelihoods;
}
public void setTokenLikelihoods(List<TokenLikelihood> tokenLikelihoods) {
this.tokenLikelihoods = tokenLikelihoods;
}
// dynamic fields getters and setters
public Object getField(String key) {
if (dynamicFields != null) {
return dynamicFields.get(key);
}
return null;
}
public void setField(String key, Object value) {
if (dynamicFields == null) {
dynamicFields = new HashMap<>();
}
dynamicFields.put(key, value);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/schema/TokenLikelihood.java
|
package ai.knowly.langtorch.llm.cohere.schema;
public class TokenLikelihood {
private String token;
private double likelihood;
public String getToken() {
return token;
}
public void setToken(String token) {
this.token = token;
}
public double getLikelihood() {
return likelihood;
}
public void setLikelihood(double likelihood) {
this.likelihood = likelihood;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/schema/config/CohereAIServiceConfig.java
|
package ai.knowly.langtorch.llm.cohere.schema.config;
import ai.knowly.langtorch.utils.future.retry.RetryConfig;
import ai.knowly.langtorch.utils.future.retry.strategy.BackoffStrategy;
import ai.knowly.langtorch.utils.future.retry.strategy.ExponentialBackoffStrategy;
import com.google.auto.value.AutoValue;
import java.time.Duration;
@AutoValue
public abstract class CohereAIServiceConfig {
public static Builder builder() {
return new AutoValue_CohereAIServiceConfig.Builder()
.setTimeoutDuration(Duration.ofSeconds(10))
.setRetryConfig(RetryConfig.getDefaultInstance())
.setBackoffStrategy(new ExponentialBackoffStrategy());
}
public abstract String apiKey();
public abstract Duration timeoutDuration();
public abstract BackoffStrategy backoffStrategy();
public abstract RetryConfig retryConfig();
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder setApiKey(String newApiKey);
public abstract Builder setTimeoutDuration(Duration newTimeoutDuration);
public abstract Builder setBackoffStrategy(BackoffStrategy newBackoffStrategy);
public abstract Builder setRetryConfig(RetryConfig newRetryConfig);
public abstract CohereAIServiceConfig build();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/cohere/serialization/CohereGenerateRequestAdapter.java
|
package ai.knowly.langtorch.llm.cohere.serialization;
import ai.knowly.langtorch.llm.cohere.schema.CohereGenerateRequest;
import com.google.gson.TypeAdapter;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonToken;
import com.google.gson.stream.JsonWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class CohereGenerateRequestAdapter extends TypeAdapter<CohereGenerateRequest> {
@Override
public void write(JsonWriter out, CohereGenerateRequest value) throws IOException {
out.beginObject();
out.name("prompt").value(value.prompt());
out.name("model").value(value.model());
out.name("num_generations").value(value.numGenerations());
out.name("max_tokens").value(value.maxTokens());
if (value.preset() != null && !value.preset().isEmpty()) {
out.name("preset").value(value.preset());
}
out.name("temperature").value(value.temperature());
out.name("k").value(value.k());
out.name("p").value(value.p());
out.name("frequency_penalty").value(value.frequencyPenalty());
out.name("presence_penalty").value(value.presencePenalty());
if (value.endSequences() != null && !value.endSequences().isEmpty()) {
out.name("end_sequences").beginArray();
for (String endSequence : value.endSequences()) {
out.value(endSequence);
}
out.endArray();
}
if (value.stopSequences() != null && !value.stopSequences().isEmpty()) {
out.name("stop_sequences").beginArray();
for (String stopSequence : value.stopSequences()) {
out.value(stopSequence);
}
out.endArray();
}
out.name("return_likelihoods").value(value.returnLikelihoods());
if (value.logitBias() != null && !value.logitBias().isEmpty()) {
out.name("logit_bias").beginObject();
for (Map.Entry<String, Float> entry : value.logitBias().entrySet()) {
out.name(entry.getKey()).value(entry.getValue());
}
out.endObject();
}
out.name("truncate").value(value.truncate());
out.endObject();
}
@Override
public CohereGenerateRequest read(JsonReader in) throws IOException {
if (in.peek() == JsonToken.NULL) {
in.nextNull();
return null;
}
in.beginObject();
CohereGenerateRequest.Builder builder = CohereGenerateRequest.builder();
while (in.hasNext()) {
String name = in.nextName();
switch (name) {
case "prompt":
builder.prompt(in.nextString());
break;
case "model":
builder.model(in.nextString());
break;
case "num_generations":
builder.numGenerations(in.nextInt());
break;
case "max_tokens":
builder.maxTokens(in.nextInt());
break;
case "preset":
builder.preset(in.nextString());
break;
case "temperature":
builder.temperature(in.nextDouble());
break;
case "k":
builder.k(in.nextInt());
break;
case "p":
builder.p(in.nextDouble());
break;
case "frequency_penalty":
builder.frequencyPenalty(in.nextDouble());
break;
case "presence_penalty":
builder.presencePenalty(in.nextDouble());
break;
case "end_sequences":
List<String> endSequences = new ArrayList<>();
in.beginArray();
while (in.hasNext()) {
endSequences.add(in.nextString());
}
in.endArray();
builder.endSequences(endSequences);
break;
case "stop_sequences":
List<String> stopSequences = new ArrayList<>();
in.beginArray();
while (in.hasNext()) {
stopSequences.add(in.nextString());
}
in.endArray();
builder.stopSequences(stopSequences);
break;
case "return_likelihoods":
builder.returnLikelihoods(in.nextString());
break;
case "logit_bias":
Map<String, Float> logitBias = new HashMap<>();
in.beginObject();
while (in.hasNext()) {
logitBias.put(in.nextName(), (float) in.nextDouble());
}
in.endObject();
builder.logitBias(logitBias);
break;
case "truncate":
builder.truncate(in.nextString());
break;
default:
in.skipValue();
break;
}
}
in.endObject();
return builder.build();
}
}
|
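A round-trip fragment: the adapter only takes effect once registered, exactly as CohereAIService does above. write() emits the snake_case wire format (skipping empty preset, lists, and logit bias), and read() tolerates missing keys by leaving the builder defaults in place.

```java
Gson gson =
    new GsonBuilder()
        .registerTypeAdapter(CohereGenerateRequest.class, new CohereGenerateRequestAdapter())
        .create();
CohereGenerateRequest request = CohereGenerateRequest.builder().prompt("hello").build();
String json = gson.toJson(request); // {"prompt":"hello","model":"command",...}
CohereGenerateRequest back = gson.fromJson(json, CohereGenerateRequest.class);
```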
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/HuggingFaceApi.java
|
package ai.knowly.langtorch.llm.huggingface;
import ai.knowly.langtorch.llm.huggingface.schema.dto.CreateTextGenerationTaskRequest;
import ai.knowly.langtorch.llm.huggingface.schema.dto.CreateTextGenerationTaskResponse;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
import retrofit2.http.Body;
import retrofit2.http.POST;
/** HuggingFaceApi provides the Retrofit interface for the HuggingFace API. */
public interface HuggingFaceApi {
@POST(".")
ListenableFuture<List<CreateTextGenerationTaskResponse>> createTextGenerationTask(
@Body CreateTextGenerationTaskRequest request);
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/HuggingFaceAuthenticationInterceptor.java
|
package ai.knowly.langtorch.llm.huggingface;
import java.io.IOException;
import java.util.Objects;
import okhttp3.Interceptor;
import okhttp3.Request;
import okhttp3.Response;
/** OkHttp Interceptor that adds an authorization token header */
public class HuggingFaceAuthenticationInterceptor implements Interceptor {
private final String token;
HuggingFaceAuthenticationInterceptor(String token) {
Objects.requireNonNull(token, "HuggingFace api token required");
this.token = token;
}
@Override
public Response intercept(Chain chain) throws IOException {
Request request =
chain.request().newBuilder().header("Authorization", "Bearer " + token).build();
return chain.proceed(request);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/HuggingFaceService.java
|
package ai.knowly.langtorch.llm.huggingface;
import ai.knowly.langtorch.hub.module.token.EnableOpenAITokenRecord;
import ai.knowly.langtorch.llm.huggingface.exception.HuggingFaceExecutionException;
import ai.knowly.langtorch.llm.huggingface.exception.HuggingFaceHttpException;
import ai.knowly.langtorch.llm.huggingface.exception.HuggingFaceServiceInterruptedException;
import ai.knowly.langtorch.llm.huggingface.schema.config.HuggingFaceServiceConfig;
import ai.knowly.langtorch.llm.huggingface.schema.dto.CreateTextGenerationTaskRequest;
import ai.knowly.langtorch.llm.huggingface.schema.dto.CreateTextGenerationTaskResponse;
import ai.knowly.langtorch.utils.future.retry.FutureRetrier;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategy;
import com.google.common.flogger.FluentLogger;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import okhttp3.ConnectionPool;
import okhttp3.OkHttpClient;
import okhttp3.OkHttpClient.Builder;
import retrofit2.HttpException;
import retrofit2.Retrofit;
import retrofit2.adapter.guava.GuavaCallAdapterFactory;
import retrofit2.converter.jackson.JacksonConverterFactory;
/** Service for interacting with the HuggingFace API */
public class HuggingFaceService {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private static final String BASE_URL = "https://api-inference.huggingface.co/models/";
private final HuggingFaceApi api;
private final FutureRetrier futureRetrier;
private final ScheduledExecutorService scheduledExecutor;
@Inject
public HuggingFaceService(final HuggingFaceServiceConfig huggingFaceServiceConfig) {
ObjectMapper defaultObjectMapper = defaultObjectMapper();
OkHttpClient client = buildClient(huggingFaceServiceConfig);
Retrofit retrofit =
defaultRetrofit(huggingFaceServiceConfig.modelId(), client, defaultObjectMapper);
scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
this.futureRetrier =
new FutureRetrier(
scheduledExecutor,
huggingFaceServiceConfig.backoffStrategy(),
huggingFaceServiceConfig.retryConfig());
this.api = retrofit.create(HuggingFaceApi.class);
}
public static <T> T execute(ListenableFuture<T> apiCall) {
try {
return apiCall.get();
} catch (InterruptedException e) {
// Restore the interrupt status
Thread.currentThread().interrupt();
// Optionally, log or handle the exception here.
logger.atSevere().withCause(e).log("Thread was interrupted during API call.");
throw new HuggingFaceServiceInterruptedException(e);
} catch (ExecutionException e) {
if (e.getCause() instanceof HttpException) {
HttpException httpException = (HttpException) e.getCause();
try {
String errorBody = httpException.response().errorBody().string();
logger.atSevere().log("HTTP Error: %s", errorBody);
throw new HuggingFaceHttpException(errorBody);
} catch (IOException ioException) {
logger.atSevere().withCause(ioException).log("Error while reading errorBody");
}
}
throw new HuggingFaceExecutionException(e);
}
}
public static ObjectMapper defaultObjectMapper() {
ObjectMapper mapper = new ObjectMapper();
mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
mapper.setSerializationInclusion(Include.NON_NULL);
mapper.setPropertyNamingStrategy(PropertyNamingStrategy.SNAKE_CASE);
return mapper;
}
public static OkHttpClient buildClient(HuggingFaceServiceConfig huggingFaceServiceConfig) {
return new Builder()
.addInterceptor(
new HuggingFaceAuthenticationInterceptor(huggingFaceServiceConfig.apiToken()))
.connectionPool(new ConnectionPool(5, 1, TimeUnit.SECONDS))
.readTimeout(huggingFaceServiceConfig.timeoutDuration().toMillis(), TimeUnit.MILLISECONDS)
.build();
}
public static Retrofit defaultRetrofit(String modelId, OkHttpClient client, ObjectMapper mapper) {
String url = BASE_URL + modelId + "/";
return new Retrofit.Builder()
.baseUrl(url)
.client(client)
.addConverterFactory(JacksonConverterFactory.create(mapper))
.addCallAdapterFactory(GuavaCallAdapterFactory.create())
.build();
}
public List<CreateTextGenerationTaskResponse> createTextGenerationTask(
CreateTextGenerationTaskRequest request) {
return execute(createTextGenerationTaskAsync(request));
}
@EnableOpenAITokenRecord
public ListenableFuture<List<CreateTextGenerationTaskResponse>> createTextGenerationTaskAsync(
CreateTextGenerationTaskRequest request) {
return futureRetrier.runWithRetries(
() -> api.createTextGenerationTask(request), result -> true);
}
}
|
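A minimal sketch wiring the service to the Lombok DTOs defined in the rows below; the HF_API_TOKEN environment variable and the gpt2 model id are illustrative assumptions.

```java
import ai.knowly.langtorch.llm.huggingface.HuggingFaceService;
import ai.knowly.langtorch.llm.huggingface.schema.config.HuggingFaceServiceConfig;
import ai.knowly.langtorch.llm.huggingface.schema.dto.CreateTextGenerationTaskRequest;
import ai.knowly.langtorch.llm.huggingface.schema.dto.CreateTextGenerationTaskResponse;
import ai.knowly.langtorch.llm.huggingface.schema.dto.Options;
import ai.knowly.langtorch.llm.huggingface.schema.dto.Parameters;
import java.util.List;

public class HuggingFaceGenerateSketch {
  public static void main(String[] args) {
    HuggingFaceServiceConfig config =
        HuggingFaceServiceConfig.builder()
            .setApiToken(System.getenv("HF_API_TOKEN")) // assumed env var
            .setModelId("gpt2")                         // assumed model id
            .build();
    HuggingFaceService service = new HuggingFaceService(config);
    CreateTextGenerationTaskRequest request =
        CreateTextGenerationTaskRequest.builder()
            .setInputs("Once upon a time")
            .setParameters(Parameters.builder().setMaxNewTokens(40).setDoSample(true).build())
            .setOptions(Options.builder().setWaitForModel(true).build())
            .build();
    List<CreateTextGenerationTaskResponse> responses = service.createTextGenerationTask(request);
    System.out.println(responses.get(0).getGeneratedText());
  }
}
```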
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/HuggingFaceServiceConfigModule.java
|
package ai.knowly.langtorch.llm.huggingface;
import ai.knowly.langtorch.llm.huggingface.schema.config.HuggingFaceServiceConfig;
import ai.knowly.langtorch.utils.Environment;
import ai.knowly.langtorch.utils.api.key.HuggingFaceKeyUtil;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
/** Provides the HuggingFace service configuration. */
public class HuggingFaceServiceConfigModule extends AbstractModule {
private final String modelId;
public HuggingFaceServiceConfigModule(String modelId) {
this.modelId = modelId;
}
@Provides
public HuggingFaceServiceConfig provideHuggingFaceServiceConfig() {
return HuggingFaceServiceConfig.builder()
.setApiToken(HuggingFaceKeyUtil.getKey(Environment.PRODUCTION))
.setModelId(modelId)
.build();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/exception/HuggingFaceExecutionException.java
|
package ai.knowly.langtorch.llm.huggingface.exception;
public class HuggingFaceExecutionException extends RuntimeException {
public HuggingFaceExecutionException(Exception e) {
super(e);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/exception/HuggingFaceHttpException.java
|
package ai.knowly.langtorch.llm.huggingface.exception;
public class HuggingFaceHttpException extends RuntimeException {
public HuggingFaceHttpException(String e) {
super(e);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/exception/HuggingFaceServiceInterruptedException.java
|
package ai.knowly.langtorch.llm.huggingface.exception;
public class HuggingFaceServiceInterruptedException extends RuntimeException {
public HuggingFaceServiceInterruptedException(InterruptedException e) {
super(e);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/schema/config/HuggingFaceServiceConfig.java
|
package ai.knowly.langtorch.llm.huggingface.schema.config;
import ai.knowly.langtorch.utils.future.retry.RetryConfig;
import ai.knowly.langtorch.utils.future.retry.strategy.BackoffStrategy;
import ai.knowly.langtorch.utils.future.retry.strategy.ExponentialBackoffStrategy;
import com.google.auto.value.AutoValue;
import java.time.Duration;
/**
* The HuggingFaceServiceConfig class is an AutoValue class with a builder pattern that contains
* various configurations for HuggingFace service.
*/
@AutoValue
public abstract class HuggingFaceServiceConfig {
public static Builder builder() {
return new AutoValue_HuggingFaceServiceConfig.Builder()
.setTimeoutDuration(Duration.ofSeconds(10))
.setRetryConfig(RetryConfig.getDefaultInstance())
.setBackoffStrategy(new ExponentialBackoffStrategy());
}
public abstract String apiToken();
public abstract String modelId();
public abstract Duration timeoutDuration();
public abstract BackoffStrategy backoffStrategy();
public abstract RetryConfig retryConfig();
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder setApiToken(String newApiKey);
public abstract Builder setModelId(String newModelId);
public abstract Builder setTimeoutDuration(Duration newTimeoutDuration);
public abstract Builder setBackoffStrategy(BackoffStrategy newBackoffStrategy);
public abstract Builder setRetryConfig(RetryConfig newRetryConfig);
public abstract HuggingFaceServiceConfig build();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/schema/dto/CreateTextGenerationTaskRequest.java
|
package ai.knowly.langtorch.llm.huggingface.schema.dto;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NonNull;
/**
* CreateTextGenerationTaskRequest is a DTO class for the request body of the Text Generation API.
*/
@AllArgsConstructor
@Data
@Builder(toBuilder = true, setterPrefix = "set")
public class CreateTextGenerationTaskRequest {
@NonNull private String inputs;
private Parameters parameters;
private Options options;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/schema/dto/CreateTextGenerationTaskResponse.java
|
package ai.knowly.langtorch.llm.huggingface.schema.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* CreateTextGenerationTaskResponse is a DTO class for the response body of the Text Generation API.
*/
@AllArgsConstructor
@Data
@NoArgsConstructor
public class CreateTextGenerationTaskResponse {
@JsonProperty("generated_text")
private String generatedText;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/schema/dto/Options.java
|
package ai.knowly.langtorch.llm.huggingface.schema.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@NoArgsConstructor
@AllArgsConstructor
@Builder(toBuilder = true, setterPrefix = "set")
public class Options {
@JsonProperty("use_cache")
private Boolean useCache;
@JsonProperty("wait_for_model")
private Boolean waitForModel;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/huggingface/schema/dto/Parameters.java
|
package ai.knowly.langtorch.llm.huggingface.schema.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@NoArgsConstructor
@AllArgsConstructor
@Builder(toBuilder = true, setterPrefix = "set")
public class Parameters {
@JsonProperty("top_k")
private Integer topK;
@JsonProperty("top_p")
private Float topP;
private Float temperature;
@JsonProperty("repetition_penalty")
private Float repetitionPenalty;
@JsonProperty("max_new_tokens")
private Integer maxNewTokens;
@JsonProperty("max_time")
private Float maxTime;
@JsonProperty("return_full_text")
private Boolean returnFullText;
@JsonProperty("num_return_sequences")
private Integer numReturnSequences;
@JsonProperty("do_sample")
private Boolean doSample;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/MiniMaxApi.java
|
package ai.knowly.langtorch.llm.minimax;
import ai.knowly.langtorch.llm.minimax.schema.dto.completion.ChatCompletionRequest;
import ai.knowly.langtorch.llm.minimax.schema.dto.completion.ChatCompletionResult;
import ai.knowly.langtorch.llm.minimax.schema.dto.embedding.EmbeddingRequest;
import ai.knowly.langtorch.llm.minimax.schema.dto.embedding.EmbeddingResult;
import com.google.common.util.concurrent.ListenableFuture;
import retrofit2.http.Body;
import retrofit2.http.POST;
/**
* doc link: https://api.minimax.chat/document/guides
*
* @author maxiao
* @date 2023/06/07
*/
public interface MiniMaxApi {
@POST("/v1/text/chatcompletion")
ListenableFuture<ChatCompletionResult> createChatCompletion(@Body ChatCompletionRequest request);
@POST("/v1/embeddings")
ListenableFuture<EmbeddingResult> createEmbeddings(@Body EmbeddingRequest request);
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/MiniMaxAuthenticationInterceptor.java
|
package ai.knowly.langtorch.llm.minimax;
import java.io.IOException;
import java.util.Objects;
import okhttp3.HttpUrl;
import okhttp3.Interceptor;
import okhttp3.Request;
import okhttp3.Response;
import org.jetbrains.annotations.NotNull;
/**
* OkHttp Interceptor that adds an authorization token header
*
* @author maxiao
* @date 2023/06/07
*/
public class MiniMaxAuthenticationInterceptor implements Interceptor {
private final String groupId;
private final String apiKey;
MiniMaxAuthenticationInterceptor(String groupId, String apiKey) {
Objects.requireNonNull(groupId, "Minimax groupId required");
Objects.requireNonNull(apiKey, "Minimax apiKey required");
this.groupId = groupId;
this.apiKey = apiKey;
}
@Override
public Response intercept(@NotNull Chain chain) throws IOException {
HttpUrl url = chain.request().url();
HttpUrl completeUrl = url.newBuilder().addQueryParameter("GroupId", groupId).build();
Request request =
chain
.request()
.newBuilder()
.url(completeUrl)
.header("Authorization", "Bearer " + apiKey)
.header("Content-Type", "application/json")
.build();
return chain.proceed(request);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/MiniMaxService.java
|
package ai.knowly.langtorch.llm.minimax;
import ai.knowly.langtorch.llm.minimax.schema.MiniMaxApiBusinessErrorException;
import ai.knowly.langtorch.llm.minimax.schema.MiniMaxApiExecutionException;
import ai.knowly.langtorch.llm.minimax.schema.MiniMaxApiServiceInterruptedException;
import ai.knowly.langtorch.llm.minimax.schema.config.MiniMaxServiceConfig;
import ai.knowly.langtorch.llm.minimax.schema.dto.BaseResp;
import ai.knowly.langtorch.llm.minimax.schema.dto.completion.ChatCompletionRequest;
import ai.knowly.langtorch.llm.minimax.schema.dto.completion.ChatCompletionResult;
import ai.knowly.langtorch.llm.minimax.schema.dto.embedding.EmbeddingRequest;
import ai.knowly.langtorch.llm.minimax.schema.dto.embedding.EmbeddingResult;
import ai.knowly.langtorch.utils.future.retry.FutureRetrier;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategy;
import com.google.common.flogger.FluentLogger;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.inject.Inject;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import okhttp3.ConnectionPool;
import okhttp3.OkHttpClient;
import retrofit2.HttpException;
import retrofit2.Retrofit;
import retrofit2.adapter.guava.GuavaCallAdapterFactory;
import retrofit2.converter.jackson.JacksonConverterFactory;
/**
* MiniMaxService wraps MiniMaxApi and provides a synchronous and asynchronous interface to the
* MiniMax API
*
* @author maxiao
* @date 2023/06/07
*/
public class MiniMaxService {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private static final String BASE_URL = "https://api.minimax.chat";
private final MiniMaxApi api;
private final FutureRetrier futureRetrier;
private final ScheduledExecutorService scheduledExecutor;
@Inject
public MiniMaxService(final MiniMaxServiceConfig miniMaxServiceConfig) {
ObjectMapper defaultObjectMapper = defaultObjectMapper();
OkHttpClient client = buildClient(miniMaxServiceConfig);
Retrofit retrofit = defaultRetrofit(client, defaultObjectMapper);
scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
this.futureRetrier =
new FutureRetrier(
scheduledExecutor,
miniMaxServiceConfig.backoffStrategy(),
miniMaxServiceConfig.retryConfig());
this.api = retrofit.create(MiniMaxApi.class);
}
public static Retrofit defaultRetrofit(OkHttpClient client, ObjectMapper mapper) {
return new Retrofit.Builder()
.baseUrl(BASE_URL)
.client(client)
.addConverterFactory(JacksonConverterFactory.create(mapper))
.addCallAdapterFactory(GuavaCallAdapterFactory.create())
.build();
}
public static OkHttpClient buildClient(MiniMaxServiceConfig miniMaxServiceConfig) {
OkHttpClient.Builder builder =
new OkHttpClient.Builder()
.addInterceptor(
new MiniMaxAuthenticationInterceptor(
miniMaxServiceConfig.groupId(), miniMaxServiceConfig.apiKey()))
.connectionPool(new ConnectionPool(5, 1, TimeUnit.SECONDS))
.readTimeout(miniMaxServiceConfig.timeoutDuration().toMillis(), TimeUnit.MILLISECONDS);
return builder.build();
}
public static ObjectMapper defaultObjectMapper() {
ObjectMapper mapper = new ObjectMapper();
mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
mapper.setPropertyNamingStrategy(PropertyNamingStrategy.SNAKE_CASE);
return mapper;
}
public ChatCompletionResult createChatCompletion(ChatCompletionRequest request) {
ChatCompletionResult chatCompletionResult =
execute(
futureRetrier.runWithRetries(() -> api.createChatCompletion(request), result -> true));
checkResp(chatCompletionResult.getBaseResp());
return chatCompletionResult;
}
public ListenableFuture<ChatCompletionResult> createChatCompletionAsync(
ChatCompletionRequest request) {
return futureRetrier.runWithRetries(() -> api.createChatCompletion(request), result -> true);
}
public EmbeddingResult createEmbeddings(EmbeddingRequest request) {
EmbeddingResult embeddingResult =
execute(futureRetrier.runWithRetries(() -> api.createEmbeddings(request), result -> true));
checkResp(embeddingResult.getBaseResp());
return embeddingResult;
}
public ListenableFuture<EmbeddingResult> createEmbeddingsAsync(EmbeddingRequest request) {
return futureRetrier.runWithRetries(() -> api.createEmbeddings(request), result -> true);
}
/** Throw exception messages if the request fails */
public void checkResp(BaseResp baseResp) {
if (baseResp.getStatusCode() != 0) {
throw new MiniMaxApiBusinessErrorException(baseResp.getStatusCode(), baseResp.getStatusMsg());
}
}
/**
* Calls the MiniMax AI api, returns the response, and parses error messages if the request fails
*/
public static <T> T execute(ListenableFuture<T> apiCall) {
try {
return apiCall.get();
} catch (InterruptedException e) {
// Restore the interrupt status
Thread.currentThread().interrupt();
// Optionally, log or handle the exception here.
logger.atSevere().withCause(e).log("Thread was interrupted during API call.");
throw new MiniMaxApiServiceInterruptedException(e);
} catch (ExecutionException e) {
if (e.getCause() instanceof HttpException) {
HttpException httpException = (HttpException) e.getCause();
try {
String errorBody = httpException.response().errorBody().string();
logger.atSevere().log("HTTP Error: %s", errorBody);
} catch (IOException ioException) {
logger.atSevere().withCause(ioException).log("Error while reading errorBody");
}
}
throw new MiniMaxApiExecutionException(e);
}
}
}
|
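A wiring sketch, assuming MINIMAX_GROUP_ID and MINIMAX_API_KEY environment variables. ChatCompletionRequest's definition is truncated at the end of this dump, so request construction is not shown.

```java
import ai.knowly.langtorch.llm.minimax.MiniMaxService;
import ai.knowly.langtorch.llm.minimax.schema.config.MiniMaxServiceConfig;

public class MiniMaxWiringSketch {
  public static void main(String[] args) {
    MiniMaxServiceConfig config =
        MiniMaxServiceConfig.builder()
            .setGroupId(System.getenv("MINIMAX_GROUP_ID"))
            .setApiKey(System.getenv("MINIMAX_API_KEY"))
            .build();
    MiniMaxService service = new MiniMaxService(config);
    // createChatCompletion(...) and createEmbeddings(...) run through the
    // retrier, then checkResp(...) throws MiniMaxApiBusinessErrorException
    // for any non-zero BaseResp.statusCode.
  }
}
```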
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/MiniMaxApiBusinessErrorException.java
|
package ai.knowly.langtorch.llm.minimax.schema;
/**
* @author maxiao
* @date 2023/06/17
*/
public class MiniMaxApiBusinessErrorException extends RuntimeException {
final Long statusCode;
public MiniMaxApiBusinessErrorException(Long statusCode, String statusMessage) {
super(statusMessage);
this.statusCode = statusCode;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/MiniMaxApiExecutionException.java
|
package ai.knowly.langtorch.llm.minimax.schema;
/**
* @author maxiao
* @date 2023/06/07
*/
public class MiniMaxApiExecutionException extends RuntimeException {
public MiniMaxApiExecutionException(Exception e) {
super(e);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/MiniMaxApiServiceInterruptedException.java
|
package ai.knowly.langtorch.llm.minimax.schema;
/**
* @author maxiao
* @date 2023/06/08
*/
public class MiniMaxApiServiceInterruptedException extends RuntimeException {
public MiniMaxApiServiceInterruptedException(InterruptedException e) {
super(e);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/config/MiniMaxServiceConfig.java
|
package ai.knowly.langtorch.llm.minimax.schema.config;
import ai.knowly.langtorch.utils.future.retry.RetryConfig;
import ai.knowly.langtorch.utils.future.retry.strategy.BackoffStrategy;
import ai.knowly.langtorch.utils.future.retry.strategy.ExponentialBackoffStrategy;
import com.google.auto.value.AutoValue;
import java.time.Duration;
/**
* @author maxiao
* @date 2023/06/07
*/
@AutoValue
public abstract class MiniMaxServiceConfig {
public static Builder builder() {
return new AutoValue_MiniMaxServiceConfig.Builder()
.setTimeoutDuration(Duration.ofSeconds(10))
.setRetryConfig(RetryConfig.getDefaultInstance())
.setBackoffStrategy(new ExponentialBackoffStrategy());
}
public abstract String groupId();
public abstract String apiKey();
public abstract Duration timeoutDuration();
public abstract BackoffStrategy backoffStrategy();
public abstract RetryConfig retryConfig();
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder setGroupId(String newGroupId);
public abstract Builder setApiKey(String newApiKey);
public abstract Builder setTimeoutDuration(Duration newTimeoutDuration);
public abstract Builder setBackoffStrategy(BackoffStrategy newBackoffStrategy);
public abstract Builder setRetryConfig(RetryConfig newRetryConfig);
public abstract MiniMaxServiceConfig build();
}
}
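// Illustrative sketch, not part of the original source: constructing a config with the
// builder above. The group id and API key literals are placeholders; timeout, retry
// config, and backoff strategy already default in builder() and the timeout is
// overridden here only to show the setter.
class MiniMaxServiceConfigSketch {
  static MiniMaxServiceConfig sample() {
    return MiniMaxServiceConfig.builder()
        .setGroupId("YOUR_GROUP_ID")
        .setApiKey("YOUR_MINIMAX_API_KEY")
        .setTimeoutDuration(Duration.ofSeconds(30))
        .build();
  }
}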
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/dto/BaseResp.java
|
package ai.knowly.langtorch.llm.minimax.schema.dto;
import lombok.Data;
/**
* @author maxiao
* @date 2023/06/17
*/
@Data
public class BaseResp {
/**
 * Status code. 1000: unknown error; 1001: timeout; 1002: rate limit triggered; 1004:
 * authentication failure; 1008: insufficient balance; 1013: internal service error; 1027: output
 * content seriously violates policy; 2013: abnormal input format
 */
private Long statusCode;
/** Error details */
private String statusMsg;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/dto/completion/ChatCompletionRequest.java
|
package ai.knowly.langtorch.llm.minimax.schema.dto.completion;
import ai.knowly.langtorch.schema.io.Input;
import ai.knowly.langtorch.schema.io.Output;
import ai.knowly.langtorch.store.memory.MemoryValue;
import java.util.List;
import lombok.Builder;
import lombok.Data;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
public class ChatCompletionRequest {
/** The model to call; currently the only supported value is abab5-chat */
private String model;
/**
 * Adds emotion prediction to the response. Note that when with_emotion=true and the request
 * context (input plus output text) is long, the request slows down significantly, potentially by
 * several seconds
 */
@Builder.Default private Boolean withEmotion = false;
/**
 * Whether to stream results back in batches. If set to true, the results are returned in batches
 * with a separator between chunks; to use the standard SSE response format, also set the
 * use_standard_sse parameter to true
 */
@Builder.Default private Boolean stream = false;
/**
 * Whether to use the standard SSE format. When set to true, streamed results are separated by two
 * newlines. This parameter only takes effect when stream is set to true
 */
@Builder.Default private Boolean useStandardSse = false;
/**
 * How many results to generate; defaults to 1 if unset, with a maximum of 4. Because beam_width
 * produces multiple results, larger values consume more tokens
 */
@Builder.Default private Integer beamWidth = 1;
/**
 * Dialogue background, character, or function setting. Limited to at most 4096 tokens and cannot
 * be empty; its length affects interface performance
 */
private String prompt;
/** Dialogue Meta Information */
private RoleMeta roleMeta;
/** Dialogue content */
private List<Message> messages;
/**
 * If true, the current request runs in continue mode: the reply continues the last message in the
 * incoming messages instead of answering it. In this mode the sender of the last message is not
 * limited to USER and can also be BOT. For example, if the last incoming message is
 * {"sender_type": "USER", "text": "The Gifted"}, the completion may continue with "must be
 * useful"
 */
@Builder.Default private Boolean continueLastMessage = false;
/**
 * The maximum number of generated tokens. Note that this parameter does not affect the generation
 * behavior of the model itself; it only truncates tokens beyond the limit. The sum of the input
 * token count and this value must be less than 4096, otherwise the request will fail
 */
@Builder.Default private Long tokensToGenerate = 128L;
/**
 * Higher values make the output more random, while lower values make it more focused and
 * deterministic. We suggest setting only one of temperature and top_p at a time
 */
@Builder.Default private Float temperature = 0.9f;
/**
 * Sampling parameter: the smaller the value, the more deterministic the result; the larger the
 * value, the more random the result
 */
@Builder.Default private Float topP = 0.95f;
/**
 * Controls masking of privacy-sensitive text in the output, currently including but not limited
 * to emails, domain names, links, ID numbers, home addresses, etc. Defaults to false, which means
 * masking is enabled; set to true to skip masking
 */
@Builder.Default private Boolean skipInfoMask = false;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
public static class RoleMeta {
/** User name */
private String userName;
/** Bot name */
private String botName;
}
@Data
@Builder(toBuilder = true, setterPrefix = "set")
public static class Message implements Input, Output, MemoryValue {
/** Sender type; currently only two values are allowed: USER and BOT */
private String senderType;
/** Message content; its length affects interface performance */
private String text;
}
}
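// Illustrative sketch, not part of the original source: a continue-mode request as
// described by the continueLastMessage Javadoc above; the reply continues the last
// message instead of answering it. The prompt text is a placeholder.
class ContinueModeSketch {
  static ChatCompletionRequest continuationOf(String partialSentence) {
    return ChatCompletionRequest.builder()
        .setModel("abab5-chat")
        .setPrompt("Continue the last message naturally.")
        .setContinueLastMessage(true)
        .setMessages(
            java.util.Collections.singletonList(
                ChatCompletionRequest.Message.builder()
                    .setSenderType("USER")
                    .setText(partialSentence)
                    .build()))
        .build();
  }
}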
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/dto/completion/ChatCompletionResult.java
|
package ai.knowly.langtorch.llm.minimax.schema.dto.completion;
import ai.knowly.langtorch.llm.minimax.schema.dto.BaseResp;
import ai.knowly.langtorch.schema.io.Input;
import ai.knowly.langtorch.schema.io.Output;
import ai.knowly.langtorch.store.memory.MemoryValue;
import java.util.List;
import lombok.Data;
/** Object containing a response from the chat completions api. */
@Data
public class ChatCompletionResult {
/** Request initiation time, as a Unix timestamp in nanoseconds */
private Long created;
/** The model specified by the request */
private String model;
/** The recommended best result */
private String reply;
/** Whether the input hit sensitive words */
private Boolean inputSensitive;
/**
 * The type of sensitive word hit by the input. When inputSensitive is true, the value is one of:
 * 1. serious violation 2. pornography 3. advertising 4. prohibited content 5. abuse 6. violence
 * 7. other
 */
private Long inputSensitiveType;
/** Whether the output hit sensitive words */
private Boolean outputSensitive;
/**
 * The type of sensitive word hit by the output; when outputSensitive is true, it takes the same
 * set of values as inputSensitiveType
 */
private Long outputSensitiveType;
/** All results; at most 4 */
private List<Choices> choices;
/** Usage of tokens */
private Usage usage;
private BaseResp baseResp;
@Data
public static class Choices implements Input, Output, MemoryValue {
/** Text result */
private String text;
/** Rank of this result */
private Long index;
/** Log-probability score */
private Float logprobes;
/**
 * End reason, one of: stop: the API returned the complete result generated by the model; length:
 * the generated result exceeded tokens_to_generate and the content was truncated
 */
private String finishReason;
/**
 * Emotion prediction for the reply text, taking one of eight values, including sadness,
 * embarrassment, happiness, surprise, anger, panic, and confusion
 */
private String emotion;
/**
 * When request.stream is true (streaming mode), the reply text is returned in batches through
 * delta. The delta of the final message is empty, and sensitive-word detection is performed on
 * the overall reply
 */
private String delta;
}
@Data
public static class Usage {
/**
 * The total number of tokens consumed, including input and output. It is calculated as input
 * tokens + maximum output tokens x beam_width, with the token as the basic accounting unit for
 * both input and output.
 *
 * <p>For example, if beam_width is 2, the input is 100 tokens, and the two outputs are 20 and 30
 * tokens respectively, the final consumption is 100 + 30 x 2 = 160 tokens
 */
private Long totalTokens;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/dto/embedding/EmbeddingRequest.java
|
package ai.knowly.langtorch.llm.minimax.schema.dto.embedding;
import java.util.List;
import lombok.*;
/** Creates an embedding vector representing the input text. */
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class EmbeddingRequest {
/** Requested model; currently only embo-01 is supported */
private String model;
/** The texts for which vectors are generated */
private List<String> texts;
/**
 * The target usage scenario of the generated vectors. db: the text is stored in a vector library
 * as retrievable content; query: the text is a query used for retrieval
 */
private String type;
}
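// Illustrative sketch, not part of the original source: building requests for the two
// type values documented above; "db" for texts stored into a vector library, "query"
// for retrieval queries. The model name follows the Javadoc on the model field.
class EmbeddingRequestSketch {
  static EmbeddingRequest forStorage(List<String> texts) {
    return EmbeddingRequest.builder().model("embo-01").texts(texts).type("db").build();
  }

  static EmbeddingRequest forQuery(List<String> texts) {
    return EmbeddingRequest.builder().model("embo-01").texts(texts).type("query").build();
  }
}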
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/minimax/schema/dto/embedding/EmbeddingResult.java
|
package ai.knowly.langtorch.llm.minimax.schema.dto.embedding;
import ai.knowly.langtorch.llm.minimax.schema.dto.BaseResp;
import java.util.List;
import lombok.Data;
/** An object containing a response from the answer api */
@Data
public class EmbeddingResult {
/** Vector results: each input text corresponds to a float32 array of length 1536 */
private List<List<Float>> vectors;
private BaseResp baseResp;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/OpenAIApi.java
|
package ai.knowly.langtorch.llm.openai;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.CompletionRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.CompletionResult;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.chat.ChatCompletionRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.chat.ChatCompletionResult;
import ai.knowly.langtorch.llm.openai.schema.dto.edit.EditRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.edit.EditResult;
import ai.knowly.langtorch.llm.openai.schema.dto.embedding.EmbeddingRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.embedding.EmbeddingResult;
import ai.knowly.langtorch.llm.openai.schema.dto.image.CreateImageRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.image.ImageResult;
import ai.knowly.langtorch.llm.openai.schema.dto.moderation.ModerationRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.moderation.ModerationResult;
import com.google.common.util.concurrent.ListenableFuture;
import okhttp3.RequestBody;
import retrofit2.http.Body;
import retrofit2.http.POST;
// This is a Java interface defining methods for making API requests to the OpenAI API. Each method
// corresponds to a specific endpoint in the API and takes a request object as a parameter.
public interface OpenAIApi {
@POST("/v1/completions")
ListenableFuture<CompletionResult> createCompletion(@Body CompletionRequest request);
@POST("/v1/chat/completions")
ListenableFuture<ChatCompletionResult> createChatCompletion(@Body ChatCompletionRequest request);
@POST("/v1/edits")
ListenableFuture<EditResult> createEdit(@Body EditRequest request);
@POST("/v1/embeddings")
ListenableFuture<EmbeddingResult> createEmbeddings(@Body EmbeddingRequest request);
@POST("/v1/images/generations")
ListenableFuture<ImageResult> createImage(@Body CreateImageRequest request);
@POST("/v1/images/edits")
ListenableFuture<ImageResult> createImageEdit(@Body RequestBody requestBody);
@POST("/v1/images/variations")
ListenableFuture<ImageResult> createImageVariation(@Body RequestBody requestBody);
@POST("/v1/moderations")
ListenableFuture<ModerationResult> createModeration(@Body ModerationRequest request);
}
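// Illustrative sketch, not part of the original source: how this interface is typically
// materialized. OpenAIService.defaultRetrofit (later in this document) performs the same
// wiring; the Guava call adapter is what makes each endpoint return a ListenableFuture.
class OpenAIApiWiringSketch {
  static OpenAIApi create(
      okhttp3.OkHttpClient client, com.fasterxml.jackson.databind.ObjectMapper mapper) {
    return new retrofit2.Retrofit.Builder()
        .baseUrl("https://api.openai.com/")
        .client(client)
        .addConverterFactory(retrofit2.converter.jackson.JacksonConverterFactory.create(mapper))
        .addCallAdapterFactory(retrofit2.adapter.guava.GuavaCallAdapterFactory.create())
        .build()
        .create(OpenAIApi.class);
  }
}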
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/OpenAIAuthenticationInterceptor.java
|
package ai.knowly.langtorch.llm.openai;
import java.io.IOException;
import java.util.Objects;
import okhttp3.Interceptor;
import okhttp3.Request;
import okhttp3.Response;
/** OkHttp Interceptor that adds an authorization token header */
public class OpenAIAuthenticationInterceptor implements Interceptor {
private final String token;
OpenAIAuthenticationInterceptor(String token) {
Objects.requireNonNull(token, "OpenAI token required");
this.token = token;
}
@Override
public Response intercept(Chain chain) throws IOException {
Request request =
chain.request().newBuilder().header("Authorization", "Bearer " + token).build();
return chain.proceed(request);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/OpenAIService.java
|
package ai.knowly.langtorch.llm.openai;
import ai.knowly.langtorch.hub.module.token.EnableOpenAITokenRecord;
import ai.knowly.langtorch.llm.openai.schema.config.OpenAIProxyConfig.ProxyType;
import ai.knowly.langtorch.llm.openai.schema.config.OpenAIServiceConfig;
import ai.knowly.langtorch.llm.openai.schema.dto.OpenAIError;
import ai.knowly.langtorch.llm.openai.schema.dto.OpenAIHttpParseException;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.CompletionRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.CompletionResult;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.chat.ChatCompletionRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.completion.chat.ChatCompletionResult;
import ai.knowly.langtorch.llm.openai.schema.dto.edit.EditRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.edit.EditResult;
import ai.knowly.langtorch.llm.openai.schema.dto.embedding.EmbeddingRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.embedding.EmbeddingResult;
import ai.knowly.langtorch.llm.openai.schema.dto.image.CreateImageEditRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.image.CreateImageRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.image.CreateImageVariationRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.image.ImageResult;
import ai.knowly.langtorch.llm.openai.schema.dto.moderation.ModerationRequest;
import ai.knowly.langtorch.llm.openai.schema.dto.moderation.ModerationResult;
import ai.knowly.langtorch.llm.openai.schema.exception.OpenAIApiExecutionException;
import ai.knowly.langtorch.llm.openai.schema.exception.OpenAIServiceInterruptedException;
import ai.knowly.langtorch.utils.future.retry.FutureRetrier;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategy;
import com.google.common.flogger.FluentLogger;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.net.Proxy.Type;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import okhttp3.ConnectionPool;
import okhttp3.MediaType;
import okhttp3.MultipartBody;
import okhttp3.OkHttpClient;
import okhttp3.OkHttpClient.Builder;
import okhttp3.RequestBody;
import org.jetbrains.annotations.NotNull;
import retrofit2.HttpException;
import retrofit2.Retrofit;
import retrofit2.adapter.guava.GuavaCallAdapterFactory;
import retrofit2.converter.jackson.JacksonConverterFactory;
/** The OpenAIService provides methods for calling the OpenAI API and handling errors. */
public class OpenAIService {
private static final FluentLogger logger = FluentLogger.forEnclosingClass();
private static final String BASE_URL = "https://api.openai.com/";
private static final ObjectMapper mapper = defaultObjectMapper();
private static final String RESPONSE_FORMAT = "response_format";
private static final MediaType MULTI_PART_FORM_DATA = MediaType.parse("multipart/form-data");
private static final String IMAGE = "image";
private static final MediaType IMAGE_MEDIA_TYPE = MediaType.parse(IMAGE);
private final OpenAIApi api;
private final FutureRetrier futureRetrier;
private final ScheduledExecutorService scheduledExecutor;
@Inject
public OpenAIService(final OpenAIServiceConfig openAIServiceConfig) {
ObjectMapper defaultObjectMapper = defaultObjectMapper();
OkHttpClient client = buildClient(openAIServiceConfig);
Retrofit retrofit = defaultRetrofit(client, defaultObjectMapper);
scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
this.futureRetrier =
new FutureRetrier(
scheduledExecutor,
openAIServiceConfig.backoffStrategy(),
openAIServiceConfig.retryConfig());
this.api = retrofit.create(OpenAIApi.class);
}
/** Calls the OpenAI API, returns the response, and parses error messages if the request fails */
public static <T> T execute(ListenableFuture<T> apiCall) {
try {
return apiCall.get();
} catch (InterruptedException e) {
// Restore the interrupt status
Thread.currentThread().interrupt();
// Log and rethrow as a service-specific interrupted exception.
logger.atSevere().withCause(e).log("Thread was interrupted during API call.");
throw new OpenAIServiceInterruptedException(e);
} catch (ExecutionException e) {
if (e.getCause() instanceof HttpException) {
HttpException httpException = (HttpException) e.getCause();
try {
String errorBody = httpException.response().errorBody().string();
logger.atSevere().log("HTTP Error: %s", errorBody);
OpenAIError error = mapper.readValue(errorBody, OpenAIError.class);
throw new OpenAIHttpParseException(error, e, httpException.code());
} catch (IOException ioException) {
logger.atSevere().withCause(ioException).log("Error while reading errorBody");
}
}
throw new OpenAIApiExecutionException(e);
}
}
public static ObjectMapper defaultObjectMapper() {
ObjectMapper mapper = new ObjectMapper();
mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
mapper.setSerializationInclusion(Include.NON_NULL);
mapper.setPropertyNamingStrategy(PropertyNamingStrategy.SNAKE_CASE);
return mapper;
}
public static OkHttpClient buildClient(OpenAIServiceConfig openAIServiceConfig) {
Builder builder =
new Builder()
.addInterceptor(new OpenAIAuthenticationInterceptor(openAIServiceConfig.apiKey()))
.connectionPool(new ConnectionPool(5, 1, TimeUnit.SECONDS))
.readTimeout(openAIServiceConfig.timeoutDuration().toMillis(), TimeUnit.MILLISECONDS);
openAIServiceConfig
.proxyConfig()
.ifPresent(
proxyConfig ->
builder.proxy(
new Proxy(
convertProxyEnum(proxyConfig.proxyType()),
new InetSocketAddress(proxyConfig.proxyHost(), proxyConfig.proxyPort()))));
return builder.build();
}
public static Retrofit defaultRetrofit(OkHttpClient client, ObjectMapper mapper) {
return new Retrofit.Builder()
.baseUrl(BASE_URL)
.client(client)
.addConverterFactory(JacksonConverterFactory.create(mapper))
.addCallAdapterFactory(GuavaCallAdapterFactory.create())
.build();
}
private static Type convertProxyEnum(ProxyType proxyType) {
if (proxyType == ProxyType.HTTP) {
return Type.HTTP;
} else if (proxyType == ProxyType.SOCKS) {
return Type.SOCKS;
} else {
throw new IllegalArgumentException("Unknown proxy type: " + proxyType);
}
}
@NotNull
private static MultipartBody.Builder getMultipartBodyDefaultBuilder(
CreateImageEditRequest request, RequestBody imageBody) {
return new MultipartBody.Builder()
.setType(MULTI_PART_FORM_DATA)
.addFormDataPart("prompt", request.getPrompt())
.addFormDataPart("size", request.getSize())
.addFormDataPart(RESPONSE_FORMAT, request.getResponseFormat())
.addFormDataPart(IMAGE, IMAGE, imageBody);
}
public CompletionResult createCompletion(CompletionRequest request) {
return execute(createCompletionAsync(request));
}
@EnableOpenAITokenRecord
public ListenableFuture<CompletionResult> createCompletionAsync(CompletionRequest request) {
return futureRetrier.runWithRetries(() -> api.createCompletion(request), result -> true);
}
public ChatCompletionResult createChatCompletion(ChatCompletionRequest request) {
return execute(createChatCompletionAsync(request));
}
@EnableOpenAITokenRecord
public ListenableFuture<ChatCompletionResult> createChatCompletionAsync(
ChatCompletionRequest request) {
return futureRetrier.runWithRetries(() -> api.createChatCompletion(request), result -> true);
}
public EditResult createEdit(EditRequest request) {
return execute(createEditAsync(request));
}
public ListenableFuture<EditResult> createEditAsync(EditRequest request) {
return futureRetrier.runWithRetries(() -> api.createEdit(request), result -> true);
}
public EmbeddingResult createEmbeddings(EmbeddingRequest request) {
return execute(createEmbeddingsAsync(request));
}
public ListenableFuture<EmbeddingResult> createEmbeddingsAsync(EmbeddingRequest request) {
return futureRetrier.runWithRetries(() -> api.createEmbeddings(request), result -> true);
}
public ImageResult createImage(CreateImageRequest request) {
return execute(createImageAsync(request));
}
public ListenableFuture<ImageResult> createImageAsync(CreateImageRequest request) {
return futureRetrier.runWithRetries(() -> api.createImage(request), result -> true);
}
public ImageResult createImageEdit(
CreateImageEditRequest request, String imagePath, String maskPath) {
File image = new File(imagePath);
File mask = null;
if (maskPath != null) {
mask = new File(maskPath);
}
return createImageEdit(request, image, mask);
}
public ListenableFuture<ImageResult> createImageEditAsync(
CreateImageEditRequest request, String imagePath, String maskPath) {
File image = new File(imagePath);
File mask = null;
if (maskPath != null) {
mask = new File(maskPath);
}
return createImageEditAsync(request, image, mask);
}
public ImageResult createImageEdit(CreateImageEditRequest request, File image, File mask) {
RequestBody imageBody = RequestBody.create(image, IMAGE_MEDIA_TYPE);
MultipartBody.Builder builder = getMultipartBodyDefaultBuilder(request, imageBody);
if (request.getN() != null) {
builder.addFormDataPart("n", request.getN().toString());
}
if (mask != null) {
RequestBody maskBody = RequestBody.create(mask, IMAGE_MEDIA_TYPE);
builder.addFormDataPart("mask", "mask", maskBody);
}
return execute(
futureRetrier.runWithRetries(() -> api.createImageEdit(builder.build()), result -> true));
}
public ListenableFuture<ImageResult> createImageEditAsync(
CreateImageEditRequest request, File image, File mask) {
RequestBody imageBody = RequestBody.create(image, IMAGE_MEDIA_TYPE);
MultipartBody.Builder builder = getMultipartBodyDefaultBuilder(request, imageBody);
if (request.getN() != null) {
builder.addFormDataPart("n", request.getN().toString());
}
if (mask != null) {
RequestBody maskBody = RequestBody.create(mask, IMAGE_MEDIA_TYPE);
builder.addFormDataPart("mask", "mask", maskBody);
}
return futureRetrier.runWithRetries(() -> api.createImageEdit(builder.build()), result -> true);
}
public ImageResult createImageVariation(CreateImageVariationRequest request, String imagePath) {
File image = new File(imagePath);
return createImageVariation(request, image);
}
public ListenableFuture<ImageResult> createImageVariationAsync(
CreateImageVariationRequest request, String imagePath) {
File image = new File(imagePath);
return createImageVariationAsync(request, image);
}
public ImageResult createImageVariation(CreateImageVariationRequest request, File image) {
RequestBody imageBody = RequestBody.create(image, IMAGE_MEDIA_TYPE);
MultipartBody.Builder builder =
new MultipartBody.Builder()
.setType(MULTI_PART_FORM_DATA)
.addFormDataPart("size", request.getSize())
.addFormDataPart(RESPONSE_FORMAT, request.getResponseFormat())
.addFormDataPart(IMAGE, IMAGE, imageBody);
if (request.getN() != null) {
builder.addFormDataPart("n", request.getN().toString());
}
return execute(
futureRetrier.runWithRetries(
() -> api.createImageVariation(builder.build()), result -> true));
}
public ListenableFuture<ImageResult> createImageVariationAsync(
CreateImageVariationRequest request, File image) {
RequestBody imageBody = RequestBody.create(image, IMAGE_MEDIA_TYPE);
MultipartBody.Builder builder =
new MultipartBody.Builder()
.setType(MULTI_PART_FORM_DATA)
.addFormDataPart("size", request.getSize())
.addFormDataPart(RESPONSE_FORMAT, request.getResponseFormat())
.addFormDataPart(IMAGE, IMAGE, imageBody);
if (request.getN() != null) {
builder.addFormDataPart("n", request.getN().toString());
}
return futureRetrier.runWithRetries(
() -> api.createImageVariation(builder.build()), result -> true);
}
public ModerationResult createModeration(ModerationRequest request) {
return execute(
futureRetrier.runWithRetries(() -> api.createModeration(request), result -> true));
}
public ListenableFuture<ModerationResult> createModerationAsync(ModerationRequest request) {
return futureRetrier.runWithRetries(() -> api.createModeration(request), result -> true);
}
}
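// Illustrative usage sketch, not part of the original source: constructing the service
// from an explicit config and issuing a blocking chat completion. The API key and model
// name are placeholders; ChatMessage construction is elided because its definition lives
// elsewhere in this repository.
class OpenAIServiceUsageSketch {
  static ChatCompletionResult chat(
      java.util.List<ai.knowly.langtorch.schema.chat.ChatMessage> messages) {
    OpenAIService service =
        new OpenAIService(OpenAIServiceConfig.builder().setApiKey("YOUR_OPENAI_API_KEY").build());
    ChatCompletionRequest request =
        ChatCompletionRequest.builder().setModel("gpt-3.5-turbo").setMessages(messages).build();
    return service.createChatCompletion(request); // blocks via execute(...) above
  }
}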
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/OpenAIServiceConfigWithExplicitAPIKeyModule.java
|
package ai.knowly.langtorch.llm.openai;
import ai.knowly.langtorch.llm.openai.schema.config.OpenAIServiceConfig;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
/** Provides the OpenAI service configuration. */
public class OpenAIServiceConfigWithExplicitAPIKeyModule extends AbstractModule {
private final String apikey;
public OpenAIServiceConfigWithExplicitAPIKeyModule(String apikey) {
this.apikey = apikey;
}
@Provides
public OpenAIServiceConfig provideOpenAIServiceConfig() {
return OpenAIServiceConfig.builder().setApiKey(apikey).build();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/OpenAIServiceConfigWithImplicitAPIKeyModule.java
|
package ai.knowly.langtorch.llm.openai;
import ai.knowly.langtorch.llm.openai.schema.config.OpenAIServiceConfig;
import ai.knowly.langtorch.utils.Environment;
import ai.knowly.langtorch.utils.api.key.OpenAIKeyUtil;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
/** Provides the OpenAI service configuration. */
public class OpenAIServiceConfigWithImplicitAPIKeyModule extends AbstractModule {
// Get the OpenAI key from the environment variables and provide it to the OpenAI service.
@Provides
public OpenAIServiceConfig provideOpenAIServiceConfig() {
return OpenAIServiceConfig.builder()
.setApiKey(OpenAIKeyUtil.getKey(Environment.PRODUCTION))
.build();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/modules
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/modules/key/OpenAIServiceConfigWithExplicitAPIKeyModule.java
|
package ai.knowly.langtorch.llm.openai.modules.key;
import ai.knowly.langtorch.llm.openai.schema.config.OpenAIServiceConfig;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
/** This Java class provides an OpenAIServiceConfig object with an explicit API key. */
public class OpenAIServiceConfigWithExplicitAPIKeyModule extends AbstractModule {
private final String apikey;
public OpenAIServiceConfigWithExplicitAPIKeyModule(String apikey) {
this.apikey = apikey;
}
/**
* This Java function provides an OpenAIServiceConfig object with an API key.
*
* @return An instance of the `OpenAIServiceConfig` class with the API key set.
*/
@Provides
public OpenAIServiceConfig provideOpenAIServiceConfig() {
return OpenAIServiceConfig.builder().setApiKey(apikey).build();
}
}
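// Illustrative sketch, not part of the original source: installing this module in a
// Guice injector to obtain a fully wired OpenAIService (whose constructor is
// @Inject-annotated earlier in this document).
class ExplicitKeyModuleUsageSketch {
  static ai.knowly.langtorch.llm.openai.OpenAIService serviceFor(String apiKey) {
    return com.google.inject.Guice.createInjector(
            new OpenAIServiceConfigWithExplicitAPIKeyModule(apiKey))
        .getInstance(ai.knowly.langtorch.llm.openai.OpenAIService.class);
  }
}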
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/modules
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/modules/key/OpenAIServiceConfigWithImplicitAPIKeyModule.java
|
package ai.knowly.langtorch.llm.openai.modules.key;
import ai.knowly.langtorch.llm.openai.schema.config.OpenAIServiceConfig;
import ai.knowly.langtorch.utils.Environment;
import ai.knowly.langtorch.utils.api.key.OpenAIKeyUtil;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
/**
* This Java class provides the OpenAI key from environment variables to the OpenAI service
* configuration.
*/
public class OpenAIServiceConfigWithImplicitAPIKeyModule extends AbstractModule {
/**
* This function provides an OpenAIServiceConfig object with an API key set based on the current
* environment.
*
* @return An instance of the `OpenAIServiceConfig` class is being returned with the API key read
* from the environment variable.
*/
@Provides
public OpenAIServiceConfig provideOpenAIServiceConfig() {
return OpenAIServiceConfig.builder()
.setApiKey(OpenAIKeyUtil.getKey(Environment.PRODUCTION))
.build();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/config/OpenAIProxyConfig.java
|
package ai.knowly.langtorch.llm.openai.schema.config;
import com.google.auto.value.AutoValue;
/** This is a Java class for configuring a proxy with options for HTTP or SOCKS proxy types. */
@AutoValue
public abstract class OpenAIProxyConfig {
public static Builder builder() {
return new AutoValue_OpenAIProxyConfig.Builder();
}
public abstract ProxyType proxyType();
public abstract String proxyHost();
public abstract Integer proxyPort();
public enum ProxyType {
HTTP,
SOCKS
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder setProxyType(ProxyType newProxyType);
public abstract Builder setProxyHost(String newProxyHost);
public abstract Builder setProxyPort(int newProxyPort);
public abstract OpenAIProxyConfig build();
}
}
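// Illustrative sketch, not part of the original source: building an HTTP proxy config;
// host and port values are placeholders. OpenAIService.buildClient (earlier in this
// document) converts this into a java.net.Proxy for the OkHttp client.
class OpenAIProxyConfigSketch {
  static OpenAIProxyConfig localHttpProxy() {
    return OpenAIProxyConfig.builder()
        .setProxyType(OpenAIProxyConfig.ProxyType.HTTP)
        .setProxyHost("127.0.0.1")
        .setProxyPort(8080)
        .build();
  }
}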
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/config/OpenAIServiceConfig.java
|
package ai.knowly.langtorch.llm.openai.schema.config;
import ai.knowly.langtorch.utils.future.retry.RetryConfig;
import ai.knowly.langtorch.utils.future.retry.strategy.BackoffStrategy;
import ai.knowly.langtorch.utils.future.retry.strategy.ExponentialBackoffStrategy;
import com.google.auto.value.AutoValue;
import java.time.Duration;
import java.util.Optional;
/**
* The OpenAIServiceConfig class is an AutoValue class with a builder pattern that contains various
* configurations for an OpenAI service.
*/
@AutoValue
public abstract class OpenAIServiceConfig {
public static Builder builder() {
return new AutoValue_OpenAIServiceConfig.Builder()
.setTimeoutDuration(Duration.ofSeconds(10))
.setRetryConfig(RetryConfig.getDefaultInstance())
.setBackoffStrategy(new ExponentialBackoffStrategy());
}
public abstract String apiKey();
public abstract Duration timeoutDuration();
public abstract Optional<OpenAIProxyConfig> proxyConfig();
public abstract BackoffStrategy backoffStrategy();
public abstract RetryConfig retryConfig();
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder setApiKey(String newApiKey);
public abstract Builder setTimeoutDuration(Duration newTimeoutDuration);
public abstract Builder setProxyConfig(OpenAIProxyConfig newProxyConfig);
public abstract Builder setBackoffStrategy(BackoffStrategy newBackoffStrategy);
public abstract Builder setRetryConfig(RetryConfig newRetryConfig);
public abstract OpenAIServiceConfig build();
}
}
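// Illustrative sketch, not part of the original source: overriding the builder defaults
// and attaching an optional proxy (see OpenAIProxyConfig above). The API key is a
// placeholder.
class OpenAIServiceConfigSketch {
  static OpenAIServiceConfig withProxy(OpenAIProxyConfig proxyConfig) {
    return OpenAIServiceConfig.builder()
        .setApiKey("YOUR_OPENAI_API_KEY")
        .setTimeoutDuration(Duration.ofSeconds(30))
        .setProxyConfig(proxyConfig)
        .build();
  }
}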
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/OpenAIError.java
|
package ai.knowly.langtorch.llm.openai.schema.dto;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/** Represents the error body when an OpenAI request fails */
@Data
@NoArgsConstructor
@AllArgsConstructor
public class OpenAIError {
private OpenAiErrorDetails error;
@Data
@NoArgsConstructor
@AllArgsConstructor
public static class OpenAiErrorDetails {
/** Human-readable error message */
private String message;
/**
* OpenAI error type, for example "invalid_request_error"
* https://platform.openai.com/docs/guides/error-codes/python-library-error-types
*/
private String type;
private String param;
/** OpenAI error code, for example "invalid_api_key" */
private String code;
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/OpenAIHttpParseException.java
|
package ai.knowly.langtorch.llm.openai.schema.dto;
public class OpenAIHttpParseException extends RuntimeException {
/** HTTP status code */
public final int statusCode;
/** OpenAI error code, for example "invalid_api_key" */
public final String code;
public final String param;
/**
* OpenAI error type, for example "invalid_request_error"
* https://platform.openai.com/docs/guides/error-codes/python-library-error-types
*/
public final String type;
public OpenAIHttpParseException(OpenAIError error, Exception parent, int statusCode) {
super(error.getError().getMessage(), parent);
this.statusCode = statusCode;
this.code = error.getError().getCode();
this.param = error.getError().getParam();
this.type = error.getError().getType();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/OpenAIResponse.java
|
package ai.knowly.langtorch.llm.openai.schema.dto;
import java.util.List;
import lombok.Data;
/** A wrapper class to fit the OpenAI engine and search endpoints */
@Data
public class OpenAIResponse<T> {
/** A list containing the actual results */
private List<T> data;
/** The type of object returned, should be "list" */
private String object;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/Usage.java
|
package ai.knowly.langtorch.llm.openai.schema.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
/** The OpenAI resources used by a request */
@Data
public class Usage {
/** The number of prompt tokens used. */
@JsonProperty("prompt_tokens")
long promptTokens;
/** The number of completion tokens used. */
@JsonProperty("completion_tokens")
long completionTokens;
/** The number of total tokens used */
@JsonProperty("total_tokens")
long totalTokens;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/CompletionChoice.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
/**
* A completion generated by OpenAI
*
* <p>https://beta.openai.com/docs/api-reference/completions/create
*/
@Data
public class CompletionChoice {
/** The generated text. Will include the prompt if {@link CompletionRequest#echo} is true */
String text;
/** The index of this completion in the returned list. */
Integer index;
/**
* The log probabilities of the chosen tokens and the top {@link CompletionRequest#logprobs}
* tokens
*/
LogProbResult logprobs;
/** The reason why GPT stopped generating, for example "length". */
@JsonProperty("finish_reason")
String finishReason;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/CompletionChunk.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion;
import java.util.List;
import lombok.Data;
/**
* Object containing a response chunk from the completions streaming api.
*
* <p>https://beta.openai.com/docs/api-reference/completions/create
*/
@Data
public class CompletionChunk {
/** A unique id assigned to this completion. */
String id;
/**
 * The type of object returned, should be "text_completion"
 *
 * <p>https://beta.openai.com/docs/api-reference/create-completion
 */
String object;
/** The creation time in epoch seconds. */
long created;
/** The model used. */
String model;
/** A list of generated completions. */
List<CompletionChoice> choices;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/CompletionRequest.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* A request for OpenAi to generate a predicted completion for a prompt. All fields are nullable.
*
* <p>https://beta.openai.com/docs/api-reference/completions/create
*/
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class CompletionRequest {
/**
* The name of the model to use. Required if specifying a fine-tuned model or if using the new
* v1/completions endpoint.
*/
String model;
/** An optional prompt to complete from */
String prompt;
/** The suffix that comes after a completion of inserted text. */
String suffix;
/**
* The maximum number of tokens to generate. Requests can use up to 2048 tokens shared between
* prompt and completion. (One token is roughly 4 characters for normal English text)
*/
@JsonProperty("max_tokens")
Integer maxTokens;
/**
* What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9
* for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
*
* <p>We generally recommend using this or {@link CompletionRequest#topP} but not both.
*/
Double temperature;
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers
* the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising
* the top 10% probability mass are considered.
*
* <p>We generally recommend using this or {@link CompletionRequest#temperature} but not both.
*/
@JsonProperty("top_p")
Double topP;
/**
* How many completions to generate for each prompt.
*
* <p>Because this parameter generates many completions, it can quickly consume your token quota.
* Use carefully and ensure that you have reasonable settings for {@link
* CompletionRequest#maxTokens} and {@link CompletionRequest#stop}.
*/
Integer n;
/**
* Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent
* events as they become available, with the stream terminated by a data: DONE message.
*/
Boolean stream;
/**
* Include the log probabilities on the logprobs most likely tokens, as well as the chosen tokens.
* For example, if logprobs is 10, the API will return a list of the 10 most likely tokens. The
* API will always return the logprob of the sampled token, so there may be up to logprobs+1
* elements in the response.
*/
Integer logprobs;
/** Echo back the prompt in addition to the completion */
Boolean echo;
/**
* Up to 4 sequences where the API will stop generating further tokens. The returned text will not
* contain the stop sequence.
*/
List<String> stop;
/**
* Number between 0 and 1 (default 0) that penalizes new tokens based on whether they appear in
* the text so far. Increases the model's likelihood to talk about new topics.
*/
@JsonProperty("presence_penalty")
Double presencePenalty;
/**
* Number between 0 and 1 (default 0) that penalizes new tokens based on their existing frequency
* in the text so far. Decreases the model's likelihood to repeat the same line verbatim.
*/
@JsonProperty("frequency_penalty")
Double frequencyPenalty;
/**
* Generates best_of completions server-side and returns the "best" (the one with the lowest log
* probability per token). Results cannot be streamed.
*
* <p>When used with {@link CompletionRequest#n}, best_of controls the number of candidate
* completions and n specifies how many to return, best_of must be greater than n.
*/
@JsonProperty("best_of")
Integer bestOf;
/**
* Modify the likelihood of specified tokens appearing in the completion.
*
* <p>Maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value
* from -100 to 100.
*
* <p>https://beta.openai.com/docs/api-reference/completions/create#completions/create-logit_bias
*/
@JsonProperty("logit_bias")
Map<String, Integer> logitBias;
/**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect
* abuse.
*/
String user;
}
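// Illustrative sketch, not part of the original source: a minimal request through the
// Lombok builder above (no setter prefix, so builder methods match field names). The
// model name is a placeholder.
class CompletionRequestSketch {
  static CompletionRequest minimal(String prompt) {
    return CompletionRequest.builder()
        .model("text-davinci-003")
        .prompt(prompt)
        .maxTokens(64)
        .temperature(0.0)
        .build();
  }
}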
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/CompletionResult.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion;
import ai.knowly.langtorch.llm.openai.schema.dto.Usage;
import java.util.List;
import lombok.Data;
/**
* An object containing a response from the completion api
*
* <p>https://beta.openai.com/docs/api-reference/completions/create
*/
@Data
public class CompletionResult {
/** A unique id assigned to this completion. */
String id;
/**
 * The type of object returned, should be "text_completion"
 *
 * <p>https://beta.openai.com/docs/api-reference/create-completion
 */
String object;
/** The creation time in epoch seconds. */
long created;
/** The GPT model used. */
String model;
/** A list of generated completions. */
List<CompletionChoice> choices;
/** The API usage for this request */
Usage usage;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/LogProbResult.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import java.util.Map;
import lombok.Data;
/**
* Log probabilities of different token options Returned if {@link CompletionRequest#logprobs} is
* greater than zero
*
* <p>https://beta.openai.com/docs/api-reference/create-completion
*/
@Data
public class LogProbResult {
/** The tokens chosen by the completion api */
List<String> tokens;
/** The log probability of each token in {@link #tokens} */
@JsonProperty("token_logprobs")
List<Double> tokenLogprobs;
/**
* A map for each index in the completion result. The map contains the top {@link
* CompletionRequest#logprobs} tokens and their probabilities
*/
@JsonProperty("top_logprobs")
List<Map<String, Double>> topLogprobs;
/** The character offset from the start of the returned text for each of the chosen tokens. */
List<Integer> textOffset;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/chat/ChatCompletionChoice.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion.chat;
import ai.knowly.langtorch.schema.chat.ChatMessage;
import com.fasterxml.jackson.annotation.JsonAlias;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
/** A chat completion generated by OpenAI */
@Data
public class ChatCompletionChoice {
/** The index of this completion in the returned list. */
Integer index;
/** The {@link ChatMessageRole#assistant} message or delta (when streaming) which was generated */
@JsonAlias("delta")
ChatMessage message;
/** The reason why GPT stopped generating, for example "length". */
@JsonProperty("finish_reason")
String finishReason;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/chat/ChatCompletionChunk.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion.chat;
import java.util.List;
import lombok.Data;
/** Object containing a response chunk from the chat completions streaming api. */
@Data
public class ChatCompletionChunk {
/** Unique id assigned to this chat completion. */
String id;
/** The type of object returned, should be "chat.completion.chunk" */
String object;
/** The creation time in epoch seconds. */
long created;
/** The model used. */
String model;
/** A list of all generated completions. */
List<ChatCompletionChoice> choices;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/chat/ChatCompletionRequest.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion.chat;
import ai.knowly.langtorch.schema.chat.ChatMessage;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
@AllArgsConstructor
@NoArgsConstructor
public class ChatCompletionRequest {
/** ID of the model to use. */
String model;
/**
* The messages to generate chat completions for, in the <a
* href="https://platform.openai.com/docs/guides/chat/introduction">chat format</a>.<br>
* see {@link ai.knowly.langtorch.schema.chat.ChatMessage}
*/
List<ChatMessage> messages;
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
* more random, while lower values like 0.2 will make it more focused and deterministic.<br>
* We generally recommend altering this or top_p but not both.
*/
Double temperature;
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers
* the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising
* the top 10% probability mass are considered.<br>
* We generally recommend altering this or temperature but not both.
*/
@JsonProperty("top_p")
Double topP;
/** How many chat completion choices to generate for each input message. */
Integer n;
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only
* <a
* href="https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format">server-sent
* events</a> as they become available, with the stream terminated by a data: [DONE] message.
*/
Boolean stream;
/** Up to 4 sequences where the API will stop generating further tokens. */
List<String> stop;
/**
* The maximum number of tokens allowed for the generated answer. By default, the number of tokens
* the model can return will be (4096 - prompt tokens).
*/
@JsonProperty("max_tokens")
Integer maxTokens;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
* in the text so far, increasing the model's likelihood to talk about new topics.
*/
@JsonProperty("presence_penalty")
Double presencePenalty;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
* frequency in the text so far, decreasing the model's likelihood to repeat the same line
* verbatim.
*/
@JsonProperty("frequency_penalty")
Double frequencyPenalty;
/**
* Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an
* associated bias value from -100 to 100. Mathematically, the bias is added to the logits
* generated by the model prior to sampling. The exact effect will vary per model, but values
* between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100
* should result in a ban or exclusive selection of the relevant token.
*/
@JsonProperty("logit_bias")
Map<String, Integer> logitBias;
/**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect
* abuse.
*/
String user;
private List<Function> functions;
@JsonProperty("function_call")
private Object functionCall;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/chat/ChatCompletionResult.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion.chat;
import ai.knowly.langtorch.llm.openai.schema.dto.Usage;
import java.util.List;
import lombok.Data;
/** Object containing a response from the chat completions api. */
@Data
public class ChatCompletionResult {
/** Unique id assigned to this chat completion. */
String id;
/** The type of object returned, should be "chat.completion" */
String object;
/** The creation time in epoch seconds. */
long created;
/** The GPT model used. */
String model;
/** A list of all generated completions. */
List<ChatCompletionChoice> choices;
/** The API usage for this request. */
Usage usage;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/chat/Function.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion.chat;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
@AllArgsConstructor
@NoArgsConstructor
public class Function {
private String name;
private String description;
private Parameters parameters;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/chat/FunctionCall.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion.chat;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class FunctionCall {
private String name;
private String arguments;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/completion/chat/Parameters.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.completion.chat;
import java.util.List;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@Builder(toBuilder = true, setterPrefix = "set")
@AllArgsConstructor
@NoArgsConstructor
public class Parameters {
private String type;
private Map<String, Object> properties;
private List<String> required;
}
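// Illustrative sketch, not part of the original source: declaring a function for OpenAI
// function calling with the Function and Parameters DTOs above. The property map mirrors
// OpenAI's JSON-schema style; all names here are examples.
class FunctionDefinitionSketch {
  static Function weatherFunction() {
    Map<String, Object> properties = new java.util.HashMap<>();
    properties.put("location", java.util.Collections.singletonMap("type", "string"));
    return Function.builder()
        .setName("get_current_weather")
        .setDescription("Gets the current weather for a location")
        .setParameters(
            Parameters.builder()
                .setType("object")
                .setProperties(properties)
                .setRequired(java.util.Collections.singletonList("location"))
                .build())
        .build();
  }
}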
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/edit/EditChoice.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.edit;
import lombok.Data;
/**
* An edit generated by OpenAi
*
* <p>https://beta.openai.com/docs/api-reference/edits/create
*/
@Data
public class EditChoice {
/** The edited text. */
String text;
/** The index of this completion in the returned list. */
Integer index;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/edit/EditRequest.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.edit;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.*;
/**
* Given a prompt and an instruction, OpenAi will return an edited version of the prompt
*
* <p>https://beta.openai.com/docs/api-reference/edits/create
*/
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class EditRequest {
/** The name of the model to use. Required if using the new v1/edits endpoint. */
String model;
/** The input text to use as a starting point for the edit. */
String input;
/**
* The instruction that tells the model how to edit the prompt. For example, "Fix the spelling
* mistakes"
*/
@NonNull String instruction;
/** How many edits to generate for the input and instruction. */
Integer n;
/**
* What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9
* for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
*
* <p>We generally recommend altering this or {@link EditRequest#topP} but not both.
*/
Double temperature;
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers
* the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising
* the top 10% probability mass are considered.
*
* <p>We generally recommend altering this or {@link EditRequest#temperature} but not both.
*/
@JsonProperty("top_p")
Double topP;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/edit/EditResult.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.edit;
import ai.knowly.langtorch.llm.openai.schema.dto.Usage;
import java.util.List;
import lombok.Data;
/**
* A list of edits generated by OpenAI
*
* <p>https://beta.openai.com/docs/api-reference/edits/create
*/
@Data
public class EditResult {
/** The type of object returned, should be "edit" */
private String object;
/** The creation time in epoch milliseconds. */
private long created;
/** A list of generated edits. */
private List<EditChoice> choices;
/** The API usage for this request */
private Usage usage;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/embedding/Embedding.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.embedding;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
import lombok.Data;
/**
* Represents an embedding returned by the embedding api
*
* <p>https://beta.openai.com/docs/api-reference/classifications/create
*/
@Data
public class Embedding {
/** The type of object returned, should be "embedding" */
String object;
/** The embedding vector */
@JsonProperty("embedding")
List<Double> value;
/** The position of this embedding in the list */
Integer index;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/embedding/EmbeddingRequest.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.embedding;
import java.util.List;
import lombok.*;
/**
* Creates an embedding vector representing the input text.
*
* <p>https://beta.openai.com/docs/api-reference/embeddings/create
*/
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class EmbeddingRequest {
/** The name of the model to use. Required if using the new v1/embeddings endpoint. */
String model;
/**
* Input text to get embedding for, encoded as a string or array of tokens. To get embedding for
* multiple inputs in a single request, pass an array of strings or array of token arrays. Each
* input must not exceed 2048 tokens in length.
*
* <p>Unless you are embedding code, we suggest replacing newlines (\n) in your input with a
* single space, as we have observed inferior results when newlines are present.
*/
@NonNull List<String> input;
/**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect
* abuse.
*/
String user;
}
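A minimal builder sketch; input is the only required field, and the embedding model name is an assumption for illustration:
import ai.knowly.langtorch.llm.openai.schema.dto.embedding.EmbeddingRequest;
import java.util.Arrays;
class EmbeddingRequestExample {
  static EmbeddingRequest sample() {
    return EmbeddingRequest.builder()
        .model("text-embedding-ada-002") // illustrative model name
        .input(Arrays.asList("The food was delicious.", "The service was friendly."))
        .build();
  }
}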
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/embedding/EmbeddingResult.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.embedding;
import ai.knowly.langtorch.llm.openai.schema.dto.Usage;
import java.util.List;
import lombok.Data;
/**
* An object containing a response from the embeddings api
*
* <p>https://beta.openai.com/docs/api-reference/embeddings/create
*/
@Data
public class EmbeddingResult {
/** The GPT model used for generating the embeddings */
String model;
/** The type of object returned, should be "list" */
String object;
/** A list of the calculated embeddings */
List<Embedding> data;
/** The API usage for this request */
Usage usage;
}
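A sketch of unpacking the result; the EmbeddingResult here is assumed to come from an embeddings call made elsewhere with this library:
import ai.knowly.langtorch.llm.openai.schema.dto.embedding.Embedding;
import ai.knowly.langtorch.llm.openai.schema.dto.embedding.EmbeddingResult;
import java.util.List;
class EmbeddingResultExample {
  // Extracts the first embedding vector from a result returned by the API.
  static List<Double> firstVector(EmbeddingResult result) {
    Embedding first = result.getData().get(0);
    return first.getValue(); // deserialized from the "embedding" JSON field
  }
}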
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/image/CreateImageEditRequest.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.image;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.*;
/**
* A request for OpenAI to edit an image based on a prompt. All fields except prompt are optional.
*
* <p>https://beta.openai.com/docs/api-reference/images/create-edit
*/
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class CreateImageEditRequest {
/** A text description of the desired image(s). The maximum length is 1000 characters. */
@NonNull String prompt;
/** The number of images to generate. Must be between 1 and 10. Defaults to 1. */
Integer n;
/**
* The size of the generated images. Must be one of "256x256", "512x512", or "1024x1024". Defaults
* to "1024x1024".
*/
String size;
/**
* The format in which the generated images are returned. Must be one of url or b64_json. Defaults
* to url.
*/
@JsonProperty("response_format")
String responseFormat;
/**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect
* abuse.
*/
String user;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/image/CreateImageRequest.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.image;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.*;
/**
* A request for OpenAI to create an image based on a prompt. All fields except prompt are optional.
*
* <p>https://beta.openai.com/docs/api-reference/images/create
*/
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class CreateImageRequest {
/** A text description of the desired image(s). The maximum length is 1000 characters. */
@NonNull String prompt;
/** The number of images to generate. Must be between 1 and 10. Defaults to 1. */
Integer n;
/**
* The size of the generated images. Must be one of "256x256", "512x512", or "1024x1024". Defaults
* to "1024x1024".
*/
String size;
/**
* The format in which the generated images are returned. Must be one of url or b64_json. Defaults
* to url.
*/
@JsonProperty("response_format")
String responseFormat;
/**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect
* abuse.
*/
String user;
}
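A minimal builder sketch; only prompt is required, and the remaining fields fall back to the defaults documented above:
import ai.knowly.langtorch.llm.openai.schema.dto.image.CreateImageRequest;
class CreateImageRequestExample {
  static CreateImageRequest sample() {
    return CreateImageRequest.builder()
        .prompt("A watercolor painting of a lighthouse at dawn")
        .n(2)
        .size("512x512")
        .responseFormat("url")
        .build();
  }
}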
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/image/CreateImageVariationRequest.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.image;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.*;
/**
* A request for OpenAI to create a variation of an image. All fields are optional.
*
* <p>https://beta.openai.com/docs/api-reference/images/create-variation
*/
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class CreateImageVariationRequest {
/** The number of images to generate. Must be between 1 and 10. Defaults to 1. */
Integer n;
/**
* The size of the generated images. Must be one of "256x256", "512x512", or "1024x1024". Defaults
* to "1024x1024".
*/
String size;
/**
* The format in which the generated images are returned. Must be one of url or b64_json. Defaults
* to url.
*/
@JsonProperty("response_format")
String responseFormat;
/**
* A unique identifier representing your end-user, which will help OpenAI to monitor and detect
* abuse.
*/
String user;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/image/Image.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.image;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
/**
* An object containing either a URL or a Base64-encoded image.
*
* <p>https://beta.openai.com/docs/api-reference/images
*/
@Data
public class Image {
/** The URL where the image can be accessed. */
String url;
/** Base64 encoded image string. */
@JsonProperty("b64_json")
String b64Json;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/image/ImageResult.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.image;
import java.util.List;
import lombok.Data;
/**
* An object with a list of image results.
*
* <p>https://beta.openai.com/docs/api-reference/images
*/
@Data
public class ImageResult {
/** The creation time in epoch seconds. */
Long created;
/** List of image results. */
List<Image> data;
}
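A sketch of collecting the returned URLs, assuming "url" was used as the response format and the ImageResult comes from an image call made elsewhere with this library:
import ai.knowly.langtorch.llm.openai.schema.dto.image.Image;
import ai.knowly.langtorch.llm.openai.schema.dto.image.ImageResult;
import java.util.List;
import java.util.stream.Collectors;
class ImageResultExample {
  // Collects the URL of every generated image in the result.
  static List<String> urls(ImageResult result) {
    return result.getData().stream().map(Image::getUrl).collect(Collectors.toList());
  }
}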
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/moderation/Moderation.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.moderation;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
/**
* An object containing the moderation data for a single input string
*
* <p>https://beta.openai.com/docs/api-reference/moderations/create
*/
@Data
public class Moderation {
/**
* Set to true if the model classifies the content as violating OpenAI's content policy, false
* otherwise
*/
private boolean flagged;
/**
* Object containing per-category binary content policy violation flags. For each category, the
* value is true if the model flags the corresponding category as violated, false otherwise.
*/
private ModerationCategories categories;
/**
* Object containing per-category raw scores output by the model, denoting the model's confidence
* that the input violates OpenAI's policy for the category. The value is between 0 and 1,
* where higher values denote higher confidence. The scores should not be interpreted as
* probabilities.
*/
@JsonProperty("category_scores")
private ModerationCategoryScores categoryScores;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/moderation/ModerationCategories.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.moderation;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
/**
* An object containing the flags for each moderation category
*
* <p>https://beta.openai.com/docs/api-reference/moderations/create
*/
@Data
public class ModerationCategories {
private boolean hate;
@JsonProperty("hate/threatening")
private boolean hateThreatening;
@JsonProperty("self-harm")
private boolean selfHarm;
private boolean sexual;
@JsonProperty("sexual/minors")
private boolean sexualMinors;
private boolean violence;
@JsonProperty("violence/graphic")
private boolean violenceGraphic;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/moderation/ModerationCategoryScores.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.moderation;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
/**
* An object containing the scores for each moderation category
*
* <p>https://beta.openai.com/docs/api-reference/moderations/create
*/
@Data
public class ModerationCategoryScores {
private double hate;
@JsonProperty("hate/threatening")
private double hateThreatening;
@JsonProperty("self-harm")
private double selfHarm;
private double sexual;
@JsonProperty("sexual/minors")
private double sexualMinors;
private double violence;
@JsonProperty("violence/graphic")
private double violenceGraphic;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/moderation/ModerationRequest.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.moderation;
import lombok.*;
/**
* A request for OpenAi to detect if text violates OpenAi's content policy.
*
* <p>https://beta.openai.com/docs/api-reference/moderations/create
*/
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class ModerationRequest {
/** The input text to classify. */
@NonNull String input;
/** The name of the model to use, defaults to text-moderation-stable. */
String model;
}
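A minimal builder sketch; omitting model falls back to text-moderation-stable, per the field docs above:
import ai.knowly.langtorch.llm.openai.schema.dto.moderation.ModerationRequest;
class ModerationRequestExample {
  static ModerationRequest sample(String userText) {
    return ModerationRequest.builder().input(userText).build();
  }
}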
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/dto/moderation/ModerationResult.java
|
package ai.knowly.langtorch.llm.openai.schema.dto.moderation;
import java.util.List;
import lombok.Data;
/**
* An object containing a response from the moderation api
*
* <p>https://beta.openai.com/docs/api-reference/moderations/create
*/
@Data
public class ModerationResult {
/** A unique id assigned to this moderation. */
private String id;
/** The model used. */
private String model;
/** A list of moderation scores. */
private List<Moderation> results;
}
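A sketch of checking a result, assuming the ModerationResult comes from a moderation call made elsewhere with this library:
import ai.knowly.langtorch.llm.openai.schema.dto.moderation.Moderation;
import ai.knowly.langtorch.llm.openai.schema.dto.moderation.ModerationResult;
class ModerationResultExample {
  // Returns true if the model flagged any of the inputs in this result.
  static boolean anyFlagged(ModerationResult result) {
    return result.getResults().stream().anyMatch(Moderation::isFlagged);
  }
}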
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/exception/OpenAIApiExecutionException.java
|
package ai.knowly.langtorch.llm.openai.schema.exception;
/**
* The class defines a custom exception for errors that occur during execution of OpenAI API calls.
*/
public class OpenAIApiExecutionException extends RuntimeException {
public OpenAIApiExecutionException(Exception e) {
super(e);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/schema/exception/OpenAIServiceInterruptedException.java
|
package ai.knowly.langtorch.llm.openai.schema.exception;
/** The class defines a custom exception thrown when a call to the OpenAI service is interrupted. */
public class OpenAIServiceInterruptedException extends RuntimeException {
public OpenAIServiceInterruptedException(InterruptedException e) {
super(e);
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/tokenization/Encodings.java
|
package ai.knowly.langtorch.llm.openai.tokenization;
import ai.knowly.langtorch.llm.openai.util.OpenAIModel;
import com.google.common.collect.ImmutableMap;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.ModelType;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
/**
* The class Encodings contains a static map of OpenAI models and their corresponding encodings
* obtained from a default encoding registry.
*/
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public class Encodings {
private static final EncodingRegistry registry =
com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry();
public static final ImmutableMap<OpenAIModel, Encoding> ENCODING_BY_MODEL =
ImmutableMap.of(
OpenAIModel.GPT_3_5_TURBO,
registry.getEncodingForModel(ModelType.GPT_3_5_TURBO),
OpenAIModel.GPT_3_5_TURBO_16K,
registry.getEncodingForModel(ModelType.GPT_3_5_TURBO),
OpenAIModel.GPT_3_5_TURBO_0613,
registry.getEncodingForModel(ModelType.GPT_3_5_TURBO),
OpenAIModel.GPT_3_5_TURBO_16K_0613,
registry.getEncodingForModel(ModelType.GPT_3_5_TURBO),
OpenAIModel.GPT_4,
registry.getEncodingForModel(ModelType.GPT_4),
OpenAIModel.GPT_4_0613,
registry.getEncodingForModel(ModelType.GPT_4),
OpenAIModel.GPT_4_32K,
registry.getEncodingForModel(ModelType.GPT_4_32K),
OpenAIModel.GPT_4_32K_0613,
registry.getEncodingForModel(ModelType.GPT_4_32K));
}
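A minimal sketch of looking up an encoding from the map, assuming the caller sits in the same package as Encodings (or imports it accordingly):
import ai.knowly.langtorch.llm.openai.util.OpenAIModel;
import com.knuddels.jtokkit.api.Encoding;
import java.util.List;
class EncodingsExample {
  static int countTokens(String text) {
    Encoding encoding = Encodings.ENCODING_BY_MODEL.get(OpenAIModel.GPT_3_5_TURBO);
    List<Integer> tokens = encoding.encode(text); // same call OpenAITokenizer makes below
    return tokens.size();
  }
}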
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/tokenization/OpenAITokenizer.java
|
package ai.knowly.langtorch.llm.openai.tokenization;
import ai.knowly.langtorch.llm.openai.util.OpenAIModel;
import ai.knowly.langtorch.schema.chat.ChatMessage;
import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.Objects;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
/**
* Tokenizer for OpenAI models. It is currently unused because token usage is provided in the
* OpenAI REST response; it will be needed once streaming responses are supported.
*/
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public class OpenAITokenizer {
private static final ImmutableList<OpenAIModel> GPT_3_MODELS =
ImmutableList.of(
OpenAIModel.GPT_3_5_TURBO,
OpenAIModel.GPT_3_5_TURBO_16K,
OpenAIModel.GPT_3_5_TURBO_0613,
OpenAIModel.GPT_3_5_TURBO_16K_0613);
private static final ImmutableList<OpenAIModel> GPT_4_MODELS =
ImmutableList.of(
OpenAIModel.GPT_4,
OpenAIModel.GPT_4_0613,
OpenAIModel.GPT_4_32K,
OpenAIModel.GPT_4_32K_0613);
/**
 * Encodes the given text into token ids using the encoding associated with the specified OpenAI
 * model.
 *
 * @param model The OpenAIModel whose encoding is used to tokenize the text.
 * @param text The input text to encode.
 * @return The encoded tokens as a list of integers.
 */
public static List<Integer> encode(OpenAIModel model, String text) {
return Objects.requireNonNull(Encodings.ENCODING_BY_MODEL.get(model)).encode(text);
}
/**
 * Decodes a list of token ids back into text using the encoding associated with the specified
 * OpenAI model.
 *
 * @param model The OpenAIModel whose encoding is used to decode the tokens.
 * @param tokens A list of integer token ids, typically produced by {@link #encode}.
 * @return The decoded string.
 */
public static String decode(OpenAIModel model, List<Integer> tokens) {
return Objects.requireNonNull(Encodings.ENCODING_BY_MODEL.get(model)).decode(tokens);
}
/**
 * Returns the number of tokens the given text encodes to under the specified OpenAI model.
 *
 * @param model The OpenAIModel whose encoding is used to tokenize the text.
 * @param text The input text whose tokens are counted.
 * @return The number of tokens produced by encoding the text.
 */
public static long getTokenNumber(OpenAIModel model, String text) {
return encode(model, text).size();
}
/**
* The function calculates the number of tokens in a list of chat messages based on the OpenAI
* model.
*
* @param model The OpenAI model being used for generating text.
* @param messages A list of ChatMessage objects representing the conversation messages.
* @return The method is returning a long value which represents the total number of tokens in the
* given list of ChatMessage objects.
* <p>The algorithm for counting tokens is based on
* https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
*/
public static long getTokenNumber(OpenAIModel model, List<ChatMessage> messages) {
int tokensPerMessage = 0;
int tokensPerName = 0;
if (GPT_3_MODELS.contains(model)) {
// Every message follows <|start|>{role/name}\n{content}<|end|>\n
tokensPerMessage = 4;
// If there's a name, the role is omitted
tokensPerName = -1;
} else if (GPT_4_MODELS.contains(model)) {
// Every message follows <|start|>{role/name}\n{content}<|end|>\n
tokensPerMessage = 3;
// If there's a name, the role is omitted
tokensPerName = 1;
} else {
throw new UnsupportedOperationException("Your model is not supported yet for token counting.");
}
int numberOfTokens = 0;
for (ChatMessage message : messages) {
numberOfTokens += tokensPerMessage;
numberOfTokens += encode(model, message.getContent()).size();
numberOfTokens += encode(model, message.getRole().name()).size();
numberOfTokens += encode(model, message.getName()).size() + tokensPerName;
}
// Every reply is primed with <|start|>assistant<|message|>
numberOfTokens += 3;
return numberOfTokens;
}
}
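A round-trip sketch of the static API, assuming the caller sits in the same package as OpenAITokenizer (or imports it accordingly):
import ai.knowly.langtorch.llm.openai.util.OpenAIModel;
import java.util.List;
class OpenAITokenizerExample {
  static void roundTrip() {
    List<Integer> tokens = OpenAITokenizer.encode(OpenAIModel.GPT_4, "Hello, world!");
    String restored = OpenAITokenizer.decode(OpenAIModel.GPT_4, tokens); // "Hello, world!"
    long count = OpenAITokenizer.getTokenNumber(OpenAIModel.GPT_4, "Hello, world!");
  }
}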
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/llm/openai/util/OpenAIModel.java
|
package ai.knowly.langtorch.llm.openai.util;
import lombok.AllArgsConstructor;
import lombok.Getter;
// An enum of constants representing the supported OpenAI models. Each constant carries the
// String identifier of the corresponding model as used by the OpenAI API.
@Getter
@AllArgsConstructor
public enum OpenAIModel {
GPT_3_5_TURBO("gpt-3.5-turbo"),
GPT_3_5_TURBO_0613("gpt-3.5-turbo-0613"),
GPT_3_5_TURBO_16K("gpt-3.5-turbo-16k"),
GPT_3_5_TURBO_16K_0613("gpt-3.5-turbo-16k-0613"),
GPT_4("gpt-4"),
GPT_4_32K("gpt-4-32k"),
GPT_4_0613("gpt-4-0613"),
GPT_4_32K_0613("gpt-4-32k-0613");
private String value;
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing/parser/ChatMessageToStringParser.java
|
package ai.knowly.langtorch.preprocessing.parser;
import ai.knowly.langtorch.schema.chat.ChatMessage;
/** Implements a parser to convert a ChatMessage object to a String by returning its content. */
public class ChatMessageToStringParser implements Parser<ChatMessage, String> {
private ChatMessageToStringParser() {
super();
}
public static ChatMessageToStringParser create() {
return new ChatMessageToStringParser();
}
@Override
public String parse(ChatMessage chatMessage) {
return chatMessage.getContent();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing/parser/MiniMaxStringToMultiChatMessageParser.java
|
package ai.knowly.langtorch.preprocessing.parser;
import ai.knowly.langtorch.schema.chat.MiniMaxUserMessage;
import ai.knowly.langtorch.schema.text.MultiChatMessage;
/**
* @author maxiao
* @date 2023/06/14
*/
public final class MiniMaxStringToMultiChatMessageParser
implements Parser<String, MultiChatMessage> {
private MiniMaxStringToMultiChatMessageParser() {
super();
}
public static MiniMaxStringToMultiChatMessageParser create() {
return new MiniMaxStringToMultiChatMessageParser();
}
@Override
public MultiChatMessage parse(String content) {
return MultiChatMessage.of(MiniMaxUserMessage.of(content));
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing/parser/Parser.java
|
package ai.knowly.langtorch.preprocessing.parser;
// This code defines a functional interface named `Parser` with two generic type parameters `T`
// and `R`. It has a single abstract method `parse` that takes an input of type `T` and returns a
// result of type `R`.
@FunctionalInterface
public interface Parser<T, R> {
R parse(T input);
}
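Because Parser is a functional interface, any single-argument function can act as one; a minimal sketch:
import ai.knowly.langtorch.preprocessing.parser.Parser;
class ParserExample {
  static void demo() {
    Parser<String, Integer> toInt = Integer::parseInt; // method reference as a Parser
    Integer parsed = toInt.parse("42"); // 42
  }
}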
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing/parser/PromptTemplateToSingleTextParser.java
|
package ai.knowly.langtorch.preprocessing.parser;
import ai.knowly.langtorch.schema.text.SingleText;
import ai.knowly.langtorch.prompt.template.PromptTemplate;
/**
* The PromptTemplateToSingleTextParser class that converts a PromptTemplate object into a
* SingleText object by using the format method of the input.
*/
public class PromptTemplateToSingleTextParser implements Parser<PromptTemplate, SingleText> {
private PromptTemplateToSingleTextParser() {
super();
}
public static PromptTemplateToSingleTextParser create() {
return new PromptTemplateToSingleTextParser();
}
@Override
public SingleText parse(PromptTemplate input) {
return SingleText.of(input.format());
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing/parser/SingleTextToStringParser.java
|
package ai.knowly.langtorch.preprocessing.parser;
import ai.knowly.langtorch.schema.text.SingleText;
/**
* The SingleTextToStringParser class implements the Parser interface to parse a SingleText object
* into a String.
*/
public class SingleTextToStringParser implements Parser<SingleText, String> {
private SingleTextToStringParser() {
super();
}
public static SingleTextToStringParser create() {
return new SingleTextToStringParser();
}
@Override
public String parse(SingleText input) {
return input.getText();
}
}
|
0
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing
|
java-sources/ai/knowly/langtorch/0.0.17/ai/knowly/langtorch/preprocessing/parser/StringToMultiChatMessageParser.java
|
package ai.knowly.langtorch.preprocessing.parser;
import ai.knowly.langtorch.schema.chat.UserMessage;
import ai.knowly.langtorch.schema.text.MultiChatMessage;
/** This is a Java class that parses a string into a MultiChatMessage object. */
public final class StringToMultiChatMessageParser implements Parser<String, MultiChatMessage> {
private StringToMultiChatMessageParser() {
super();
}
public static StringToMultiChatMessageParser create() {
return new StringToMultiChatMessageParser();
}
@Override
public MultiChatMessage parse(String content) {
return MultiChatMessage.of(UserMessage.of(content));
}
}
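A minimal usage sketch: the factory method plus parse wraps raw user input into a MultiChatMessage holding a single UserMessage:
import ai.knowly.langtorch.preprocessing.parser.StringToMultiChatMessageParser;
import ai.knowly.langtorch.schema.text.MultiChatMessage;
class StringToMultiChatMessageParserExample {
  static MultiChatMessage demo() {
    return StringToMultiChatMessageParser.create().parse("What's the weather like?");
  }
}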
|