Java Examples
This page demonstrates how to use the uncloseai. API endpoints from Java with the official OpenAI Java library. All examples share the same OpenAI-compatible API interface, so switching between models and endpoints is straightforward (see the sketch just after the endpoint list).
Available Endpoints:
- Hermes: https://hermes.ai.unturf.com/v1 - General purpose conversational AI
- Qwen 3 Coder: https://qwen.ai.unturf.com/v1 - Specialized coding model
- TTS: https://speech.ai.unturf.com/v1 - Text-to-speech generation
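Because every endpoint speaks the same OpenAI-compatible protocol, the only client-side difference between them is the base URL (plus the model name in each request). The helper below is a minimal sketch of that idea; clientFor is a hypothetical name of ours, not an SDK method:

import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;

public class Endpoints {
    // Hypothetical helper, not part of the SDK: the only per-endpoint
    // difference when building a client is the base URL.
    static OpenAIClient clientFor(String baseUrl) {
        return OpenAIOkHttpClient.builder()
                .apiKey("choose-any-value") // these endpoints accept any key
                .baseUrl(baseUrl)
                .build();
    }

    public static void main(String[] args) {
        OpenAIClient hermes = clientFor("https://hermes.ai.unturf.com/v1");
        OpenAIClient qwen = clientFor("https://qwen.ai.unturf.com/v1");
    }
}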
Java Client Installation
Add the official OpenAI Java library to your Maven pom.xml:
<dependency>
    <groupId>com.openai</groupId>
    <artifactId>openai-java</artifactId>
    <version>4.3.0</version>
</dependency>
Or for Gradle:
implementation 'com.openai:openai-java:4.3.0'
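If your build uses the Gradle Kotlin DSL (build.gradle.kts) instead, the equivalent declaration is:

implementation("com.openai:openai-java:4.3.0")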
Non-Streaming Examples
Non-streaming mode waits for the complete response before returning. This is simpler to use but provides no intermediate feedback during generation.
Using Hermes (General Purpose)
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.chat.completions.ChatCompletion;
import com.openai.models.chat.completions.ChatCompletionCreateParams;

public class HermesExample {
    public static void main(String[] args) {
        // The endpoint accepts any API key value.
        OpenAIClient client = OpenAIOkHttpClient.builder()
                .apiKey("choose-any-value")
                .baseUrl("https://hermes.ai.unturf.com/v1")
                .build();

        ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
                .model("adamo1139/Hermes-3-Llama-3.1-8B-FP8-Dynamic")
                .addUserMessage("Give a Python Fizzbuzz solution in one line of code?")
                .temperature(0.5)
                .maxTokens(150L)
                .build();

        ChatCompletion completion = client.chat().completions().create(params);
        System.out.println(completion.choices().get(0).message().content().orElse(""));
    }
}
Using Qwen 3 Coder (Specialized for Coding)
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.chat.completions.ChatCompletion;
import com.openai.models.chat.completions.ChatCompletionCreateParams;

public class QwenExample {
    public static void main(String[] args) {
        OpenAIClient client = OpenAIOkHttpClient.builder()
                .apiKey("choose-any-value")
                .baseUrl("https://qwen.ai.unturf.com/v1")
                .build();

        ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
                .model("hf.co/unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF:Q4_K_M")
                .addUserMessage("Give a Python Fizzbuzz solution in one line of code?")
                .temperature(0.5)
                .maxTokens(150L)
                .build();

        ChatCompletion completion = client.chat().completions().create(params);
        System.out.println(completion.choices().get(0).message().content().orElse(""));
    }
}
Streaming Examples
Streaming mode returns chunks of the response as they are generated, providing real-time feedback. This is ideal for interactive applications and long responses.
Using Hermes (General Purpose)
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.core.http.StreamResponse;
import com.openai.models.chat.completions.ChatCompletionChunk;
import com.openai.models.chat.completions.ChatCompletionCreateParams;

public class HermesStreamExample {
    public static void main(String[] args) {
        OpenAIClient client = OpenAIOkHttpClient.builder()
                .apiKey("choose-any-value")
                .baseUrl("https://hermes.ai.unturf.com/v1")
                .build();

        ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
                .model("adamo1139/Hermes-3-Llama-3.1-8B-FP8-Dynamic")
                .addUserMessage("Give a Python Fizzbuzz solution in one line of code?")
                .temperature(0.5)
                .maxTokens(150L)
                .build();

        // Print each content delta as it arrives; try-with-resources closes
        // the underlying HTTP stream when iteration finishes.
        try (StreamResponse<ChatCompletionChunk> stream =
                client.chat().completions().createStreaming(params)) {
            stream.stream()
                    .flatMap(chunk -> chunk.choices().stream())
                    .flatMap(choice -> choice.delta().content().stream())
                    .forEach(System.out::print);
        }
        System.out.println();
    }
}
Using Qwen 3 Coder (Specialized for Coding)
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.core.http.StreamResponse;
import com.openai.models.chat.completions.ChatCompletionChunk;
import com.openai.models.chat.completions.ChatCompletionCreateParams;

public class QwenStreamExample {
    public static void main(String[] args) {
        OpenAIClient client = OpenAIOkHttpClient.builder()
                .apiKey("choose-any-value")
                .baseUrl("https://qwen.ai.unturf.com/v1")
                .build();

        ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
                .model("hf.co/unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF:Q4_K_M")
                .addUserMessage("Give a Python Fizzbuzz solution in one line of code?")
                .temperature(0.5)
                .maxTokens(150L)
                .build();

        // Print each content delta as it arrives; try-with-resources closes
        // the underlying HTTP stream when iteration finishes.
        try (StreamResponse<ChatCompletionChunk> stream =
                client.chat().completions().createStreaming(params)) {
            stream.stream()
                    .flatMap(chunk -> chunk.choices().stream())
                    .flatMap(choice -> choice.delta().content().stream())
                    .forEach(System.out::print);
        }
        System.out.println();
    }
}
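Interactive applications often want the complete text as well as the live output. One way to get both is to accumulate the deltas while printing them; the sketch below wraps this in a hypothetical helper (streamAndCollect is our name) and assumes a client and params built exactly as in the examples above:

import com.openai.client.OpenAIClient;
import com.openai.core.http.StreamResponse;
import com.openai.models.chat.completions.ChatCompletionChunk;
import com.openai.models.chat.completions.ChatCompletionCreateParams;

public class StreamCollector {
    // Stream a completion, echoing each delta live and returning the
    // complete text once the stream ends.
    static String streamAndCollect(OpenAIClient client, ChatCompletionCreateParams params) {
        StringBuilder full = new StringBuilder();
        try (StreamResponse<ChatCompletionChunk> stream =
                client.chat().completions().createStreaming(params)) {
            stream.stream()
                    .flatMap(chunk -> chunk.choices().stream())
                    .flatMap(choice -> choice.delta().content().stream())
                    .forEach(delta -> {
                        System.out.print(delta); // real-time feedback
                        full.append(delta);      // keep the full response
                    });
        }
        return full.toString();
    }
}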
Text-to-Speech Example
Generate spoken audio from text using the TTS endpoint. The audio is saved as an MP3 file.
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.audio.speech.SpeechCreateParams;
import com.openai.models.audio.speech.SpeechModel;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;

public class TTSExample {
    public static void main(String[] args) throws IOException {
        OpenAIClient client = OpenAIOkHttpClient.builder()
                .apiKey("YOLO")
                .baseUrl("https://speech.ai.unturf.com/v1")
                .build();

        SpeechCreateParams params = SpeechCreateParams.builder()
                .model(SpeechModel.TTS_1)
                .voice(SpeechCreateParams.Voice.ALLOY)
                .input("I think so therefore, Today is a wonderful day to grow something people love!")
                .speed(0.9)
                .build();

        // The speech endpoint returns the audio as a raw HTTP body; copy it
        // straight into an MP3 file.
        try (InputStream body = client.audio().speech().create(params).body();
             FileOutputStream fos = new FileOutputStream("speech.mp3")) {
            body.transferTo(fos);
        }
    }
}
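The same builder accepts the other standard OpenAI voice constants and a speed multiplier. A minimal variation follows, with the caveat that support for voices beyond alloy on this endpoint is our assumption:

import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.audio.speech.SpeechCreateParams;
import com.openai.models.audio.speech.SpeechModel;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;

public class TTSVoiceExample {
    public static void main(String[] args) throws IOException {
        OpenAIClient client = OpenAIOkHttpClient.builder()
                .apiKey("YOLO")
                .baseUrl("https://speech.ai.unturf.com/v1")
                .build();

        // Same request shape as above with a different voice and faster speech.
        // NOVA is a standard SDK voice constant; endpoint support is assumed.
        SpeechCreateParams params = SpeechCreateParams.builder()
                .model(SpeechModel.TTS_1)
                .voice(SpeechCreateParams.Voice.NOVA)
                .input("The same compatible API, a different voice.")
                .speed(1.2)
                .build();

        try (InputStream body = client.audio().speech().create(params).body();
             FileOutputStream fos = new FileOutputStream("speech-nova.mp3")) {
            body.transferTo(fos);
        }
    }
}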