uncloseai.
Go Examples - Free LLM & TTS AI Service
Go Examples
This page demonstrates how to use the uncloseai. API endpoints with Go using the official OpenAI Go client library. All examples use the same OpenAI-compatible API interface, making it easy to switch between different models and endpoints.
Available Endpoints:
- Hermes: https://hermes.ai.unturf.com/v1 — general-purpose conversational AI
- Qwen 3 Coder: https://qwen.ai.unturf.com/v1 — specialized coding model
- TTS: https://speech.ai.unturf.com/v1 — text-to-speech generation
Go Client Installation
To install the official OpenAI Go package, run:
go get github.com/openai/openai-go/v3@v3.3.0
Non-Streaming Examples
Non-streaming mode waits for the complete response before returning. This is simpler to use but provides no intermediate feedback during generation.
Using Hermes (General Purpose)
package main
import (
"context"
"fmt"
"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
client := openai.NewClient(
option.WithBaseURL("https://hermes.ai.unturf.com/v1"),
option.WithAPIKey("choose-any-value"),
)
response, err := client.Chat.Completions.New(context.Background(), openai.ChatCompletionNewParams{
Model: openai.F("adamo1139/Hermes-3-Llama-3.1-8B-FP8-Dynamic"),
Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
openai.UserMessage("Give a Python Fizzbuzz solution in one line of code?"),
}),
Temperature: openai.Float(0.5),
MaxTokens: openai.Int(150),
})
if err != nil {
panic(err)
}
fmt.Println(response.Choices[0].Message.Content)
}
Using Qwen 3 Coder (Specialized for Coding)
package main
import (
"context"
"fmt"
"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
client := openai.NewClient(
option.WithBaseURL("https://qwen.ai.unturf.com/v1"),
option.WithAPIKey("choose-any-value"),
)
response, err := client.Chat.Completions.New(context.Background(), openai.ChatCompletionNewParams{
Model: openai.F("hf.co/unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF:Q4_K_M"),
Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
openai.UserMessage("Give a Python Fizzbuzz solution in one line of code?"),
}),
Temperature: openai.Float(0.5),
MaxTokens: openai.Int(150),
})
if err != nil {
panic(err)
}
fmt.Println(response.Choices[0].Message.Content)
}
Streaming Examples
Streaming mode returns chunks of the response as they are generated, providing real-time feedback. This is ideal for interactive applications and long responses.
Using Hermes (General Purpose)
package main
import (
"context"
"fmt"
"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
client := openai.NewClient(
option.WithBaseURL("https://hermes.ai.unturf.com/v1"),
option.WithAPIKey("choose-any-value"),
)
stream := client.Chat.Completions.NewStreaming(context.Background(), openai.ChatCompletionNewParams{
Model: openai.F("adamo1139/Hermes-3-Llama-3.1-8B-FP8-Dynamic"),
Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
openai.UserMessage("Give a Python Fizzbuzz solution in one line of code?"),
}),
Temperature: openai.Float(0.5),
MaxTokens: openai.Int(150),
})
for stream.Next() {
chunk := stream.Current()
if len(chunk.Choices) > 0 && chunk.Choices[0].Delta.Content != "" {
fmt.Print(chunk.Choices[0].Delta.Content)
}
}
if err := stream.Err(); err != nil {
panic(err)
}
}
Using Qwen 3 Coder (Specialized for Coding)
package main
import (
"context"
"fmt"
"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
client := openai.NewClient(
option.WithBaseURL("https://qwen.ai.unturf.com/v1"),
option.WithAPIKey("choose-any-value"),
)
stream := client.Chat.Completions.NewStreaming(context.Background(), openai.ChatCompletionNewParams{
Model: openai.F("hf.co/unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF:Q4_K_M"),
Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
openai.UserMessage("Give a Python Fizzbuzz solution in one line of code?"),
}),
Temperature: openai.Float(0.5),
MaxTokens: openai.Int(150),
})
for stream.Next() {
chunk := stream.Current()
if len(chunk.Choices) > 0 && chunk.Choices[0].Delta.Content != "" {
fmt.Print(chunk.Choices[0].Delta.Content)
}
}
if err := stream.Err(); err != nil {
panic(err)
}
}
Text-to-Speech Example
Generate audio speech from text using the TTS endpoint. The audio is saved as an MP3 file.
package main
import (
"context"
"io"
"os"
"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
client := openai.NewClient(
option.WithBaseURL("https://speech.ai.unturf.com/v1"),
option.WithAPIKey("YOLO"),
)
response, err := client.Audio.Speech.New(context.Background(), openai.AudioSpeechNewParams{
Model: openai.F(openai.SpeechModelTTS1),
Voice: openai.F(openai.AudioSpeechNewParamsVoiceAlloy),
Input: openai.F("I think so therefore, Today is a wonderful day to grow something people love!"),
Speed: openai.Float(0.9),
})
if err != nil {
panic(err)
}
defer response.Body.Close()
file, err := os.Create("speech.mp3")
if err != nil {
panic(err)
}
defer file.Close()
_, err = io.Copy(file, response.Body)
if err != nil {
panic(err)
}
}