Node.js Examples

This page demonstrates how to use the uncloseai. API endpoints with Node.js using the OpenAI client library. All examples use the same OpenAI-compatible API interface, making it easy to switch between different models and endpoints.

Available Endpoints:

- Hermes (general purpose): https://hermes.ai.unturf.com/v1
- Qwen 3 Coder (coding): https://qwen.ai.unturf.com/v1
- Text-to-Speech: https://speech.ai.unturf.com/v1
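
Because every endpoint speaks the same OpenAI-compatible protocol, one small helper can build a client for whichever service you need. This is a minimal sketch; makeClient is a hypothetical convenience, not part of the openai library, and the placeholder API key mirrors the examples below.

const OpenAI = require('openai');

// Hypothetical helper: build a client for any uncloseai. endpoint.
// The examples on this page use placeholder API keys.
function makeClient(baseURL) {
    return new OpenAI({ baseURL, apiKey: "dummy-api-key" });
}

const hermes = makeClient("https://hermes.ai.unturf.com/v1");
const qwen = makeClient("https://qwen.ai.unturf.com/v1");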

Node.js Client Installation

To install the OpenAI package for Node.js, add it to the dependencies in your package.json:

{
  "dependencies": {
    "openai": "6.3.0"
  }
}

Run the following command to install it:

npm install
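
Alternatively, install the package directly from the command line:

npm install openai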

Non-Streaming Examples

Non-streaming mode waits for the complete response before returning. This is simpler to use but provides no intermediate feedback during generation.

Using Hermes (General Purpose)

const OpenAI = require('openai');

const client = new OpenAI({
    baseURL: "https://hermes.ai.unturf.com/v1",
    apiKey: "dummy-api-key",
});

const MODEL = "adamo1139/Hermes-3-Llama-3.1-8B-FP8-Dynamic";

const messages = [{"role": "user", "content": "Give a Python Fizzbuzz solution in one line of code?"}];

async function getResponse() {
    try {
        const response = await client.chat.completions.create({
            model: MODEL,
            messages: messages,
            temperature: 0.5,
            max_tokens: 150,
        });

        console.log(response.choices[0].message.content);
    } catch (error) {
        console.error("Error:", error.response ? error.response.data : error.message);
    }
}

getResponse();

Using Qwen 3 Coder (Specialized for Coding)

const OpenAI = require('openai');

const client = new OpenAI({
    baseURL: "https://qwen.ai.unturf.com/v1",
    apiKey: "dummy-api-key",
});

const MODEL = "hf.co/unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF:Q4_K_M";

const messages = [{"role": "user", "content": "Give a Python Fizzbuzz solution in one line of code?"}];

async function getResponse() {
    try {
        const response = await client.chat.completions.create({
            model: MODEL,
            messages: messages,
            temperature: 0.5,
            max_tokens: 150,
        });

        console.log(response.choices[0].message.content);
    } catch (error) {
        console.error("Error:", error.response ? error.response.data : error.message);
    }
}

getResponse();

Streaming Examples

Streaming mode returns chunks of the response as they are generated, providing real-time feedback. This is ideal for interactive applications and long responses.

Using Hermes (General Purpose)

const OpenAI = require('openai');

const client = new OpenAI({
    baseURL: "https://hermes.ai.unturf.com/v1",
    apiKey: "dummy-api-key",
});

const MODEL = "adamo1139/Hermes-3-Llama-3.1-8B-FP8-Dynamic";

const messages = [{"role": "user", "content": "Give a Python Fizzbuzz solution in one line of code?"}];

async function streamResponse() {
    try {
        const stream = await client.chat.completions.create({
            model: MODEL,
            messages: messages,
            temperature: 0.5,
            max_tokens: 150,
            stream: true,  // Enable streaming
        });

        // Use the async iterator to read each chunk as it arrives
        for await (const chunk of stream) {
            const msg = chunk.choices[0]?.delta?.content;
            if (msg) process.stdout.write(msg);  // Skip role-only or empty deltas
        }
        process.stdout.write("\n");  // Finish the output with a newline
    } catch (error) {
        console.error("Error:", error.response ? error.response.data : error.message);
    }
}

streamResponse();

Using Qwen 3 Coder (Specialized for Coding)

const OpenAI = require('openai');

const client = new OpenAI({
    baseURL: "https://qwen.ai.unturf.com/v1",
    apiKey: "dummy-api-key",
});

const MODEL = "hf.co/unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF:Q4_K_M";

const messages = [{"role": "user", "content": "Give a Python Fizzbuzz solution in one line of code?"}];

async function streamResponse() {
    try {
        const stream = await client.chat.completions.create({
            model: MODEL,
            messages: messages,
            temperature: 0.5,
            max_tokens: 150,
            stream: true,  // Enable streaming
        });

        // Use the async iterator to read each chunk as it arrives
        for await (const chunk of stream) {
            const msg = chunk.choices[0]?.delta?.content;
            if (msg) process.stdout.write(msg);  // Skip role-only or empty deltas
        }
        process.stdout.write("\n");  // Finish the output with a newline
    } catch (error) {
        console.error("Error:", error.response ? error.response.data : error.message);
    }
}

streamResponse();
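
If your application needs the full text as well as live output, you can accumulate the chunks while streaming. A minimal sketch reusing the client, MODEL, and messages defined above; fullText is just an illustrative variable name.

async function streamAndCollect() {
    const stream = await client.chat.completions.create({
        model: MODEL,
        messages: messages,
        temperature: 0.5,
        max_tokens: 150,
        stream: true,
    });

    let fullText = "";
    for await (const chunk of stream) {
        const msg = chunk.choices[0]?.delta?.content;
        if (msg) {
            fullText += msg;            // Keep the complete response
            process.stdout.write(msg);  // Still print each chunk live
        }
    }
    return fullText;
}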

Text-to-Speech Example

Generate speech audio from text using the TTS endpoint. The audio is saved as an MP3 file.

const fs = require('fs');
const OpenAI = require('openai');

const client = new OpenAI({
    baseURL: "https://speech.ai.unturf.com/v1",
    apiKey: "YOLO",
});

async function getSpeech() {
    try {
        const response = await client.audio.speech.create({
            model: "tts-1",
            voice: "alloy",
            speed: 0.9,
            input: "I think so therefore, Today is a wonderful day to grow something people love!"
        });

        // The response carries the audio as binary data; buffer it and write the MP3
        const buffer = Buffer.from(await response.arrayBuffer());
        await fs.promises.writeFile("speech.mp3", buffer);
        console.log("Saved speech.mp3");
    } catch (error) {
        console.error("Error:", error.message);
    }
}

getSpeech();
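
For long inputs you may prefer to stream the audio to disk instead of buffering it all in memory. A sketch, assuming Node 17+ (for stream.Readable.fromWeb) and that the response body is exposed as a web ReadableStream, as it is in recent openai releases:

const { Readable } = require('stream');
const { pipeline } = require('stream/promises');

async function getSpeechStreaming() {
    const response = await client.audio.speech.create({
        model: "tts-1",
        voice: "alloy",
        speed: 0.9,
        input: "I think so therefore, Today is a wonderful day to grow something people love!"
    });

    // Pipe the audio stream straight into the file without buffering the whole payload
    await pipeline(Readable.fromWeb(response.body), fs.createWriteStream("speech.mp3"));
}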