Ship production-ready AI features

In minutes, not months

No backend needed

You only need this

import { useBasicChatCompletions } from '@fencyai/react'

export default function BasicChatCompletionExample() {
    // `latest` tracks the most recent completion: its data, error, and loading state.
    const { createBasicChatCompletion, latest } = useBasicChatCompletions()

    const response = latest?.data?.response
    const error = latest?.error
    const loading = latest?.loading

    return (
        <div>
            <button
                onClick={async () => {
                    // Call the model straight from the browser; no backend route needed.
                    await createBasicChatCompletion({
                        openai: {
                            model: 'gpt-4o-mini',
                            messages: [
                                {
                                    role: 'user',
                                    content: 'Hello, how are you?',
                                },
                            ],
                        },
                    })
                }}
                disabled={loading}
            >
                Send Message
            </button>
            {error && <div>{error.message}</div>}
            {response && <div>{response}</div>}
        </div>
    )
}

Start for free

And go pro later, alligator

Hobby

Free

Pay only for what you use

Access models from OpenAI, Anthropic, Gemini, and more

Perfect for prototyping

Get started

Pro (coming soon)

$49 / month

Authenticated chat completions

Connect chat history to users

Track token usage for users

Production-ready

Get started

FAQ

Can I stream responses?

Yes! Streaming responses can be very tedious to set up yourself, but with Fency it's supported out of the box.
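
For a sense of what "tedious" means, this is roughly what hand-rolling a streaming reader over fetch looks like. The endpoint URL and request shape below are placeholders for illustration, not the Fency API.

// Roughly what hand-rolled streaming looks like without Fency.
// The endpoint URL and request body are placeholders, not a real API.
async function streamCompletion(
    prompt: string,
    onChunk: (text: string) => void
): Promise<void> {
    const res = await fetch('https://api.example.com/chat/stream', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ prompt }),
    })
    if (!res.ok || !res.body) {
        throw new Error(`Request failed with status ${res.status}`)
    }
    const reader = res.body.getReader()
    const decoder = new TextDecoder()
    while (true) {
        const { done, value } = await reader.read()
        if (done || !value) break
        // Decode each chunk as it arrives and hand it to the UI.
        onChunk(decoder.decode(value, { stream: true }))
    }
}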

Can I use structured responses?

Yes! You can pass a Zod schema to the chat completion, giving you full control over the type of the response.
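
A sketch of what that could look like, reusing the hook from the example above. The `schema` option name is an assumption for illustration; check the Fency docs for the real one.

import { z } from 'zod'
import { useBasicChatCompletions } from '@fencyai/react'

// Zod schema describing the exact shape we want back from the model.
const Recipe = z.object({
    title: z.string(),
    ingredients: z.array(z.string()),
})

export default function StructuredExample() {
    const { createBasicChatCompletion } = useBasicChatCompletions()

    return (
        <button
            onClick={() =>
                createBasicChatCompletion({
                    openai: {
                        model: 'gpt-4o-mini',
                        messages: [
                            { role: 'user', content: 'Suggest a pancake recipe.' },
                        ],
                    },
                    // Hypothetical option name for passing the schema.
                    schema: Recipe,
                })
            }
        >
            Get recipe
        </button>
    )
}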

Can I extract text from files?

Yes! We provide a file uploads API; the text content of each uploaded file is automatically extracted for easy use with LLMs.
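
As a sketch only, an upload-and-extract helper might look like this. The endpoint URL and the `text` field on the response are assumptions, not the documented API.

// Sketch of a text-extraction upload; endpoint and response shape assumed.
async function extractText(file: File): Promise<string> {
    const form = new FormData()
    form.append('file', file)
    const res = await fetch('https://api.example.com/files', {
        method: 'POST',
        body: form,
    })
    if (!res.ok) {
        throw new Error(`Upload failed with status ${res.status}`)
    }
    // Assumed response shape: the extracted text, ready to drop into a prompt.
    const { text } = (await res.json()) as { text: string }
    return text
}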

Can I scrape websites?

Yes! We provide a scraping API where you can extract the raw HTML or text content of a website for easy use with LLMs.
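
A minimal sketch, again with a placeholder endpoint and query parameters rather than the documented API.

// Sketch of fetching a page's text content via a scraping API;
// the endpoint and query parameters are placeholders.
async function scrapeText(url: string): Promise<string> {
    const res = await fetch(
        `https://api.example.com/scrape?url=${encodeURIComponent(url)}&format=text`
    )
    if (!res.ok) {
        throw new Error(`Scrape failed with status ${res.status}`)
    }
    return res.text()
}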

No backend? How does that work?

We give you a publishable key that accesses the Fency API, so you can make chat completions without handling your own secret API key. You can set hard spending limits on your publishable keys to avoid surprise costs.
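
As a sketch under stated assumptions: the `FencyProvider` component and `publishableKey` prop below are illustrative guesses at how a publishable key might be wired up, not the documented setup.

import { FencyProvider } from '@fencyai/react' // hypothetical component name
// The component from the example at the top of this page.
import BasicChatCompletionExample from './BasicChatCompletionExample'

export default function App() {
    return (
        // Publishable keys are safe to ship to the browser; the hard
        // spending limit is enforced on Fency's side, not in this code.
        <FencyProvider publishableKey="pk_...">
            <BasicChatCompletionExample />
        </FencyProvider>
    )
}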
