Building your own ChatGPT clone has never been easier. With the Fency SDK and Vercel AI Elements, you can deliver real-time AI chat experiences with elegant UI components to your users in minutes!
Here’s an example of what your clone will look like:

1. Create a new Vite project
Start with a fresh React + TypeScript project:
npm create vite@latest chatgpt-clone -- --template react-ts
cd chatgpt-clone
npm install
npm run dev
You should now see the default Vite app running at http://localhost:5173.
2. Add TailwindCSS
Install Tailwind and its Vite plugin:
Import tailwind at the very top of your index.css:
3. Configure TypeScript paths required by shadcn
Add baseUrl and paths to compilerOptions in tsconfig.json:
{
"files": [],
"references": [
{ "path": "./tsconfig.app.json" },
{ "path": "./tsconfig.node.json" }
],
"compilerOptions": {
"baseUrl": ".",
"paths": {
"@/*": ["./src/*"]
}
}
}
Add the same baseUrl and paths to compilerOptions in tsconfig.app.json:
{
"compilerOptions": {
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
"target": "ES2022",
"useDefineForClassFields": true,
"lib": ["ES2022", "DOM", "DOM.Iterable"],
"module": "ESNext",
"types": ["vite/client"],
"skipLibCheck": true,
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"verbatimModuleSyntax": true,
"moduleDetection": "force",
"noEmit": true,
"jsx": "react-jsx",
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"erasableSyntaxOnly": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedSideEffectImports": true,
"baseUrl": ".",
"paths": {
"@/*": ["./src/*"]
}
},
"include": ["src"]
}
4. Configure Vite for Tailwind
Install @types/node to enable path and __dirname:
npm install -D @types/node
Update vite.config.ts so Tailwind and path aliases work:
import tailwindcss from '@tailwindcss/vite'
import react from '@vitejs/plugin-react'
import path from 'path'
import { defineConfig } from 'vite'

// Absolute path to the project's src directory, used for the "@" alias so
// imports like "@/components/..." resolve the same way here and in tsconfig.
const srcDir = path.resolve(__dirname, 'src')

export default defineConfig({
  // React fast-refresh plus the Tailwind v4 Vite plugin.
  plugins: [react(), tailwindcss()],
  resolve: {
    alias: {
      '@': srcDir,
    },
  },
})
5. Setup shadcn and Vercel AI Elements
Initialize shadcn:
You will be asked a few questions to configure components.json.
Add Vercel AI Elements:
Finally, update index.css so the styling for streamdown works correctly. The start of your index.css should now look like this:
@import "tailwindcss";
@import "tw-animate-css";
@source "../node_modules/streamdown/dist/index.js";
@custom-variant dark (&:is(.dark *));
:root {
6. Add Fency.ai
We’ll use Fency to utilize LLMs in React. If you haven’t already:
Sign up at app.fency.ai/signup
Create a new publishable key in the dashboard
Install the npm packages:
Update main.tsx to include the FencyProvider and your newly created publishable key:
import { loadFency } from '@fencyai/js'
import { FencyProvider } from '@fencyai/react'
import { StrictMode } from 'react'
import { createRoot } from 'react-dom/client'
import App from './App.tsx'
import './index.css'

// Initialize the Fency client once at module load; FencyProvider makes it
// available to every Fency hook in the component tree below.
const fency = loadFency({
  publishableKey: 'fency_pk_replace_with_your_own',
})

const container = document.getElementById('root')!

createRoot(container).render(
  <StrictMode>
    <FencyProvider fency={fency}>
      <App />
    </FencyProvider>
  </StrictMode>
)
7. Build the App
Update your App.tsx
to include a simple app that uses Vercel AI Elements to render a ChatGPT-like interface:
import {
  Conversation,
  ConversationContent,
  ConversationEmptyState,
  ConversationScrollButton,
} from '@/components/ai-elements/conversation'
import { Message, MessageContent } from '@/components/ai-elements/message'
import {
  PromptInput,
  PromptInputTextarea,
} from '@/components/ai-elements/prompt-input'
import { Response } from '@/components/ai-elements/response'
import { Suggestion, Suggestions } from '@/components/ai-elements/suggestion'
import { useStreamingChatCompletions } from '@fencyai/react'
import { MessageSquare } from 'lucide-react'
import { useMemo, useState } from 'react'
import { v4 as uuidv4 } from 'uuid'

// A single entry in the rendered transcript. Named ChatMessage (not Message)
// so it does not collide with the Message component imported from
// ai-elements above — declaring `interface Message` alongside that value
// import is a TS2440 error ("Import declaration conflicts with local
// declaration").
interface ChatMessage {
  id: string
  from: 'user' | 'assistant'
  content: string
}

export default function App() {
  const [prompt, setPrompt] = useState('')
  const { createStreamingChatCompletion, latest } =
    useStreamingChatCompletions()

  // Derive the visible transcript from the latest streaming completion:
  // the prompt context sent to the model (minus system messages), plus the
  // in-progress assistant response once it starts streaming.
  const messages = useMemo(() => {
    const latestContext: ChatMessage[] =
      latest?.prompt.generic?.messages.flatMap((message) => {
        if (message.role === 'system') {
          return []
        }
        return {
          id: uuidv4(),
          from: message.role,
          content: message.content,
        }
      }) ?? []
    if (latest?.response) {
      latestContext.push({
        id: uuidv4(),
        from: 'assistant',
        content: latest.response,
      })
    }
    return latestContext
  }, [latest])

  // Start a new streaming completion: the conversation so far, followed by
  // the user's new input. Clears the textarea immediately.
  const onSubmit = (input: string) => {
    setPrompt('')
    createStreamingChatCompletion({
      generic: {
        messages: [
          ...messages.map((message) => ({
            role: message.from,
            content: message.content,
          })),
          { role: 'user', content: input },
        ],
        model: 'gpt-4.1-nano',
      },
    })
  }

  const suggestions = [
    'Show me a table of 5 famous actors and their most popular movies.',
    'What are the latest trends in AI?',
  ]

  return (
    <div className="w-screen h-screen flex justify-center bg-gray-100">
      <div className="max-w-4xl h-[60vh] bg-white rounded-t-lg mt-4">
        <Conversation style={{ height: '100%' }}>
          <ConversationContent>
            {messages.length === 0 ? (
              <ConversationEmptyState
                icon={<MessageSquare className="size-12" />}
                title="No messages yet"
                description="Start a conversation to see messages here"
              />
            ) : (
              messages.map((message) => (
                <Message from={message.from} key={message.id}>
                  <MessageContent>
                    <Response>{message.content}</Response>
                  </MessageContent>
                </Message>
              ))
            )}
          </ConversationContent>
          <ConversationScrollButton />
        </Conversation>
        <Suggestions className="w-full mx-auto relative bg-white p-4">
          {suggestions.map((suggestion) => (
            <Suggestion
              key={suggestion}
              suggestion={suggestion}
              onClick={() => onSubmit(suggestion)}
            />
          ))}
        </Suggestions>
        <PromptInput
          onSubmit={() => onSubmit(prompt)}
          className="w-full mx-auto relative bg-white p-4 rounded-b-lg"
        >
          <PromptInputTextarea
            value={prompt}
            placeholder="Say something..."
            onChange={(e) => setPrompt(e.currentTarget.value)}
            className="pr-12"
          />
        </PromptInput>
      </div>
    </div>
  )
}
8. Try It Out 🚀
Run the app with npm run dev
Try out your very own ChatGPT clone!
The complete codebase is available at https://github.com/fencyai/chatgpt-clone-example