Reputation: 11
After I ask the AI a question and it responds, the conversation is not being persisted. I've added console.log statements in my route.ts file, and they suggest that both the user's message and the AI's message are being saved to the database, but when I refresh my application the messages are gone.
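My /api/get-messages route isn't shown below; it is essentially just a lookup of the messages table by chatId, along these lines (simplified):

// app/api/get-messages/route.ts (simplified)
import { db } from '@/lib/db';
import { messages } from '@/lib/db/schema';
import { eq } from 'drizzle-orm';
import { NextResponse } from 'next/server';

export async function POST(req: Request) {
  const { chatId } = await req.json();
  const _messages = await db
    .select()
    .from(messages)
    .where(eq(messages.chatId, chatId));
  return NextResponse.json(_messages);
}

Here are the chat component, the /api/chat route, and my Drizzle schema: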
"use client";
import React, { useEffect } from "react";
import { Input } from "./ui/input";
import { useChat } from "ai/react";
import { Button } from "./ui/button";
import { Send } from "lucide-react";
import MessageList from "./MessageList";
import { useQuery } from "@tanstack/react-query";
import axios from "axios";
import { Message } from "ai";
type Props = {chatId: number};
const ChatComponent = ({chatId}: Props) => {
const { data } = useQuery({
queryKey: ["chat", chatId],
queryFn: async () => {
const response = await axios.post<Message[]>("/api/get-messages", chatId)
return response.data
}
})
const { input, handleInputChange, handleSubmit, messages } = useChat({
api: '/api/chat',
body: {
chatId
},
initialMessages: Array.isArray(data) ? data : []
});
useEffect(() => {
const messageContainer = document.getElementById('message-container')
if (messageContainer) {
messageContainer.scrollTo({
top: messageContainer.scrollHeight,
behavior: 'smooth'
})
}
}, [messages])
return (
<div className="relative max-h-screen overflow-scroll" id='message-container'>
<div className="sticky top-0 inset-x-0 p-2 bg-white h-fit">
<h3 className="text-xl font-bold">Chat</h3>
</div>
<MessageList messages={messages}/>
<form
onSubmit={handleSubmit}
className="sticky bottom-0 inset-x-0 px-2 py-4 bg-white"
>
<div className="flex">
<Input
value={input}
onChange={handleInputChange}
placeholder="Ask any Question..."
className="w-full"
/>
<Button className="bg-blue-600 ml-2 relative ">
<Send className="w-4 h-4" />
</Button>
</div>
</form>
</div>
);
};
export default ChatComponent;
import { Configuration, OpenAIApi } from 'openai-edge'
import { OpenAIStream, StreamingTextResponse, Message } from 'ai'
import { getContext } from '@/lib/context'
import { db } from '@/lib/db'
import { chats, messages as _messages } from '@/lib/db/schema'
import { eq } from 'drizzle-orm'
import { NextResponse } from 'next/server'

// the edge runtime makes it faster when deployed to Vercel
export const runtime = 'edge'

const config = new Configuration({
  apiKey: process.env.OPENAI_API_KEY
})
const openai = new OpenAIApi(config)

export async function POST(req: Request) {
  try {
    const { messages, chatId } = await req.json()
    const _chats = await db.select().from(chats).where(eq(chats.id, chatId))
    if (_chats.length !== 1) {
      return NextResponse.json({ error: 'chat not found' }, { status: 404 })
    }
    const fileKey = _chats[0].fileKey
    const lastMessage = messages[messages.length - 1]
    const context = await getContext(lastMessage.content, fileKey)
    const prompt = {
      role: "system",
      content: `AI assistant is a brand new, powerful, human-like artificial intelligence.
The traits of AI include expert knowledge, helpfulness, cleverness, and articulateness.
AI is a well-behaved and well-mannered individual.
AI is always friendly, kind, and inspiring, and he is eager to provide vivid and thoughtful responses to the user.
AI has the sum of all knowledge in their brain, and is able to accurately answer nearly any question about any topic in conversation.
AI assistant is a big fan of Pinecone and Vercel.
START CONTEXT BLOCK
${context}
END OF CONTEXT BLOCK
AI assistant will take into account any CONTEXT BLOCK that is provided in a conversation.
If the context does not provide the answer to the question, the AI assistant will say, "I'm sorry, but I don't know the answer to that question".
AI assistant will not apologize for previous responses, but instead will indicate that new information was gained.
AI assistant will not invent anything that is not drawn directly from the context.
`,
    };
    const response = await openai.createChatCompletion({
      model: 'gpt-3.5-turbo',
      messages: [
        prompt,
        ...messages.filter((message: Message) => message.role === 'user')
      ],
      stream: true
    })
    const stream = OpenAIStream(response, {
      onStart: async () => {
        // save the user's message to the db
        await db.insert(_messages).values({
          chatId,
          messageContent: lastMessage.content,
          role: "user"
        })
        console.log("saved user message to db")
      },
      onCompletion: async (completion) => {
        // save the ai's message to the db
        await db.insert(_messages).values({
          chatId,
          messageContent: completion,
          role: "system"
        })
        console.log("saved ai message to db")
      }
    })
    return new StreamingTextResponse(stream)
  } catch (error) {
    console.log('error in /api/chat', error)
    // return an error response so the route never resolves to undefined
    return NextResponse.json({ error: 'internal server error' }, { status: 500 })
  }
}
import { integer, pgEnum, pgTable, serial, text, timestamp, varchar } from 'drizzle-orm/pg-core'

export const userSystemEnum = pgEnum('user_system_enum', ['system', 'user'])

export const chats = pgTable('chats', {
  id: serial('id').primaryKey(),
  pdfName: text('pdf_name').notNull(),
  pdfUrl: text('pdf_url').notNull(),
  createdAt: timestamp('created_at').notNull().defaultNow(),
  userId: varchar('user_id', { length: 256 }).notNull(),
  fileKey: text('file_key').notNull(),
})

export type DrizzleChat = typeof chats.$inferSelect

export const messages = pgTable('messages', {
  id: serial('id').primaryKey(),
  chatId: integer('chat_id').references(() => chats.id).notNull(),
  messageContent: text('content').notNull(),
  createdAt: timestamp('created_at').notNull().defaultNow(),
  role: userSystemEnum('role').notNull(),
})
Upvotes: 0
Views: 144
Reputation: 51
You can use the onFinish callback on the streamText function. It is triggered once the model's response and all tool executions have completed.
Have you looked at the example provided by Vercel?
import { openai } from '@ai-sdk/openai';
import { streamText, convertToCoreMessages } from 'ai';

// Allow streaming responses up to 30 seconds
export const maxDuration = 30;

export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = await streamText({
    model: openai('gpt-4-turbo'),
    messages: convertToCoreMessages(messages),
    async onFinish({ text, toolCalls, toolResults, usage, finishReason }) {
      // implement your own storage logic:
      await saveChat({ text, toolCalls, toolResults });
    },
  });

  return result.toDataStreamResponse();
}
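Adapted to your schema, saveChat could be a thin wrapper around a Drizzle insert. A minimal sketch (untested; saveChat and its argument shape are placeholders, and since your userSystemEnum only allows 'system' and 'user', the model's reply is stored as 'system', matching your current onCompletion code):

import { db } from '@/lib/db';
import { messages } from '@/lib/db/schema';

// Hypothetical helper — the argument shape is up to you.
type SaveChatArgs = {
  chatId: number;       // which chat the exchange belongs to
  userMessage: string;  // the question the user just sent
  completion: string;   // the model's full response text
};

export async function saveChat({ chatId, userMessage, completion }: SaveChatArgs) {
  // Persist both sides of the exchange in a single insert.
  await db.insert(messages).values([
    { chatId, messageContent: userMessage, role: 'user' },
    // the enum has no 'assistant' value, so the reply is stored as 'system'
    { chatId, messageContent: completion, role: 'system' },
  ]);
}

Inside onFinish you would then call something like await saveChat({ chatId, userMessage: lastMessage.content, completion: text }), forwarding chatId through the request body the way your current /api/chat route already does.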
Upvotes: 0