RAG-Enhanced Chat Example

Build a chat application with document-grounded responses.

What You'll Build

A chat application that searches your uploaded documents and provides accurate, citation-backed answers. Perfect for customer support, documentation Q&A, and knowledge base applications.

Complete RAG Chat Application

TypeScript/Node.js

rag-chat.ts
import OpenAI from 'openai';
import * as readline from 'readline';

// OpenAI-compatible client pointed at the SuperAgentStack proxy.
// Two credentials are required: the OpenRouter API key (standard
// Authorization header) and the SuperAgentStack key sent as a
// custom default header on every request.
const client = new OpenAI({
  baseURL: 'https://superagentstack.orionixtech.com/api/v1',
  apiKey: process.env.OPENROUTER_KEY!,
  defaultHeaders: {
    'superAgentKey': process.env.SUPER_AGENT_KEY!,
  },
});

// One session id per process run, so memory/RAG context persists
// across all questions asked in this chat session.
const sessionId = `user-${Date.now()}`;

/**
 * Send one user message through the RAG-enabled chat endpoint.
 *
 * Enables document search (`useRAG`) and conversation memory
 * (`saveToMemory`) via SuperAgentStack's OpenAI-compatible extensions,
 * prints the answer plus any document sources, and returns the answer.
 *
 * @param userMessage - The raw question typed by the user.
 * @returns The assistant's reply text.
 * @throws Re-throws any API/network error after logging it.
 */
async function ragChat(userMessage: string) {
  try {
    const completion = await client.chat.completions.create({
      model: 'anthropic/claude-3-sonnet',
      messages: [
        {
          role: 'system',
          content: 'You are a helpful assistant. Always cite sources when answering from documents.'
        },
        {
          role: 'user',
          content: userMessage
        }
      ],
      sessionId,
      saveToMemory: true,
      useRAG: true,  // Enable RAG search
    });

    const response = completion.choices[0].message.content;
    // SuperAgentStack attaches RAG details on a non-standard field, so a
    // cast is unavoidable with the stock OpenAI client types.
    const metadata = (completion as any)._metadata;

    console.log(`\nAI: ${response}\n`);

    // Show sources if available
    if (metadata?.rag?.resultsFound > 0) {
      console.log('📚 Sources:');
      metadata.rag.sources?.forEach((source: any, i: number) => {
        console.log(`  ${i + 1}. ${source.filename} (relevance: ${(source.relevanceScore * 100).toFixed(0)}%)`);
      });
      console.log();
    } else {
      console.log('ℹ️  No documents found - answering from general knowledge\n');
    }

    return response;
  } catch (error: unknown) {
    // Narrow instead of typing the catch variable as `any`.
    const message = error instanceof Error ? error.message : String(error);
    console.error('Error:', message);
    throw error;
  }
}

// Interactive REPL: keeps prompting on stdin until the user types "exit".
async function main() {
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
  });

  console.log('RAG-Enhanced Chat started!');
  console.log('Ask questions about your uploaded documents.\n');

  const promptLoop = (): void => {
    rl.question('You: ', async (answer) => {
      const wantsExit = answer.toLowerCase() === 'exit';
      if (wantsExit) {
        console.log('Goodbye!');
        rl.close();
        return;
      }

      await ragChat(answer);
      promptLoop(); // re-arm the prompt for the next question
    });
  };

  promptLoop();
}

main();

Next.js Implementation

API Route with RAG

app/api/rag-chat/route.ts
import OpenAI from 'openai';
import { NextResponse } from 'next/server';

// Server-side client for the SuperAgentStack proxy. Both keys stay on
// the server because this module only runs inside a Next.js route handler.
const client = new OpenAI({
  baseURL: 'https://superagentstack.orionixtech.com/api/v1',
  apiKey: process.env.OPENROUTER_KEY!,
  defaultHeaders: {
    'superAgentKey': process.env.SUPER_AGENT_KEY!,
  },
});

/**
 * POST /api/rag-chat
 *
 * Body: { message: string; sessionId: string }
 * Returns the assistant's answer plus the RAG sources that backed it.
 */
export async function POST(req: Request) {
  const { message, sessionId } = await req.json();

  // Reject obviously bad input before spending an upstream API call.
  if (typeof message !== 'string' || message.trim() === '') {
    return NextResponse.json(
      { error: 'message is required' },
      { status: 400 }
    );
  }

  try {
    const completion = await client.chat.completions.create({
      model: 'anthropic/claude-3-sonnet',
      messages: [
        {
          role: 'system',
          content: 'You are a helpful assistant. Always cite sources when answering from documents.'
        },
        {
          role: 'user',
          content: message
        }
      ],
      sessionId,
      saveToMemory: true,
      useRAG: true,
    });

    const response = completion.choices[0].message.content;
    // RAG metadata lives on a non-standard field added by the proxy.
    const metadata = (completion as any)._metadata;

    return NextResponse.json({
      response,
      sources: metadata?.rag?.sources || [],
      resultsFound: metadata?.rag?.resultsFound || 0,
    });
  } catch (error: unknown) {
    // Narrow `unknown` instead of trusting `error.message` on `any`.
    const detail = error instanceof Error ? error.message : 'Unknown error';
    return NextResponse.json(
      { error: detail },
      { status: 500 }
    );
  }
}

React Component

app/rag-chat/page.tsx
'use client';

import { useState } from 'react';

interface Message {
  role: string;
  content: string;
  // Documents the answer was grounded in, as returned by /api/rag-chat.
  sources?: Array<{ filename: string; relevanceScore: number }>;
}

/**
 * Chat UI backed by the /api/rag-chat route. Renders the conversation
 * and, under each assistant reply, lists the documents (with relevance
 * scores) that the answer was grounded in.
 */
export default function RAGChatPage() {
  const [messages, setMessages] = useState<Message[]>([]);
  const [input, setInput] = useState('');
  const [isLoading, setIsLoading] = useState(false);
  // Lazy useState initializer: the id is minted once per mount, so every
  // message in the conversation shares one server-side session. A plain
  // `const sessionId = ...` here would create a new id on every render
  // and break conversation memory.
  const [sessionId] = useState(() => `user-${Date.now()}`);

  async function handleSubmit(e: React.FormEvent) {
    e.preventDefault();
    if (!input.trim() || isLoading) return;

    const userMessage = input;
    setInput('');
    setMessages(prev => [...prev, { role: 'user', content: userMessage }]);
    setIsLoading(true);

    try {
      const response = await fetch('/api/rag-chat', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ message: userMessage, sessionId }),
      });

      // Surface HTTP errors instead of rendering `undefined` content.
      if (!response.ok) {
        throw new Error(`Request failed with status ${response.status}`);
      }

      const data = await response.json();

      setMessages(prev => [
        ...prev,
        {
          role: 'assistant',
          content: data.response,
          sources: data.sources,
        },
      ]);
    } catch (error) {
      console.error('Error:', error);
    } finally {
      setIsLoading(false);
    }
  }

  return (
    <div className="flex flex-col h-screen max-w-4xl mx-auto p-4">
      <h1 className="text-2xl font-bold mb-4">RAG-Enhanced Chat</h1>
      
      <div className="flex-1 overflow-y-auto mb-4 space-y-4">
        {messages.map((m, i) => (
          <div key={i}>
            <div
              className={`p-4 rounded-lg ${
                m.role === 'user'
                  ? 'bg-blue-100 ml-auto max-w-[80%]'
                  : 'bg-gray-100 mr-auto max-w-[80%]'
              }`}
            >
              <div className="font-semibold mb-1">
                {m.role === 'user' ? 'You' : 'AI'}
              </div>
              <div className="whitespace-pre-wrap">{m.content}</div>
            </div>
            
            {/* Show sources */}
            {m.sources && m.sources.length > 0 && (
              <div className="mt-2 ml-4 text-sm text-gray-600">
                <div className="font-semibold mb-1">📚 Sources:</div>
                <ul className="list-disc list-inside">
                  {m.sources.map((source, idx) => (
                    <li key={idx}>
                      {source.filename} ({(source.relevanceScore * 100).toFixed(0)}% relevant)
                    </li>
                  ))}
                </ul>
              </div>
            )}
          </div>
        ))}
      </div>

      <form onSubmit={handleSubmit} className="flex gap-2">
        <input
          value={input}
          onChange={(e) => setInput(e.target.value)}
          placeholder="Ask about your documents..."
          className="flex-1 p-3 border rounded-lg"
          disabled={isLoading}
        />
        <button
          type="submit"
          disabled={isLoading}
          className="px-6 py-3 bg-blue-500 text-white rounded-lg"
        >
          {isLoading ? 'Searching...' : 'Ask'}
        </button>
      </form>
    </div>
  );
}

Using Custom RAG Queries

Sometimes you want to search for different terms than what the user asked:

custom-rag-query.ts
/**
 * Chat with RAG enabled, but search the document store using distilled
 * keywords instead of the user's full message.
 */
async function intelligentRAGChat(userMessage: string) {
  // Derive a focused search string before calling the API.
  const searchTerms = extractKeyTerms(userMessage);

  const completion = await client.chat.completions.create({
    model: 'anthropic/claude-3-sonnet',
    messages: [
      { role: 'user', content: userMessage },
    ],
    useRAG: true,
    ragQuery: searchTerms,  // Custom search query
  });

  const { content } = completion.choices[0].message;
  return content;
}

/**
 * Distil a message into a space-separated keyword string: lowercase,
 * strip basic punctuation, and keep only words longer than three
 * characters.
 */
function extractKeyTerms(message: string): string {
  const cleaned = message.toLowerCase().replace(/[?.,!]/g, '');

  const keywords: string[] = [];
  for (const word of cleaned.split(' ')) {
    if (word.length > 3) {
      keywords.push(word);
    }
  }

  return keywords.join(' ');
}

// Example
await intelligentRAGChat('How do I get my money back if I'm not satisfied?');
// Searches for: "money back satisfied refund policy"

Conditional RAG Usage

Enable RAG only for company-specific questions:

conditional-rag.ts
/**
 * Heuristic: treat a message as company-specific (and therefore worth a
 * document search) when it mentions any known business keyword.
 */
function shouldUseRAG(message: string): boolean {
  const companyKeywords = [
    'policy', 'pricing', 'product', 'service',
    'refund', 'shipping', 'support', 'account'
  ];

  const haystack = message.toLowerCase();
  for (const keyword of companyKeywords) {
    if (haystack.includes(keyword)) {
      return true;
    }
  }
  return false;
}

/**
 * Route a message through chat, enabling document search only when the
 * question looks company-specific.
 */
async function smartChat(userMessage: string) {
  const useRAG = shouldUseRAG(userMessage);

  console.log(`RAG ${useRAG ? 'enabled' : 'disabled'} for this query`);

  const completion = await client.chat.completions.create({
    model: 'anthropic/claude-3-sonnet',
    messages: [{ role: 'user', content: userMessage }],
    useRAG,
  });

  const [firstChoice] = completion.choices;
  return firstChoice.message.content;
}

// General knowledge - RAG disabled (no company keyword matches)
await smartChat('What is the capital of France?');

// Company-specific - RAG enabled ("refund" and "policy" both match)
await smartChat('What is your refund policy?');

With File Upload

Allow users to upload documents and immediately query them:

upload-and-query.ts
async function uploadAndQuery(filePath: string, question: string) {
  // 1. Upload the file
  console.log('Uploading file...');
  const formData = new FormData();
  const fileBuffer = await fs.readFile(filePath);
  const blob = new Blob([fileBuffer]);
  formData.append('file', blob, path.basename(filePath));

  const uploadResponse = await fetch(
    'https://superagentstack.orionixtech.com/api/rag/upload',
    {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${process.env.OPENROUTER_KEY}`,
        'superAgentKey': process.env.SUPER_AGENT_KEY!,
      },
      body: formData,
    }
  );

  const uploadResult = await uploadResponse.json();
  console.log(`✅ File uploaded: ${uploadResult.fileId}`);

  // 2. Wait for processing
  console.log('Processing file...');
  await new Promise(resolve => setTimeout(resolve, 5000));

  // 3. Query the file
  console.log('Querying file...');
  const completion = await client.chat.completions.create({
    model: 'anthropic/claude-3-sonnet',
    messages: [{ role: 'user', content: question }],
    useRAG: true,
  });

  return completion.choices[0].message.content;
}

// Usage: upload a PDF, then immediately ask a question about it.
const answer = await uploadAndQuery(
  './privacy-policy.pdf',
  'What does this document say about data retention?'
);
console.log(answer);

Python Example

rag_chat.py
from openai import OpenAI
import os
import time  # needed for the timestamp-based session id below (was missing)

# OpenAI-compatible client pointed at the SuperAgentStack proxy; the
# extra superAgentKey header is required on every request.
client = OpenAI(
    base_url="https://superagentstack.orionixtech.com/api/v1",
    api_key=os.environ.get("OPENROUTER_KEY"),
    default_headers={
        "superAgentKey": os.environ.get("SUPER_AGENT_KEY"),
    }
)

# One session id per process run, so memory/RAG context persists
# across all questions asked in this chat session.
session_id = f"user-{int(time.time())}"

def rag_chat(user_message: str):
    """Send one message with RAG search and memory enabled.

    Prints the assistant's answer, then any document sources that
    backed it, and returns the answer text.
    """
    completion = client.chat.completions.create(
        model="anthropic/claude-3-sonnet",
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant. Always cite sources."
            },
            {
                "role": "user",
                "content": user_message
            }
        ],
        session_id=session_id,
        save_to_memory=True,
        use_rag=True,
    )

    response = completion.choices[0].message.content
    # RAG details are attached on a non-standard attribute by the proxy.
    metadata = getattr(completion, '_metadata', {})

    print(f"\nAI: {response}\n")

    # Show sources, if the search returned any
    rag_info = metadata.get('rag', {})
    if rag_info.get('resultsFound', 0) > 0:
        print("📚 Sources:")
        for i, source in enumerate(rag_info.get('sources', []), 1):
            score = source['relevanceScore'] * 100
            print(f"  {i}. {source['filename']} ({score:.0f}% relevant)")
        print()

    return response

def main():
    """Interactive REPL: keep asking questions until the user types 'exit'."""
    print("RAG-Enhanced Chat started!")
    print("Ask questions about your uploaded documents.\n")

    while (user_input := input("You: ")).lower() != 'exit':
        try:
            rag_chat(user_input)
        except Exception as exc:
            # Keep the loop alive on API errors; just report and re-prompt.
            print(f"Error: {exc}\n")

    print("Goodbye!")


if __name__ == "__main__":
    main()

Best Practices

  • Show sources: Display which documents were used for the answer
  • Handle no results: Gracefully handle cases where no documents match
  • Use system prompts: Instruct the AI to cite sources
  • Conditional RAG: Disable RAG for general knowledge questions
  • Custom queries: Extract key terms for better search results
  • Monitor relevance: Check relevance scores to ensure quality

Pro Tip

Always show users which documents were used to generate the answer. This builds trust and allows users to verify information.

Next Steps