Building a Chatbot with Django and GPT-3
django python ai
Before the ChatGPT API, we had text-davinci-003. Here’s how to build a conversational chatbot with Django using OpenAI’s completion API. The principles apply to modern chat APIs too.
Setup
pip install openai django
# settings.py
import os  # required: the original snippet used os without importing it

# Read the key from the environment — never hard-code secrets in settings.
# Returns None if the variable is unset; the app will fail at first API call.
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
The Conversation Model
# chatbot/models.py
from django.db import models
from django.conf import settings
class Conversation(models.Model):
    """A chat session belonging to exactly one user."""

    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    created_at = models.DateTimeField(auto_now_add=True)

    def get_context(self, limit=10):
        """Return up to *limit* most recent messages, oldest first."""
        newest_first = self.messages.order_by('-created_at')[:limit]
        return list(reversed(newest_first))
class Message(models.Model):
    """One turn of a conversation: a user, assistant, or system utterance."""

    ROLE_CHOICES = [
        ('user', 'User'),
        ('assistant', 'Assistant'),
        ('system', 'System'),
    ]

    # Deleting a Conversation cascades to its messages; reverse accessor
    # is `conversation.messages`, which get_context() relies on.
    conversation = models.ForeignKey(
        Conversation,
        on_delete=models.CASCADE,
        related_name='messages',
    )
    role = models.CharField(max_length=10, choices=ROLE_CHOICES)
    content = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Default to chronological order so templates render turns in sequence.
        ordering = ['created_at']
OpenAI Service
# chatbot/services.py
import openai
from django.conf import settings
class ChatService:
    """Conversation-aware wrapper around the legacy OpenAI completion API."""

    def __init__(self):
        openai.api_key = settings.OPENAI_API_KEY
        # Explicit "\n" concatenation keeps the runtime prompt identical to
        # the original triple-quoted literal.
        self.system_prompt = (
            "You are a helpful assistant.\n"
            "You provide concise, accurate responses.\n"
            "If you don't know something, say so."
        )

    def build_prompt(self, conversation):
        """Flatten recent history into a single davinci-style prompt string."""
        lines = [self.system_prompt, ""]
        for msg in conversation.get_context():
            # Fix: the original labelled every non-user role "Assistant:",
            # so stored system messages were mislabeled in the prompt.
            if msg.role == 'user':
                lines.append(f"User: {msg.content}")
            elif msg.role == 'assistant':
                lines.append(f"Assistant: {msg.content}")
            else:  # 'system'
                lines.append(msg.content)
        # Trailing cue tells the model it is the assistant's turn.
        lines.append("Assistant:")
        return "\n".join(lines)

    def get_response(self, conversation, user_message):
        """Persist the user turn, call the model, persist and return the reply.

        Raises whatever openai raises on API failure (see the error-handling
        variant later in this article).
        """
        # Function-scope import: the original services.py snippet used
        # Message without importing it at all.
        from .models import Message

        Message.objects.create(
            conversation=conversation,
            role='user',
            content=user_message,
        )
        prompt = self.build_prompt(conversation)
        response = openai.Completion.create(
            model="text-davinci-003",
            prompt=prompt,
            max_tokens=500,
            temperature=0.7,
            stop=["User:"],  # stop before the model invents the next user turn
        )
        assistant_message = response.choices[0].text.strip()
        Message.objects.create(
            conversation=conversation,
            role='assistant',
            content=assistant_message,
        )
        return assistant_message
Views
# chatbot/views.py
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.views.decorators.http import require_POST
from .models import Conversation, Message
from .services import ChatService
@login_required
def chat_view(request, conversation_id=None):
    """Show an existing conversation, or create one and redirect into it."""
    # Guard clause: no id means "start a fresh conversation".
    if not conversation_id:
        fresh = Conversation.objects.create(user=request.user)
        return redirect('chatbot:chat', conversation_id=fresh.id)

    # Scoped to request.user so one user cannot open another's chat.
    conversation = get_object_or_404(
        Conversation, id=conversation_id, user=request.user
    )
    context = {
        'conversation': conversation,
        'messages': conversation.messages.all(),
    }
    return render(request, 'chatbot/chat.html', context)
@login_required
@require_POST
def send_message(request, conversation_id):
    """Accept a POSTed chat message and return the assistant reply as JSON."""
    conversation = get_object_or_404(
        Conversation, id=conversation_id, user=request.user
    )

    text = request.POST.get('message', '').strip()
    if not text:
        return JsonResponse({'error': 'Message required'}, status=400)

    reply = ChatService().get_response(conversation, text)
    return JsonResponse({
        'response': reply,
        'conversation_id': conversation.id,
    })
Template
<!-- templates/chatbot/chat.html -->
{% extends 'base.html' %}
{% block content %}
<div class="chat-container">
  <div id="messages" class="messages">
    {% for message in messages %}
      <div class="message {{ message.role }}">
        <strong>{{ message.role|title }}:</strong>
        {{ message.content }}
      </div>
    {% endfor %}
  </div>
  <form id="chat-form" class="chat-form">
    {% csrf_token %}
    <input type="text" name="message" id="message-input"
           placeholder="Type your message..." autocomplete="off">
    <button type="submit">Send</button>
  </form>
</div>
<script>
const form = document.getElementById('chat-form');
const input = document.getElementById('message-input');
const messagesDiv = document.getElementById('messages');

form.addEventListener('submit', async (e) => {
  e.preventDefault();
  const message = input.value.trim();
  if (!message) return;
  // Echo the user message immediately, then show a placeholder
  // that is replaced once the server responds.
  addMessage('user', message);
  input.value = '';
  const loadingDiv = addMessage('assistant', 'Thinking...');
  try {
    const response = await fetch('{% url "chatbot:send_message" conversation.id %}', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/x-www-form-urlencoded',
        'X-CSRFToken': '{{ csrf_token }}'
      },
      body: `message=${encodeURIComponent(message)}`
    });
    const data = await response.json();
    // Fix: surface server-side errors (rate limit, validation) instead of
    // rendering "undefined" when the JSON has no `response` key.
    setMessageBody(loadingDiv,
      response.ok ? data.response : (data.error || 'Something went wrong'));
  } catch (error) {
    setMessageBody(loadingDiv, 'Error: Could not get response');
  }
});

// Fix: the original built messages with innerHTML, interpolating raw user
// and model text — a DOM-based XSS vector. Build nodes with textContent.
function addMessage(role, content) {
  const div = document.createElement('div');
  div.className = `message ${role}`;
  const label = document.createElement('strong');
  label.textContent = role.charAt(0).toUpperCase() + role.slice(1) + ':';
  const body = document.createElement('span');
  body.className = 'msg-body';
  body.textContent = ' ' + content;
  div.append(label, body);
  messagesDiv.appendChild(div);
  messagesDiv.scrollTop = messagesDiv.scrollHeight;
  return div;
}

// Replace only the text part of a message, keeping the role label intact
// (the original overwrote the whole node, erasing the "Assistant:" label).
function setMessageBody(messageDiv, text) {
  messageDiv.querySelector('.msg-body').textContent = ' ' + text;
}
</script>
{% endblock %}
Streaming Responses
For better UX, stream responses:
# services.py
def get_streaming_response(self, conversation, user_message):
    """Yield the model's reply chunk-by-chunk, persisting both turns.

    The user message is saved immediately; the assistant message is saved
    only after the stream completes.
    """
    # Function-scope import: the services.py snippet never imported Message.
    from .models import Message

    Message.objects.create(
        conversation=conversation,
        role='user',
        content=user_message,
    )
    prompt = self.build_prompt(conversation)
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=prompt,
        max_tokens=500,
        temperature=0.7,
        stop=["User:"],
        stream=True,  # iterate incremental chunks instead of one response
    )

    full_response = ""
    for chunk in response:
        piece = chunk.choices[0].text
        full_response += piece
        yield piece

    # NOTE(review): if the stream aborts mid-way, the user turn is saved but
    # the assistant turn is not — acceptable for a demo, worth flagging.
    Message.objects.create(
        conversation=conversation,
        role='assistant',
        content=full_response,
    )
# views.py
from django.http import StreamingHttpResponse
@login_required
@require_POST  # fix: the view reads request.POST, so reject non-POST requests
def send_message_stream(request, conversation_id):
    """Stream the assistant's reply to the client as server-sent events."""
    conversation = get_object_or_404(
        Conversation, id=conversation_id, user=request.user
    )
    user_message = request.POST.get('message', '').strip()
    # Fix: validate like send_message does, instead of sending an empty
    # prompt to the API.
    if not user_message:
        return JsonResponse({'error': 'Message required'}, status=400)

    service = ChatService()

    def stream():
        for chunk in service.get_streaming_response(conversation, user_message):
            # Fix: SSE events are newline-delimited, so a chunk containing
            # "\n" would terminate the event early. Per the SSE spec, send
            # multi-line payloads as multiple `data:` lines in one event;
            # the client reassembles them with "\n".
            payload = "".join(f"data: {line}\n" for line in chunk.split("\n"))
            yield payload + "\n"

    return StreamingHttpResponse(
        stream(),
        content_type='text/event-stream',
    )
Rate Limiting
# chatbot/middleware.py
from django.core.cache import cache
from django.http import JsonResponse
def rate_limit_chat(get_response):
    """Middleware factory: cap chat-send requests at 20 per user per minute."""
    LIMIT = 20
    WINDOW = 60  # seconds

    def middleware(request):
        if request.path.startswith('/chatbot/send/'):
            user_id = request.user.id if request.user.is_authenticated else 'anon'
            key = f"chat_rate:{user_id}"
            # Fix: the original get/set pair raced under concurrent requests
            # AND reset the 60 s TTL on every message, so the window slid
            # with each request. cache.add is atomic and only creates the
            # key (starting the window) when it does not already exist.
            if cache.add(key, 1, WINDOW):
                count = 1
            else:
                try:
                    count = cache.incr(key)  # atomic increment, preserves TTL
                except ValueError:
                    # Key expired between add() and incr(); start a new window.
                    count = 1
                    cache.add(key, 1, WINDOW)
            if count > LIMIT:
                return JsonResponse({'error': 'Rate limit exceeded'}, status=429)
        return get_response(request)

    return middleware
Error Handling
# services.py
import logging
from openai.error import RateLimitError, APIError  # openai < 1.0 layout

logger = logging.getLogger(__name__)


def get_response(self, conversation, user_message):
    """As before, but degrade to a friendly message when the API misbehaves."""
    try:
        # ... save user message, build prompt, call openai.Completion.create,
        # save and return the assistant message (as in the earlier version) ...
        # Fix: a `try:` block containing only a comment is a SyntaxError;
        # `...` keeps this excerpt syntactically valid.
        ...
    except RateLimitError:
        # Transient: OpenAI is throttling us, not a bug on our side.
        logger.warning("OpenAI rate limit hit")
        return "I'm receiving too many requests. Please try again in a moment."
    except APIError as e:
        # Lazy %-formatting: only builds the string if this level is emitted.
        logger.error("OpenAI API error: %s", e)
        return "Sorry, I'm having trouble responding right now."
    except Exception:
        # logger.exception records the traceback; no need to repeat str(e).
        logger.exception("Unexpected error in get_response")
        return "An unexpected error occurred."
Final Thoughts
This pattern—context building, conversation history, API calls—applies whether you’re using:
- text-davinci-003 (completion API)
- gpt-3.5-turbo (chat API)
- gpt-4 and later models (chat API)
The interface may change, but the architecture remains.
Build the chatbot. The API will improve.