The following code works with openai==0.28 (the legacy API):

## consumers.py
import json
import os

from channels.generic.websocket import AsyncWebsocketConsumer
import openai
import asyncio
from django.conf import settings
# Make sure Django knows where its settings module lives before any
# settings attribute is accessed (django.conf.settings resolves lazily,
# so setting the env var here — after the import above — still works).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AutoServer.settings')
# Configure the global openai module (0.28-style API) with the project key.
openai.api_key = settings.OPENAI_API_KEY


class ChatConsumer(AsyncWebsocketConsumer):
    """WebSocket consumer that streams OpenAI chat completions to the client.

    Expects an inbound JSON frame of the form ``{"prompt": "..."}``, runs the
    blocking OpenAI streaming call in a worker thread, and forwards each
    content chunk to the WebSocket as it arrives.
    """

    async def connect(self):
        # Accept every incoming connection; no auth/origin checks here.
        await self.accept()

    async def disconnect(self, close_code):
        # Nothing to clean up: no groups joined, no per-connection resources.
        pass

    async def receive(self, text_data):
        """Handle one inbound frame: parse the prompt and start streaming."""
        data = json.loads(text_data)
        prompt = data['prompt']

        # Capture the consumer's event loop so the worker thread can schedule
        # sends back onto it.  asyncio.get_event_loop() is deprecated inside
        # a running coroutine; get_running_loop() is the correct call here.
        self._loop = asyncio.get_running_loop()
        await self._loop.run_in_executor(None, self.stream_openai_response, prompt)

    def stream_openai_response(self, prompt):
        """Blocking helper (runs in an executor thread).

        Streams completion chunks from the 0.28-style API and ships each
        delta's content back to the client via the consumer's event loop.
        """
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            stream=True,
        )
        for chunk in response:
            content = chunk.choices[0].delta.get('content')
            if content:
                # BUGFIX: the original called asyncio.run() for every chunk,
                # which creates a brand-new event loop in this worker thread
                # instead of using the consumer's loop that owns the
                # WebSocket.  run_coroutine_threadsafe() schedules the send
                # on the right loop; .result() blocks this thread until the
                # frame is actually sent, preserving chunk ordering.
                asyncio.run_coroutine_threadsafe(
                    self.send_streaming_data(content), self._loop
                ).result()
                ## return to the webpage as stream

    async def send_streaming_data(self, content):
        """Send one streamed content fragment to the WebSocket client."""
        await self.send(text_data=json.dumps({
            'response': content
        }))

The following works with the newest version, openai==1.31:

import json
import openai
import asyncio
from channels.generic.websocket import AsyncWebsocketConsumer
from django.conf import settings
import os
from openai import OpenAI

# Make sure Django knows where its settings module lives before any
# settings attribute is accessed (settings resolve lazily, so setting the
# env var after the import above still works).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AutoServer.settings')
api_key = settings.OPENAI_API_KEY
# Module-level client shared by all consumer instances (openai>=1.x style).
client = OpenAI(api_key=api_key)
class ChatConsumer(AsyncWebsocketConsumer):
    """WebSocket consumer that streams OpenAI chat completions to the client.

    Expects an inbound JSON frame of the form ``{"prompt": "..."}``, runs the
    blocking openai>=1.x streaming call in a worker thread, and forwards each
    content chunk to the WebSocket as it arrives.
    """

    async def connect(self):
        # Accept every incoming connection; no auth/origin checks here.
        await self.accept()

    async def disconnect(self, close_code):
        # Nothing to clean up: no groups joined, no per-connection resources.
        pass

    async def receive(self, text_data):
        """Handle one inbound frame: parse the prompt and start streaming."""
        data = json.loads(text_data)
        prompt = data['prompt']

        # Capture the consumer's event loop so the worker thread can schedule
        # sends back onto it.  asyncio.get_event_loop() is deprecated inside
        # a running coroutine; get_running_loop() is the correct call here.
        self._loop = asyncio.get_running_loop()
        await self._loop.run_in_executor(None, self.stream_openai_response, prompt)

    def stream_openai_response(self, prompt):
        """Blocking helper (runs in an executor thread).

        Streams completion chunks from the v1 client and ships each delta's
        content back to the client via the consumer's event loop.
        """
        response = client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
            stream=True,
        )
        for chunk in response:
            content = chunk.choices[0].delta.content
            if content:
                # BUGFIX: the original called asyncio.run() for every chunk,
                # which creates a brand-new event loop in this worker thread
                # instead of using the consumer's loop that owns the
                # WebSocket.  run_coroutine_threadsafe() schedules the send
                # on the right loop; .result() blocks this thread until the
                # frame is actually sent, preserving chunk ordering.
                asyncio.run_coroutine_threadsafe(
                    self.send_streaming_data(content), self._loop
                ).result()

    async def send_streaming_data(self, content):
        """Send one streamed content fragment to the WebSocket client."""
        await self.send(text_data=json.dumps({
            'response': content
        }))

点赞(0) 打赏

评论列表 共有 0 条评论

暂无评论

微信公众账号

微信扫一扫加关注

发表
评论
返回
顶部