Implementing WebSocket-like behavior over HTTP, e.g. ChatGPT's real-time typing effect, the native way: fetch + SSE + POST
Streaming with fetch: native fetch + SSE + POST
fetch does not give you a built-in SSE client the way EventSource does, but the response body is exposed as a ReadableStream, so you can use Web Streams APIs such as ReadableStream and TextDecoder to read the output incrementally and achieve the same effect.
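A minimal sketch of that reading loop is shown here, assuming the FastAPI server from the next section is running at http://localhost:16887/stream-json; the JSON body ({ prompt: "hello" }) is just an example payload, and the console.log call stands in for whatever DOM update produces the typing effect. The complete frontend code is shown in full later in the article.

// Minimal sketch: POST with fetch, then read the streamed response body chunk by chunk.
async function streamFromServer(): Promise<void> {
  const response = await fetch("http://localhost:16887/stream-json", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ prompt: "hello" }),   // the backend reads this via request.json()
  });
  if (!response.ok || !response.body) {
    throw new Error(`request failed: ${response.status}`);
  }
  const reader = response.body.getReader();      // ReadableStream reader
  const decoder = new TextDecoder("utf-8");      // bytes -> text
  while (true) {
    const { done, value } = await reader.read(); // wait for the next chunk
    if (done) break;                             // server closed the stream
    const chunk = decoder.decode(value, { stream: true });
    console.log(chunk);                          // replace with a DOM update for the typing effect
  }
}
streamFromServer();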
Backend: Python, PHP, Java, etc. will all work... The core is the frontend, which is shown in full below.
from fastapi import FastAPI, Form, Request
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware  # to handle CORS
# from sse_starlette.sse import EventSourceResponse
import json
import time

app = FastAPI()

# Sample JSON data to be sent to the client as an event stream
data = {
    "messages": [
        {"text": "Hello, 1!", "timestamp": "2021-01-01T12:00:00"},
        {"text": "Hello, 2!", "timestamp": "2021-01-02T12:00:01"},
        {"text": "Hello, 3!", "timestamp": "2021-01-02T12:00:02"},
    ]
}

def generate_json_stream(data):
    # Split the JSON data and send the messages one by one
    for message in data["messages"]:
        json_str = json.dumps(message)
        yield json_str.encode("utf-8")  # send the current chunk to the frontend
        time.sleep(1)                   # simulate a delay

# Configure CORSMiddleware to allow cross-origin requests
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],     # origins allowed to access the API
    allow_credentials=True,  # allow cookies
    allow_methods=["*"],     # allowed request methods
    allow_headers=["*"],     # allowed request headers
)

@app.post("/stream-json")
async def stream_json(request: Request, foo: str = Form(default='')):
    # Read the JSON body and headers sent by the frontend
    json_data = await request.json()
    getHeader = request.headers
    print('----1-----', json_data, getHeader)
    # return EventSourceResponse(generate_json_stream(data), media_type="text/event-stream")
    return StreamingResponse(generate_json_stream(data), media_type="text/event-stream")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=16887)
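A note on the design choice here: StreamingResponse writes each yielded chunk to the body as-is, while the commented-out EventSourceResponse from sse_starlette would wrap each chunk in standard SSE framing (data: ... lines). Either works with the manual fetch reader sketched above; the reason fetch is used at all is that the browser's built-in EventSource client only supports GET and cannot send a POST body or custom headers.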
The core is the frontend:
Native fetch + SSE + POST