// Connection settings for the LLM chat-completions API.
// NOTE(review): never ship a hard-coded API key — load it from secure
// configuration (environment variable / secret store) before release.
public string apiKey = "your-api-key";        // credential for the LLM service
public string endpoint = "your-api-endpoint"; // HTTP endpoint of the chat API
public string model = "qwen-plus"; // Replace with the specific model name
// Assemble the chat-completions payload: a fixed system prompt followed by
// the player's message. Anonymous types keep the shape close to the wire JSON.
var requestData = new
{
    model,
    messages = new[]
    {
        new { role = "system", content = "You are a helpful assistant." },
        new { role = "user", content = message },
    },
};

// Serialize the payload into the JSON string used as the HTTP request body.
string jsonData = JsonConvert.SerializeObject(requestData);
/// <summary>
/// Parses the NPC's JSON reply, displays the dialogue text, and triggers any
/// clue events the response unlocked.
/// </summary>
/// <param name="response">Raw JSON string in the <c>NPCResponse</c> format
/// (npc_reply, clue_unlocked, context).</param>
public void DisplayResponse(string response)
{
    var parsedResponse = JsonConvert.DeserializeObject<NPCResponse>(response);

    // Guard clause: malformed JSON deserializes to null — log and bail out.
    if (parsedResponse == null)
    {
        Debug.LogWarning("Invalid response format.");
        return;
    }

    responseText.text = parsedResponse.npc_reply;

    // clue_unlocked may be null or empty when the reply unlocks nothing;
    // the null-conditional check covers both cases.
    if (parsedResponse.clue_unlocked?.Length > 0)
    {
        foreach (var clueId in parsedResponse.clue_unlocked)
        {
            TriggerClue(clueId);
        }
    }
}
为实时对话构建 Prompt
Prompt 决定了模型“扮演谁”“知道什么”“怎么输出”。下面是一个示例:
You are "Li Xin," a middle-aged laboratory administrator in a key optics lab in Shanghai. Respond realistically to the player’s queries, staying within the game’s context. Format your output as: { "npc_reply": "[NPC's reply]", "clue_unlocked": [array of clue IDs, empty if none], "context": "[Explanation or additional context]" }
Prompt 编写建议:
定义 NPC 角色:写清性格、知识范围、禁区(例如不能透露某些信息)。
提供上下文:塞入世界观、背景、任务状态,让模型更不容易“跑题”。
强约束输出格式:明确要求 JSON(或其他结构),这样你才能稳定解析并触发事件。
解析模型响应
不少 LLM API 会返回“外层 JSON + 内层字符串 JSON”的嵌套结构。例如:
{ "choices":[ { "message":{ "role":"assistant", "content":"{\"npc_reply\": \"Welcome to the lab!\", \"clue_unlocked\": [1], \"context\": \"Lab orientation\"}" } } ] }
提取步骤可以分两层:
先解析外层:
// First pass: deserialize the API envelope, then pull the assistant message's
// content string — which itself contains the nested NPC JSON.
// NOTE(review): choices may be empty on an API error; consider guarding
// choices[0] before indexing — confirm against the provider's error shape.
var outerResponse = JsonConvert.DeserializeObject<OuterResponse>(response); string innerContent = outerResponse.choices[0].message.content;
再解析内层 JSON:
// Second pass: the extracted content string is itself JSON — deserialize it
// into the structured NPC reply (npc_reply, clue_unlocked, context).
var innerResponse = JsonConvert.DeserializeObject<NPCResponse>(innerContent);