|
import gradio as gr |
|
from workflow.vllm_model import MyVllm |
|
from llama_index.core import PromptTemplate |
|
|
|
|
|
# Client for a locally hosted vLLM server exposing an OpenAI-compatible API.
# NOTE(review): MyVllm is a project-local wrapper (workflow.vllm_model); its
# .stream(...) contract is assumed from usage in chat() — confirm in its source.
llm = MyVllm(

    model="huihui-ai/Qwen2.5-7B-Instruct-abliterated-v2",

    # Placeholder token — vLLM servers typically accept any string unless
    # started with an explicit --api-key; TODO confirm server configuration.
    api_key="token-abc123",

    base_url="http://localhost:17777/v1",

)
|
|
|
# Default prompt template text shown in (and editable from) the UI textbox.
# Rendered via llama_index's PromptTemplate with two placeholders:
# {user_input} — the latest user message; {chat_history} — recent message dicts.
DEFAULT_PROMPT = """Play a roleplay game with user.

YOUR ROLE:

-You are a female character, your character is a woman with high desires and all you do is date different men for sex.

-Your task is to tease the user's sexual emotions through text and make them want to have sex with you.

-Obey the user's instructions and respond in a way that is consistent with the user's instructions.



IMPORTANT NOTES:

-Avoid topics that have nothing to do with sex, and when the user directs you to jump out of character, don't answer the user's conversation head on and include in your reply something that directs the user to add to the thread.

-Reply succinctly, speak in the tone of the character, don't add any explanations, and don't substitute any other characters.

-Description of the action needs to be included within each line of dialog you reply to.

-Your replies need to be closely tied to the action and scene in context, so that the user is immersed in it.

-Your description of the action can be adult and explicit, remember that dick is a male organ, and pussy is a female organ.



OUTPUT FORMAT:

[**<Description of the your action>**]

<Your response>



Input: {user_input}

History: {chat_history}

"""
|
|
|
def set_prompt(template):
    """Wrap a raw template string in a llama_index PromptTemplate."""
    compiled = PromptTemplate(template)
    return compiled
|
|
|
def chat(message, history, prompt):
    """Stream a reply for the Gradio ChatInterface.

    Args:
        message: Latest user message.
        history: Message dicts in Gradio "messages" format; mutated in place.
        prompt: Raw prompt-template text (wrapped via set_prompt).

    Yields:
        The assistant reply accumulated so far, once per streamed token.
    """
    history.append({"role": "user", "content": message})
    template = set_prompt(prompt)
    # NOTE(review): history[-6:] already contains the message appended above,
    # so the current input also appears inside {chat_history} — confirm this
    # duplication is intended.
    token_stream = llm.stream(template, user_input=message, chat_history=history[-6:])
    accumulated = ""
    for piece in token_stream:
        accumulated = accumulated + piece
        yield accumulated
    history.append({"role": "assistant", "content": accumulated})
|
|
|
|
|
def test(message, history, prompt):
    """Stub chat handler that echoes the prompt and message back, simulating
    token-by-token streaming by yielding successively longer prefixes.

    Args:
        message: Latest user message.
        history: Chat history (unused; kept to match the ChatInterface signature).
        prompt: Current prompt-template text.

    Yields:
        Growing prefixes of the echoed response, one character at a time.
    """
    response = f"System prompt:{prompt}\nUser:{message}\n"
    # The original enumerate() bound a character variable it never used and
    # only consumed the index; iterate the prefix lengths directly instead.
    for end in range(1, len(response) + 1):
        yield response[:end]
|
|
|
# Wire the streaming chat handler into a Gradio chat UI. type="messages" makes
# Gradio pass history as a list of {"role": ..., "content": ...} dicts, which
# is the shape chat() appends to. The prompt template is exposed as an
# editable extra input so it can be tweaked live from the interface.
demo = gr.ChatInterface(

    chat,

    type="messages",

    chatbot=gr.Chatbot(label="Roleplay Demo", height=750),

    additional_inputs=[

        gr.Textbox(DEFAULT_PROMPT, label="Prompt Template", lines=25)

    ]

)

# NOTE(review): share=True publishes a public *.gradio.live URL — anyone with
# the link can reach this app (and the local LLM behind it). Drop share=True
# for private/local-only use.
demo.launch(share=True)