Skip to content

Commit 69c6ab4

Browse files
committed
adding multimedia bot example
1 parent 52c13fe commit 69c6ab4

File tree

1 file changed

+169
-0
lines changed

1 file changed

+169
-0
lines changed

‎examples/UI/multimedia_bot.py‎

Lines changed: 169 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,169 @@
1+
2+
""" This example shows a multimedia bot created in less than 100 lines of code that
3+
leverages the CPU, GPU and NPU
4+
5+
-- designed to run on an AI PC with Intel Lunar Lake with CPU, GPU and NPU
6+
-- if you do not have GPU, it will auto-fallback to CPU
7+
-- if you do not have NPU, you can change the option to GPU
8+
9+
To run this example, we will need the following dependencies in addition to llmware:
10+
11+
-- pip3 install openvino_genai
12+
-- pip3 install pywebio
13+
14+
"""
15+
16+
from llmware.models import ModelCatalog
17+
from llmware.configs import LLMWareConfig
18+
19+
import os
20+
import threading
21+
22+
from pywebio.input import input_group, textarea, actions
23+
from pywebio.output import put_text, put_markdown, put_image, use_scope, put_info
24+
from pywebio.session import set_env
25+
26+
27+
def text_gen_bot(**kwargs):

    """ Simple text generation streaming bot - will run using GGUF on CPU.

    Expected kwargs:
        user_msg (str): the user's chat message, used as the story seed.
        img_counter (int): turn counter, used to build a unique pywebio scope.

    Returns the generated text (str).  As a side effect, writes the text to a
    tmp file under the llmware path so the main thread can pick it up after
    this worker thread finishes.
    """

    user_msg = kwargs.get("user_msg", "")
    img_counter = kwargs.get("img_counter", 0)

    # llmware load_model
    text_gen_model = ModelCatalog().load_model("phi-3-gguf",
                                               max_output=200)

    inst = "Complete this story: "
    prompt = inst + user_msg
    text_output = ""

    # one scope per turn so each response renders in its own output area
    with use_scope(f"text_gen{img_counter}"):

        # llmware stream generation - display tokens as they arrive
        for token in text_gen_model.stream(prompt):
            put_text(token, inline=True)
            text_output += token

        put_text("\nTo be continued ...")

    # for demo example, we will write the text from the thread to a tmp file
    fp = os.path.join(LLMWareConfig().get_llmware_path(), "txt_tmp.txt")
    if os.path.exists(fp):
        os.remove(fp)

    # context manager closes the handle even if the write raises
    with open(fp, "w") as f:
        f.write(text_output)

    return text_output
60+
61+
62+
def image_gen_bot(**kwargs):

    """ Image generation bot that will run on GPU.

    Expected kwargs:
        user_msg (str): the user's chat message, used as the image prompt.
        img_counter (int): turn counter, used to name the output image file
            and to build a unique pywebio scope.

    Returns the path (str) of the generated image file.
    """

    user_msg = kwargs.get("user_msg", "")
    img_counter = kwargs.get("img_counter", 0)

    # llmware load_model
    model = ModelCatalog().load_model("lcm-dreamshaper-ov")

    inst = "Draw an image: "
    prompt = inst + user_msg

    # specialized pipeline on the model
    img_path = model.text_to_image_gen(prompt, f"test_image_{img_counter}")

    # read the image bytes - context manager avoids leaking the file handle
    with open(img_path, "rb") as img_file:
        content = img_file.read()

    # display the image on the screen with pywebio
    with use_scope(f"img_gen{img_counter}"):
        put_image(content)

    return img_path
84+
85+
86+
def classifier_agent_bot(**kwargs):

    """ Simple classification agent running on NPU - runs a function call
    over the text produced by the text-gen thread and displays the result.

    Expected kwargs:
        text_output (str): the generated story text to classify.
        npu_model: an llmware model object, already loaded by the caller.
    """

    generated_text = kwargs.get("text_output", "")
    model = kwargs.get("npu_model", None)

    # the model is loaded in the main thread and handed in via kwargs;
    # execute the classification as a function call on the NPU
    llm_response = model.function_call(generated_text)

    put_text("\n\nNPU Classification Agent: " + str(llm_response["llm_response"]))

    return True
99+
100+
101+
def run_bot():

    """ Main function - starts a user prompt loop, and then kicks off
    three threads in parallel on CPU, GPU and NPU.

    Each loop turn: collects a user message, starts the CPU text-gen and
    GPU image-gen worker threads, loads the NPU classifier model in the
    main thread while those workers run, then hands the generated text to
    the NPU classification thread.  Returns True when the user exits.
    """

    set_env(input_panel_fixed=False, output_animation=False)
    put_markdown("""# Multimedia Bot with LLMWare, OpenVINO, & PyWebio""")

    img_counter = 0
    start_bot = True

    while start_bot:

        # user input chat box
        form = input_group('', [
            textarea(name='msg', placeholder='Ask LLMWare Bot', rows=3),
            actions(name='cmd', buttons=['Send', 'Exit'])
        ])

        if form['cmd'] == "Exit":
            start_bot = False
            break

        user_msg = form['msg']

        # display the user prompt
        put_info(user_msg)

        # thread 1 - CPU - text gen
        text_gen_thread = threading.Thread(target=text_gen_bot,
                                           kwargs={"user_msg": user_msg,
                                                   "img_counter": img_counter})
        text_gen_thread.start()

        # thread 2 - GPU - text to image gen
        image_gen_thread = threading.Thread(target=image_gen_bot,
                                            kwargs={"user_msg": user_msg,
                                                    "img_counter": img_counter})
        image_gen_thread.start()

        # load the npu model in main and pass to thread - loading here
        # deliberately overlaps with the CPU/GPU worker threads above
        npu_model = ModelCatalog().load_model("slim-topics-npu-ov",
                                              sample=False, temperature=0.0,
                                              device="NPU")

        image_gen_thread.join()
        text_gen_thread.join()

        # pull the text output file created in the text gen thread;
        # context manager closes the handle, and a missing file yields ""
        fp = os.path.join(LLMWareConfig().get_llmware_path(), "txt_tmp.txt")
        text_output = ""
        if os.path.exists(fp):
            with open(fp, "r") as f:
                text_output = f.read()

        # kick off NPU thread
        npu_gen_thread = threading.Thread(target=classifier_agent_bot,
                                          kwargs={"text_output": text_output,
                                                  "npu_model": npu_model})
        npu_gen_thread.start()

        img_counter += 1

    return True
166+
167+
168+
# script entry point - launches the interactive pywebio chat loop
if __name__ == "__main__":
    run_bot()

0 commit comments

Comments
 (0)