
Commit 425a645

DARREN OBERST authored and committed
updating UI examples
1 parent a6b24fc commit 425a645

File tree

4 files changed: +261 -97 lines changed


examples/Notebooks/ui_without_a_database.py

Lines changed: 0 additions & 97 deletions
This file was deleted.
Lines changed: 94 additions & 0 deletions
@@ -0,0 +1,94 @@
""" This example shows how to build a simple UI RAG application for longer documents in which a retrieval query step
is required to build a context from selected text chunks in the document.

This example is built with a Streamlit UI. To run, it requires a separate `pip install streamlit`, and
to execute the script, you should run from the command line with:

`streamlit run using_with_streamlit_ui.py`

For more information about Streamlit, check out their docs: https://docs.streamlit.io/develop/tutorials

To build out the application, you would replace the very simple 'text search' mechanism used below with
the techniques outlined in the Embeddings and Retrieval examples.

"""

import os
import streamlit as st

from llmware.prompts import Prompt
from llmware.setup import Setup

# st.set_page_config(layout="wide")


def simple_analyzer_with_topic_query():

    st.title("Simple RAG Analyzer with Focusing Query")

    prompter = Prompt()

    sample_files_path = Setup().load_sample_files(over_write=False)
    doc_path = os.path.join(sample_files_path, "Agreements")

    files = os.listdir(doc_path)
    file_name = st.selectbox("Choose an Agreement", files)

    # ** topic_query ** - this is a proxy for a more complex focusing retrieval strategy to target only a
    # specific part of the document, rather than the whole document
    # in this case, it will run a 'text match' search against the topic query to reduce the
    # text chunks reviewed in trying to answer the question

    topic_query = st.text_area("Filtering Topic (hint: 'vacation')")

    # ** prompt_text ** - this is the question that will be passed to the LLM
    prompt_text = st.text_area("Question (hint: 'how many vacation days will the executive receive?')")

    model_name = st.selectbox("Choose a model for answering questions", ["bling-phi-3-gguf",
                                                                         "bling-tiny-llama-1b",
                                                                         "bling-stablelm-3b-tool",
                                                                         "llama-3-instruct-bartowski-gguf",
                                                                         "dragon-llama-answer-tool"])

    if st.button("Run Analysis"):

        if file_name and prompt_text and model_name:

            prompter.load_model(model_name, temperature=0.0, sample=False)

            # parse the PDF in memory and attach to the prompt
            if not topic_query:
                sources = prompter.add_source_document(doc_path, file_name)
            else:
                # this is where we use the topic_query to filter the parsed document
                sources = prompter.add_source_document(doc_path, file_name, query=topic_query)

            # run the inference with the source
            response = prompter.prompt_with_source(prompt_text)

            # fact checks
            fc = prompter.evidence_check_numbers(response)
            cs = prompter.evidence_check_sources(response)

            if len(response) > 0:
                if "llm_response" in response[0]:
                    response = response[0]["llm_response"]

                    st.write(f"Answer: {response}")

            if len(fc) > 0:
                if "fact_check" in fc[0]:
                    fc_out = fc[0]["fact_check"]
                    st.write(f"Numbers Check: {fc_out}")

            if len(cs) > 0:
                if "source_review" in cs[0]:
                    sr_out = cs[0]["source_review"]
                    st.write(f"Source review: {sr_out}")


if __name__ == "__main__":

    simple_analyzer_with_topic_query()
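Note: the docstring above points to the Embeddings and Retrieval examples for replacing the simple 'text match' filter with a real retrieval step. Below is a minimal sketch of that swap, assuming the standard llmware Library / Query pattern; the library name ("agreements_lib"), embedding model ("mini-lm-sbert") and vector database ("chromadb") are illustrative choices and are not part of this commit.

# Hedged sketch: replacing the text-match filter with a semantic retrieval step.
# Assumes the Library / Query pattern from the llmware Embeddings and Retrieval examples.

import os

from llmware.library import Library
from llmware.retrieval import Query
from llmware.prompts import Prompt
from llmware.setup import Setup


def build_context_with_semantic_query(topic_query, prompt_text, model_name="bling-phi-3-gguf"):

    # one-time setup: parse the agreements into a library and build a vector index
    sample_files_path = Setup().load_sample_files(over_write=False)
    library = Library().create_new_library("agreements_lib")
    library.add_files(input_folder_path=os.path.join(sample_files_path, "Agreements"))
    library.install_new_embedding(embedding_model_name="mini-lm-sbert", vector_db="chromadb")

    # retrieval: pull the most relevant text chunks for the topic query
    results = Query(library).semantic_query(topic_query, result_count=10)

    # attach the retrieved chunks as the source, then prompt as in the example above
    prompter = Prompt()
    prompter.load_model(model_name, temperature=0.0, sample=False)
    prompter.add_source_query_results(results)

    return prompter.prompt_with_source(prompt_text)

Building the library and its embeddings is a one-time cost; subsequent runs can reuse the index and execute only the semantic query before prompting.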
Lines changed: 86 additions & 0 deletions
@@ -0,0 +1,86 @@
""" This example shows how to build a simple RAG application with a UI using Streamlit and LLMWare.

Note: it requires a separate `pip install streamlit`, and to run the script, you should run from the
command line with:

`streamlit run using_with_streamlit_ui.py`

For this example, we will be prompting against a set of Invoice documents, provided in the LLMWare
sample files.

If you would like to substitute longer documents, then please look at the UI example:

-- rag_ui_with_query_topic_with_streamlit.py

as a framework to get started integrating a retrieval step before prompting against the source.

For more information about Streamlit, check out their docs: https://docs.streamlit.io/develop/tutorials

"""

import os
import streamlit as st

from llmware.prompts import Prompt
from llmware.setup import Setup

# st.set_page_config(layout="wide")


def simple_analyzer():

    st.title("Simple RAG Analyzer")

    prompter = Prompt()

    sample_files_path = Setup().load_sample_files(over_write=False)
    doc_path = os.path.join(sample_files_path, "Invoices")

    files = os.listdir(doc_path)
    file_name = st.selectbox("Choose an Invoice", files)

    prompt_text = st.text_area("Question (hint: 'what is the total amount of the invoice?')")

    model_name = st.selectbox("Choose a model for answering questions", ["bling-phi-3-gguf",
                                                                         "bling-tiny-llama-1b",
                                                                         "bling-stablelm-3b-tool",
                                                                         "llama-3-instruct-bartowski-gguf",
                                                                         "dragon-llama-answer-tool"])

    if st.button("Run Analysis"):

        if file_name and prompt_text and model_name:

            prompter.load_model(model_name, temperature=0.0, sample=False)

            # parse the PDF in memory and attach to the prompt
            sources = prompter.add_source_document(doc_path, file_name)

            # run the inference with the source
            response = prompter.prompt_with_source(prompt_text)

            # fact checks
            fc = prompter.evidence_check_numbers(response)
            cs = prompter.evidence_check_sources(response)

            if len(response) > 0:
                if "llm_response" in response[0]:
                    response = response[0]["llm_response"]

                    st.write(f"Answer: {response}")

            if len(fc) > 0:
                if "fact_check" in fc[0]:
                    fc_out = fc[0]["fact_check"]
                    st.write(f"Numbers Check: {fc_out}")

            if len(cs) > 0:
                if "source_review" in cs[0]:
                    sr_out = cs[0]["source_review"]
                    st.write(f"Source review: {sr_out}")


if __name__ == "__main__":

    simple_analyzer()
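Note: before wiring up the UI, it can help to exercise the same pipeline headless. Below is a small sketch using only the calls shown in the example above; the batch loop, the question string, and the `clear_source_materials()` reset between documents are illustrative assumptions, not part of this commit.

# Hedged sketch: the same RAG pipeline as above, run headless (no Streamlit) over every invoice.

import os

from llmware.prompts import Prompt
from llmware.setup import Setup


def batch_invoice_check(model_name="bling-tiny-llama-1b", question="What is the total amount of the invoice?"):

    doc_path = os.path.join(Setup().load_sample_files(over_write=False), "Invoices")

    prompter = Prompt()
    prompter.load_model(model_name, temperature=0.0, sample=False)

    for file_name in os.listdir(doc_path):

        # parse the invoice in memory and attach to the prompt
        prompter.add_source_document(doc_path, file_name)

        response = prompter.prompt_with_source(question)

        if len(response) > 0 and "llm_response" in response[0]:
            print(f"{file_name}: {response[0]['llm_response']}")

        # clear the attached source before moving to the next invoice
        prompter.clear_source_materials()


if __name__ == "__main__":
    batch_invoice_check()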
Lines changed: 81 additions & 0 deletions
@@ -0,0 +1,81 @@
""" This example provides a basic framework to build a Chatbot UI interface in conjunction with LLMWare
using the Streamlit Chat UI.

Running this example requires an install of Streamlit, e.g., `pip3 install streamlit`

To execute the script, run from the command line with: `streamlit run using_with_streamlit_ui.py`

Also, please note that the first time you run with a new model, the model will be downloaded and cached locally,
so expect a delay on the 'first run'; every successive run will be much faster.

All components of the chatbot will be running locally, so the speed will be determined largely by the
CPU/GPU capacities of your machine.

We have set max_output at 250 tokens - for faster responses, set it lower.

For more information on the Streamlit Chat UI,
see https://docs.streamlit.io/develop/tutorials/llms/build-conversational-apps

"""

import streamlit as st
from llmware.models import ModelCatalog


def simple_chat_ui_app(model_name):

    st.title(f"Simple Chat with {model_name}")

    model = ModelCatalog().load_model(model_name, temperature=0.3, sample=True, max_output=250)

    # initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # accept user input
    prompt = st.chat_input("Say something")
    if prompt:

        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):

            model_response = model.inference(prompt)

            # insert additional error checking / post-processing of output here
            bot_response = model_response["llm_response"]

            st.markdown(bot_response)

        st.session_state.messages.append({"role": "user", "content": prompt})
        st.session_state.messages.append({"role": "assistant", "content": bot_response})

    return 0


if __name__ == "__main__":

    # a few representative good chat models that can run locally
    # note: the first run will take a minute while the model is downloaded and cached locally

    chat_models = ["phi-3-gguf",
                   "llama-2-7b-chat-gguf",
                   "llama-3-instruct-bartowski-gguf",
                   "openhermes-mistral-7b-gguf",
                   "zephyr-7b-gguf"]

    model_name = chat_models[0]

    simple_chat_ui_app(model_name)
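Note: the chat example above stores `st.session_state.messages` only for display and sends just the latest user message to the model. A minimal, assumption-level sketch of folding prior turns into the prompt string before calling `inference()` (the helper below is an illustrative addition, not part of this commit):

# Hedged sketch: giving the model conversational memory by prepending recent turns.
# Simple string concatenation is just one way to do this.

def build_prompt_with_history(messages, new_prompt, max_turns=3):

    """ Prepends the last few user/assistant turns to the new prompt. """

    history = messages[-2 * max_turns:]
    context_lines = [f"{m['role']}: {m['content']}" for m in history]

    return "\n".join(context_lines + [f"user: {new_prompt}"])


# inside the chat loop, instead of model.inference(prompt):
#   full_prompt = build_prompt_with_history(st.session_state.messages, prompt)
#   model_response = model.inference(full_prompt)

Keeping max_turns small matters for the local GGUF models listed above, since the concatenated history consumes the model's limited context window.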
