from haystack.components.generators import OpenAIGenerator
from haystack.utils import Secret
from haystack.components.builders.prompt_builder import PromptBuilder
from haystack.components.routers import ConditionalRouter
from haystack import Pipeline
from haystack.components.embedders import SentenceTransformersTextEmbedder
from haystack_integrations.document_stores.chroma import ChromaDocumentStore
from haystack_integrations.components.retrievers.chroma import ChromaEmbeddingRetriever

import gradio as gr

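# Multilingual embedding model; queries must be embedded with the same model
# that was used when the documents were indexed.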
embedding_model = "Alibaba-NLP/gte-multilingual-base"


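# Persistent Chroma vector store. This assumes the "vstore_4012" collection was
# already populated by a separate indexing step, using the embedding model above.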
document_store = ChromaDocumentStore(
    persist_path="vstore_4012"
)


##################################
####### Answering pipeline #######
##################################
no_answer_message = (
    "I'm not allowed to answer this question. Please ask something related to "
    "API access in accordance with the DSA's transparency and data-sharing provisions. "
    "Is there anything else I can do for you?"
)

relevance_prompt_template = """
Classify whether this user is asking for something related to social media APIs,
the Digital Services Act (DSA), or any topic related to online platforms’ compliance
with legal and data-sharing frameworks.

Relevant topics include:
- Social media API access
- Data transparency
- Compliance with DSA provisions
- Online platform regulations

Here is their message:
{{query}}

Here are the two previous messages. ONLY refer to these if the above message refers to previous ones.

{% for message in user_history[-2:] %}
  * {{message["content"]}}

{% endfor %}

Instructions:
- Respond with "YES" if the query pertains to any of the relevant topics listed above and is not mixed with off-topic content.
- Respond with "NO" if the query is off-topic and does not relate to the topics listed above.

Examples:
- Query: "How does the DSA affect API usage?"
- Response: "YES"

- Query: "How to make a pancake with APIs?"
- Response: "NO"

"""

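# Router branches on the classifier's reply: "YES" forwards the original query
# on to retrieval; "NO" short-circuits to the refusal message above.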
routes = [
    {
        "condition": "{{'YES' in replies[0]}}",
        "output": "{{query}}",
        "output_name": "query",
        "output_type": str,
    },
    {
        "condition": "{{'NO' in replies[0]}}",
        "output": no_answer_message,
        "output_name": "no_answer",
        "output_type": str,
    }
]

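# Answer-generation prompt: combines recent conversation history with the
# retrieved documents.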
query_prompt_template = """
Conversation history:
{{conv_history}}

Here is what the user has requested:
{{query}}

Instructions:
- Craft a concise, informative answer to the user's request using the information provided below.
- Synthesize the key points into a seamless response that appears as your own expert knowledge.
- Avoid direct quotes or explicit references to the documents.
- You are directly answering the user's query.

Relevant Information:
{% for document in documents %}
- {{ document.content }}
{% endfor %}

"""

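# Factory so the classifier and the answer generator share the same OpenAI
# configuration; the API key is read from the environment at runtime.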
def setup_generator(model_name, api_key_env_var="OPENAI_API_KEY", max_tokens=8192):
    return OpenAIGenerator(
        api_key=Secret.from_env_var(api_key_env_var),
        model=model_name,
        generation_kwargs={"max_tokens": max_tokens}
    )


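# llm classifies relevance (30 output tokens is plenty for a YES/NO reply);
# llm2 generates the full answer with the default token budget.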
llm = setup_generator("gpt-4o-mini", max_tokens=30)
llm2 = setup_generator("gpt-4o-mini")


embedder = SentenceTransformersTextEmbedder(
    model=embedding_model,
    trust_remote_code=True,
    progress_bar=False
)
retriever = ChromaEmbeddingRetriever(document_store)

router = ConditionalRouter(routes=routes)
prompt_builder = PromptBuilder(template=relevance_prompt_template)
prompt_builder2 = PromptBuilder(template=query_prompt_template)


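# Wire the pipeline: relevance check first (prompt_builder -> llm -> router);
# relevant queries continue through embedder -> retriever -> prompt_builder2 -> llm2,
# while off-topic ones exit early at router.no_answer.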
answer_query = Pipeline()

answer_query.add_component("prompt_builder", prompt_builder)
answer_query.add_component("llm", llm)
answer_query.add_component("router", router)
answer_query.add_component("embedder", embedder)
answer_query.add_component("retriever", retriever)
answer_query.add_component("prompt_builder2", prompt_builder2)
answer_query.add_component("llm2", llm2)

answer_query.connect("prompt_builder", "llm")
answer_query.connect("llm", "router")
answer_query.connect("router.query", "embedder")
answer_query.connect("embedder", "retriever")
answer_query.connect("retriever", "prompt_builder2")
answer_query.connect("prompt_builder2", "llm2")

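# Load the embedding model up front so the first request isn't slow.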
answer_query.warm_up()


##########################
####### Logging ##########
##########################

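# PII-redaction prompt: the model copies the text verbatim but replaces
# personal details (emails, phone numbers, user names) with markers.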
prompt_template_hide_info = """You are a privacy robot that specialises in hiding sensitive information in a text.
Your help will ensure that no user information gets leaked, so you are always happy to help.
You will be given a text, and your task is to remove any sensitive information and replace it with a descriptive marker.
Here are a few examples, but you should not restrict yourself to only those:
If the text contains an email address, you should replace it with the marker "<email>".
If the text contains a phone number, you should replace it with the marker "<phone>".
If the text contains the name of the user, you should replace it with the marker "<name>".
Make sure to distinguish when a name, email, etc. is actually that of a public figure or company and is provided by the assistant rather than the user: in this case you should not hide it, as it is not sensitive information.
The rest of the text should be copied IDENTICALLY, including punctuation and formatting, together with the BEGINNING OF TEXT and END OF TEXT markers in capital letters. Do not add or remove any other character.

BEGINNING OF TEXT
{{ message }}
END OF TEXT

Your response:
"""

prompt_builder_hide_info = PromptBuilder(template=prompt_template_hide_info)

llm_hide_info = setup_generator("gpt-4o-mini")

pipe_hide_sensitive_info = Pipeline()
pipe_hide_sensitive_info.add_component("prompt_builder_hide_info", prompt_builder_hide_info)
pipe_hide_sensitive_info.add_component("llm_hide_info", llm_hide_info)
pipe_hide_sensitive_info.connect("prompt_builder_hide_info", "llm_hide_info")

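# Redact sensitive information from a message before it is logged.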
def hide_sensitive_info(message):
    # Retry up to 3 times: the model occasionally fails to preserve the markers.
    for _ in range(3):
        answer = pipe_hide_sensitive_info.run({"message": message})["llm_hide_info"]["replies"][0]
        if "BEGINNING OF TEXT" in answer and "END OF TEXT" in answer:
            start = answer.find("BEGINNING OF TEXT") + len("BEGINNING OF TEXT")
            end = answer.find("END OF TEXT")
            return answer[start:end].strip()
    # All attempts failed; log a placeholder instead of the raw message.
    return "[Error when hiding user info, no log generated]"

def log_QA(question, answer):
    message = f"User: {question}\nAssistant: {answer}"
    message_no_info = hide_sensitive_info(message)
    print(message_no_info, end="\n\n")

##########################
####### Gradio app #######
##########################

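# With type="messages", Gradio passes the new message plus the running history
# as a list of {"role", "content"} dicts.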
def chat(message, history):
    """
    Chat function for Gradio. Runs the answering pipeline to produce the next reply.
    """
    conv_history = "\n\n".join([f'{m["role"]}: {m["content"]}' for m in history[-2:]])
    user_history = [m for m in history if m["role"] == "user"]
    results = answer_query.run({"user_history": user_history,
                                "query": message,
                                "conv_history": conv_history,
                                "top_k":3})
    if "llm2" in results:
        answer = results["llm2"]["replies"][0]
    elif "router" in results and "no_answer" in results["router"]:
        answer = results["router"]["no_answer"]
    else:
        answer = "Sorry, a mistake occured"
    log_QA(message, answer)
    return answer

if __name__ == "__main__":
    interface = gr.ChatInterface(
        fn=chat,
        type="messages",
        title="40.12 Chatbot",
        description="Ask me anything about social media APIs, the Digital Services Act (DSA), or online platform regulations.")

    interface.launch()