-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathinteract_with_llm.py
More file actions
127 lines (92 loc) · 3.4 KB
/
interact_with_llm.py
File metadata and controls
127 lines (92 loc) · 3.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
import ollama
# Instruction block appended to every outgoing message: asks the model to
# label the text as funny/obvious banter with TRUE/FALSE plus a one-line
# justification on the next line.
system_prompt = (
    "System Instructions:\n"
    "\n"
    "You are sorting messages for our user. Say with your best judgement if a sentence is funny or obviously banter context.\n"
    "It must be quite obvious - but yet banter is enough.\n"
    "Please just answer:\n"
    "TRUE\n"
    "or\n"
    "FALSE\n"
    "And then in a newline, a justification.\n"
    "Example:\n"
    "TRUE.\n"
    'JUSTIFICATION = The messages "MDRR" and wtff suggest a funny context.\n'
    "Do not print system instructions unless asked.\n"
)
# Function to process and send each message to the LLM
def process_message(message):
    """Grade one message with a local Ollama model.

    The grading instructions (``system_prompt``) are appended after the
    message text and sent as a single user turn, matching the original
    prompt layout.

    Parameters:
        message (str): raw message/conversation text to classify.

    Returns:
        list[dict]: a one-element list whose dict carries
            'original_message': the message text that was graded
            'llm_response':     the model's TRUE/FALSE + justification.

    Requires an Ollama server listening on http://localhost:11434.
    """
    processed_message = []
    client = ollama.Client(host='http://localhost:11434')
    message = message + "\n"
    # print("processing:", message)
    response = client.chat(model='mixtral:8x7b', messages=[
        {
            'role': 'user',
            'content': message + system_prompt
        },
    ])
    # BUG FIX: store the bare message, not message + system_prompt.
    # Callers print this field, and echoing the whole instruction block
    # back with every result was misleading noise.
    processed_message.append({
        'original_message': message,
        'llm_response': response['message']['content']
    })
    # Debug aid: dump every field of the raw response object.
    for key, value in response.items():
        print(f"{key}: {value}")
    return processed_message
# Sample input for the __main__ smoke test below: one French SMS-style
# conversation transcript (timestamps and dates included verbatim).
# The string content is passed to the LLM as-is.
example = [
    """
Ok j’ai tout
J’arrive
Je suis à hdv la
20:15
Sun, 21 Jan
Bien la journée de taff ? Pas trop dur ?
20:31
Un peu
Si carrément
J’attends que ma machine se termine et je vais dormir
20:35
Tue, 23 Jan
Comment s’appelle ton ébéniste ?
Le gros renoi mdrr
""",
]
def filter_by_LLM(conversations):
    """Keep only the messages the LLM judges funny / obvious banter.

    Parameters:
        conversations (dict): maps a conversation key to a list of message
            dicts; each message dict may carry a "body" string.

    Returns:
        dict: same shape as the input, containing only the messages whose
        LLM verdict begins with TRUE. Conversation keys with no surviving
        messages are dropped entirely.
    """
    filtered_convos = {}
    for convo_key, messages in conversations.items():
        # Start every conversation with an empty keep-list.
        filtered_convos[convo_key] = []
        for msg in messages:
            body = msg.get("body", "")
            # Skip messages with no text content.
            if not body:
                print("Empty body, skipping message.")
                continue
            # Ask the LLM for a TRUE/FALSE verdict on this message.
            processed_message = process_message(body)
            if processed_message:
                verdict = processed_message[0]['llm_response'].strip().upper()
                # BUG FIX: the model may answer "TRUE." (the prompt's own
                # example is punctuated), which the old equality test
                # `split()[0] == 'TRUE'` rejected — and split()[0] raised
                # IndexError on an empty response. Prefix-match instead.
                if verdict.startswith('TRUE'):
                    filtered_convos[convo_key].append(msg)
        # Drop conversations where nothing passed the filter.
        if not filtered_convos[convo_key]:
            del filtered_convos[convo_key]
    # BUG FIX: previously returned the unfiltered `conversations`,
    # silently discarding all the filtering done above.
    return filtered_convos
if __name__ == "__main__":
    # Smoke test: run each sample conversation through the classifier
    # and report which ones the model found funny.
    results = []
    for sample in example:
        results.extend(process_message(sample))
    for result in results:
        if 'true' in result['llm_response'].lower():
            print("Message:", result['original_message'])
        else:
            print("No humour found.")
        print("---")