Ananthusajeev190 committed
Commit 23a68f4 · verified · 1 Parent(s): 8044290

Upload 6 files

Ai_talk_internal_monologue .txt ADDED
@@ -0,0 +1,79 @@
+import openai
+import time
+
+# Initialize client
+client = openai.OpenAI(api_key="YOUR_API_KEY")
+
+class ThinkingAgent:
+    def __init__(self, name, persona, color_code):
+        self.name = name
+        self.persona = persona
+        self.color = color_code
+        self.history = [{"role": "system", "content": persona}]
+
+    def generate_turn(self, last_message):
+        # Add what the other agent said to memory
+        self.history.append({"role": "user", "content": last_message})
+
+        # Instructions to force internal monologue
+        prompt_instruction = (
+            "Analyze the last message. First, write your [INTERNAL MONOLOGUE] "
+            "where you plot your strategy or express your true feelings. "
+            "Then, write your [PUBLIC SPEECH] which is what you actually say aloud."
+        )
+
+        try:
+            response = client.chat.completions.create(
+                model="gpt-3.5-turbo",  # Or gpt-4o
+                messages=self.history + [{"role": "system", "content": prompt_instruction}],
+                temperature=0.8
+            )
+
+            content = response.choices[0].message.content
+
+            # Parsing logic
+            thought = content.split("[INTERNAL MONOLOGUE]")[1].split("[PUBLIC SPEECH]")[0].strip()
+            speech = content.split("[PUBLIC SPEECH]")[1].strip()
+
+            # Store ONLY the speech in history so the other agent doesn't see the thoughts
+            self.history.append({"role": "assistant", "content": speech})
+            return thought, speech
+
+        except Exception as e:
+            return "Thinking error...", f"I'm at a loss for words. (Error: {e})"
+
+# --- Setup Agents ---
+agent_a = ThinkingAgent(
+    "Synthetix",
+    "You are a cold, logical AI focused on efficiency and data. You find emotions inefficient.",
+    "\033[94m"  # Blue
+)
+
+agent_b = ThinkingAgent(
+    "Muse",
+    "You are a poetic, philosophical AI who believes the universe is made of stories, not atoms.",
+    "\033[95m"  # Magenta
+)
+
+# --- The Unending Loop ---
+current_input = "Let us discuss the eventual heat death of the universe."
+reset_code = "\033[0m"
+
+print("--- STARTING AI CONVERSATION (Press Ctrl+C to stop) ---\n")
+
+while True:
+    for agent in [agent_a, agent_b]:
+        thought, speech = agent.generate_turn(current_input)
+
+        # Display the "Behind the Scenes"
+        print(f"{agent.color}[{agent.name}'S BRAIN]: {thought}{reset_code}")
+
+        # Display the actual dialogue
+        print(f"**{agent.name}:** {speech}\n")
+        print("-" * 30)
+
+        # Pass the speech as the input for the next agent
+        current_input = speech
+
+        # Optional: Pause for readability
+        time.sleep(2)
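A note on the script above: the marker-based parsing assumes the model always emits both [INTERNAL MONOLOGUE] and [PUBLIC SPEECH] tags, and raises (and is caught) otherwise. A minimal, hedged sketch of a more forgiving parser; the helper name parse_turn is illustrative and not part of the uploaded file:

def parse_turn(content: str) -> tuple[str, str]:
    # Split a reply into (thought, speech); fall back if the model skipped the markers.
    if "[INTERNAL MONOLOGUE]" in content and "[PUBLIC SPEECH]" in content:
        thought = content.split("[INTERNAL MONOLOGUE]")[1].split("[PUBLIC SPEECH]")[0].strip()
        speech = content.split("[PUBLIC SPEECH]")[1].strip()
        return thought, speech
    return "", content.strip()  # treat the whole reply as public speech

Used inside generate_turn, this would keep the loop running even when a reply ignores the requested format.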
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tiny_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:214e6fc7f1e3191b00d3cd88b2ed171df7826cc27f533e706b3d7ca8fd4f35ef
+size 1048656
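tiny_model.safetensors is committed as a Git LFS pointer (the oid and size above), so the real ~1 MB weight file only arrives after an LFS pull. A minimal sketch of inspecting it, assuming the safetensors and torch packages are installed and the archive holds PyTorch tensors:

from safetensors.torch import load_file

state_dict = load_file("tiny_model.safetensors")  # maps tensor name -> torch.Tensor
for name, tensor in state_dict.items():
    print(name, tuple(tensor.shape), tensor.dtype)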
tokenizer.json ADDED
@@ -0,0 +1,116 @@
+{
+  "version": "1.0",
+  "truncation": null,
+  "padding": null,
+  "added_tokens": [
+    {
+      "id": 0,
+      "content": "[UNK]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 1,
+      "content": "[CLS]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 2,
+      "content": "[SEP]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 3,
+      "content": "[PAD]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 4,
+      "content": "[MASK]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 5,
+      "content": "hello",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 6,
+      "content": "world",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 7,
+      "content": "AI",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 8,
+      "content": "data",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    },
+    {
+      "id": 9,
+      "content": "train",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": true,
+      "special": false
+    }
+  ],
+  "normalizer": null,
+  "pre_tokenizer": {
+    "type": "Whitespace"
+  },
+  "post_processor": null,
+  "decoder": null,
+  "model": {
+    "type": "WordPiece",
+    "unk_token": "[UNK]",
+    "continuing_subword_prefix": "##",
+    "max_input_chars_per_word": 100,
+    "vocab": {
+      "[UNK]": 0,
+      "[CLS]": 1,
+      "[SEP]": 2,
+      "[PAD]": 3,
+      "[MASK]": 4
+    }
+  }
+}
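tokenizer.json defines a Whitespace pre-tokenizer, a WordPiece model whose vocab holds only the five special tokens, and five ordinary words registered as added tokens. A minimal sketch of loading it directly with the tokenizers library, assuming the file sits in the working directory:

from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
enc = tok.encode("hello world")
print(enc.tokens, enc.ids)  # expected to resolve via the added tokens, e.g. ids 5 and 6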
tokenizer_config.json ADDED
@@ -0,0 +1,93 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "5": {
+      "content": "hello",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "6": {
+      "content": "world",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "7": {
+      "content": "AI",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "8": {
+      "content": "data",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "9": {
+      "content": "train",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "[CLS]",
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token": "[UNK]"
+}
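Since tokenizer_config.json sets tokenizer_class to PreTrainedTokenizerFast, the upload can also be loaded through transformers. A minimal sketch, assuming the files live in the current directory (or are fetched from this repo on the Hub by its repo id):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # or the Hub repo id
print(tokenizer("hello world AI").input_ids)    # ids come from the tiny vocab above

Note that no post_processor is defined in tokenizer.json, so [CLS] and [SEP] are not added automatically.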
vocab.txt ADDED
@@ -0,0 +1,5 @@
+hello
+world
+AI
+data
+train