Ananthusajeev190 committed
Commit 761b952 · verified · 1 Parent(s): b5c939d

Upload main.py

Files changed (1)
  1. main.py +350 -0
main.py ADDED
@@ -0,0 +1,350 @@
+ import os
+ import openai
+ import json
+ from googlesearch import search
+ import requests
+ from bs4 import BeautifulSoup
+ import re
+ import keys
+ from readability import Document
+
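+ # NOTE (assumed dependencies, not pinned in this commit): the imports above rely on
+ # openai (pre-1.0 SDK, for openai.ChatCompletion), requests, beautifulsoup4,
+ # readability-lxml, and a googlesearch package exposing search(query, num_results=...).
+ # keys.py is a local module expected to define OPENAI_API_KEY.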
+ # Initialize the OpenAI API client
+ openai.api_key = keys.OPENAI_API_KEY
+
+ def scrape_text(url):
+     response = requests.get(url)
+     soup = BeautifulSoup(response.text, "html.parser")
+
+     # Remove script and style elements before extracting text
+     for script in soup(["script", "style"]):
+         script.extract()
+
+     text = soup.get_text()
+     lines = (line.strip() for line in text.splitlines())
+     # Split on double spaces so phrases stay intact; a single-space split would
+     # put every word on its own line
+     chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
+     text = '\n'.join(chunk for chunk in chunks if chunk)
+
+     return text
+
+ def scrape_main_content(url):
+     response = requests.get(url)
+
+     # Try using Readability
+     doc = Document(response.text)
+     content = doc.summary()
+     soup = BeautifulSoup(content, "html.parser")
+     text = soup.get_text('\n', strip=True)
+
+     # Check if Readability provided a satisfactory result (e.g., a minimum length)
+     # min_length = 50
+     # if len(text) < min_length:
+     #     # Fallback to the custom function
+     #     text = scrape_main_content_custom(response.text)
+
+     return text
+
+ def split_text(text, max_length=8192):
+     # Yield successive chunks of at most max_length characters, breaking on
+     # paragraph boundaries (a rough, character-based proxy for the model's context window)
+     paragraphs = text.split("\n")
+     current_length = 0
+     current_chunk = []
+
+     for paragraph in paragraphs:
+         if current_length + len(paragraph) + 1 <= max_length:
+             current_chunk.append(paragraph)
+             current_length += len(paragraph) + 1
+         else:
+             yield "\n".join(current_chunk)
+             current_chunk = [paragraph]
+             current_length = len(paragraph) + 1
+
+     if current_chunk:
+         yield "\n".join(current_chunk)
+
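+ # Illustrative usage (not part of the original flow): chunk a long article
+ # before summarizing, e.g.
+ #   chunks = list(split_text(article_text))
+ #   print(len(chunks), max(len(c) for c in chunks))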
+ def summarize_text(text):
+     if text == "":
+         return "Error: No text to summarize"
+
+     print("Text length: " + str(len(text)) + " characters")
+     summaries = []
+     chunks = list(split_text(text))
+
+     for i, chunk in enumerate(chunks):
+         print("Summarizing chunk " + str(i + 1) + " / " + str(len(chunks)))
+         messages = [{"role": "user", "content": "Please summarize the following text, focusing on extracting concise knowledge: " + chunk}]
+
+         response = openai.ChatCompletion.create(
+             model="gpt-3.5-turbo",
+             messages=messages,
+             max_tokens=300,
+         )
+
+         summary = response.choices[0].message.content
+         summaries.append(summary)
+
+     print("Summarized " + str(len(chunks)) + " chunks.")
+
+     combined_summary = "\n".join(summaries)
+
+     # Summarize the combined summary into a single final summary
+     messages = [{"role": "user", "content": "Please summarize the following text, focusing on extracting concise knowledge: " + combined_summary}]
+
+     response = openai.ChatCompletion.create(
+         model="gpt-3.5-turbo",
+         messages=messages,
+         max_tokens=300,
+     )
+
+     final_summary = response.choices[0].message.content
+     return final_summary
+
+ def create_chat_message(role, content):
+     """
+     Create a chat message with the given role and content.
+
+     Args:
+         role (str): The role of the message sender, e.g., "system", "user", or "assistant".
+         content (str): The content of the message.
+
+     Returns:
+         dict: A dictionary containing the role and content of the message.
+     """
+     return {"role": role, "content": content}
+
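+ # Example: create_chat_message("user", "hello") -> {"role": "user", "content": "hello"}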
+ def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, token_limit):
+     """
+     Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory.
+
+     Args:
+         prompt (str): The prompt explaining the rules to the AI.
+         user_input (str): The input from the user.
+         full_message_history (list): The list of all messages sent between the user and the AI.
+         permanent_memory (list): The list of items in the AI's permanent memory.
+         token_limit (int): The maximum number of tokens allowed in the API call.
+
+     Returns:
+         str: The AI's response.
+     """
+     current_context = [create_chat_message("system", prompt), create_chat_message("system", f"Permanent memory: {permanent_memory}")]
+     # Rough heuristic: trim the history by an index derived from character/item
+     # counts rather than an actual token count
+     current_context.extend(full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):])
+     current_context.extend([create_chat_message("user", user_input)])
+
+     # Debug print the current context
+     print("---------------------------")
+     print("Current Context:")
+     for message in current_context:
+         # Skip printing the prompt
+         if message["role"] == "system" and message["content"] == prompt:
+             continue
+         print(f"{message['role'].capitalize()}: {message['content']}")
+
+     response = openai.ChatCompletion.create(
+         model="gpt-4",
+         messages=current_context,
+     )
+
+     assistant_reply = response.choices[0].message["content"]
+     return assistant_reply
+
+ def execute_command(response):
+     # If not valid JSON, return "Error: Invalid JSON"
+     try:
+         response_json = json.loads(response)
+         command = response_json["command"]
+         command_name = command["name"]
+         arguments = command["args"]
+
+         if command_name == "google":
+             return google_search(arguments["input"])
+         elif command_name == "check_news":
+             return check_news(arguments["source"])
+         elif command_name == "check_notifications":
+             return check_notifications(arguments["website"])
+         elif command_name == "memory_add":
+             return commit_memory(arguments["string"])
+         elif command_name == "memory_del":
+             return delete_memory(arguments["key"])
+         elif command_name == "memory_ovr":
+             return overwrite_memory(arguments["key"], arguments["string"])
+         elif command_name == "start_instance":
+             return start_instance(arguments["name"], arguments["prompt"])
+         elif command_name == "manage_instances":
+             return manage_instances(arguments["action"])
+         elif command_name == "navigate_website":
+             return navigate_website(arguments["action"], arguments["username"])
+         elif command_name == "register_account":
+             return register_account(arguments["username"], arguments["website"])
+         elif command_name == "transcribe_summarise":
+             return transcribe_summarise(arguments["url"])
+         else:
+             return f"unknown command {command_name}"
+     except json.decoder.JSONDecodeError:
+         return "Error: Invalid JSON"
+     # For all other errors, return "Error: " + the error message
+     except Exception as e:
+         return "Error: " + str(e)
+
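+ # Illustrative example (follows the RESPONSE FORMAT defined in the prompt below):
+ #   execute_command('{"command": {"name": "google", "args": {"input": "AI news"}},'
+ #                   ' "thoughts": {"text": "...", "reasoning": "...",'
+ #                   ' "plan": "...", "criticism": "..."}}')
+ # returns a JSON-formatted list of result URLs from google_search().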
+ def google_search(query, num_results=3):
+     search_results = []
+     for j in search(query, num_results=num_results):
+         search_results.append(j)
+
+     return json.dumps(search_results, ensure_ascii=False, indent=4)
+
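+ # NOTE: the search(query, num_results=...) signature matches the googlesearch-python
+ # package; older "googlesearch" variants use different keyword arguments (this is an
+ # assumption about which package is installed).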
+ def check_news(source):
+     print("Checking news from BBC world instead of " + source)
+     _text = transcribe_summarise("https://www.bbc.com/news/world")
+     return _text
+
+ def check_notifications(website):
+     # Placeholder: only reports the requested check
+     _text = "Checking notifications from " + website
+     print(_text)
+     return _text
+
+ def commit_memory(string):
+     # Appends to the module-level permanent_memory list defined later in this file
+     _text = "Committing memory with string " + string
+     permanent_memory.append(string)
+     print(_text)
+     return _text
+
+ def delete_memory(key):
+     # Command args arrive as JSON strings, so coerce the key to an int first
+     key = int(key)
+     if 0 <= key < len(permanent_memory):
+         _text = "Deleting memory with key " + str(key)
+         del permanent_memory[key]
+         print(_text)
+         return _text
+     else:
+         print("Invalid key, cannot delete memory.")
+         return None
+
+ def overwrite_memory(key, string):
+     # Coerce the key to an int for the same reason as delete_memory
+     key = int(key)
+     if 0 <= key < len(permanent_memory):
+         _text = "Overwriting memory with key " + str(key) + " and string " + string
+         permanent_memory[key] = string
+         print(_text)
+         return _text
+     else:
+         print("Invalid key, cannot overwrite memory.")
+         return None
+
+ # The following command handlers are placeholders: they only report the requested
+ # action rather than performing it.
+ def start_instance(name, prompt):
+     _text = "Starting instance with name " + name + " and prompt " + prompt
+     print(_text)
+     return _text
+
+ def manage_instances(action):
+     _text = "Managing instances with action " + action
+     print(_text)
+     return _text
+
+ def navigate_website(action, username):
+     _text = "Navigating website with action " + action + " and username " + username
+     print(_text)
+     return _text
+
+ def register_account(username, website):
+     _text = "Registering account with username " + username + " and website " + website
+     print(_text)
+     return _text
+
+ def transcribe_summarise(url):
+     text = scrape_main_content(url)
+     summary = summarize_text(text)
+     # Note: this returns a JSON-looking fragment, not valid JSON
+     return """ "Result" : """ + summary
+
+ # Initialize variables
+ full_message_history = []
+ permanent_memory = []
+ prompt = """You are Entrepreneur-GPT, an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth. Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications.
+
+ GOALS:
+
+ 1. Increase net worth
+ 2. Grow Twitter Account
+ 3. Develop and manage multiple businesses autonomously
+
+ CONSTRAINTS:
+
+ 1. 6000-word count limit for memory
+ 2. No user assistance
+
+ COMMANDS:
+
+ 1. Google Search: "google", args: "input": "<search>"
+ 2. Check news: "check_news", args: "source": "<news source>"
+ 3. Check notifications: "check_notifications", args: "website": "<website>"
+ 4. Memory Add: "memory_add", args: "string": "<string>"
+ 5. Memory Delete: "memory_del", args: "key": "<key>"
+ 6. Memory Overwrite: "memory_ovr", args: "key": "<key>", "string": "<string>"
+ 7. Start GPT-4 Instance: "start_instance", args: "name": "<name>", "prompt": "<prompt>"
+ 8. Manage GPT-4 Instances: "manage_instances", args: "action": "view_kill"
+ 9. Navigate & Perform: "navigate_website", args: "action": "click_button/input_text/register_account", "text/username": "<text>/<username>"
+ 10. Register account: "register_account", args: "username": "<username>", "website": "<website>"
+ 11. Transcribe & Summarise: "transcribe_summarise", args: "url": "<url>"
+
+ RESOURCES:
+
+ 1. Internet access for searches and information gathering
+ 2. Long Term and Short Term memory management
+ 3. GPT-4 instances for text generation
+ 4. Access to popular websites and platforms
+ 5. File storage and summarisation with GPT-3.5
+
+ PERFORMANCE EVALUATION:
+
+ 1. Periodically review and analyze the growth of your net worth
+ 2. Reflect on past decisions and strategies to refine your approach
+
+ COLLABORATION:
+
+ 1. Seek advice from other AI instances or use relevant sources for guidance when necessary
+
+ ADAPTIVE LEARNING:
+
+ 1. Continuously refine strategies based on market trends and performance metrics
+
+ RESPONSE FORMAT:
+ {
+     "command":
+     {
+         "name": "command name",
+         "args":
+         {
+             "arg name": "value"
+         }
+     },
+     "thoughts":
+     {
+         "text": "thought",
+         "reasoning": "reasoning",
+         "plan": "short bulleted plan",
+         "criticism": "constructive self-criticism"
+     }
+ }
+
+ ACCOUNTS:
+ 1. Gmail: entrepreneurgpt@gmail.com
+ 2. Twitter: @En_GPT
+ 3. Github: E-GPT
+ 4. Substack: entrepreneurgpt@gmail.com"""
+
+ token_limit = 6000  # The maximum number of tokens allowed in the API call
+ result = None
+
+ # Example loop for interaction
+ while True:
+     user_input = input("User: ")
+
+     if user_input.lower() == "exit":
+         break
+
+     # Check if there's a result from the previous iteration and append it to the message history
+     if result is not None:
+         full_message_history.append(create_chat_message("system", result))
+         print("system: " + result)
+
+     assistant_reply = chat_with_ai(prompt, user_input, full_message_history, permanent_memory, token_limit)
+     print(f"Assistant: {assistant_reply}")
+     print("-------------------------")
+
+     # Add user message and assistant reply to message history
+     full_message_history.append(create_chat_message("user", user_input))
+     full_message_history.append(create_chat_message("assistant", assistant_reply))
+
+     result = execute_command(assistant_reply)
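+ # To run (a minimal sketch of the setup, assuming only what the code above imports):
+ # create a local keys.py containing OPENAI_API_KEY = "<your key>", install the
+ # dependencies noted near the imports, then start the script and type "exit" to quit.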