-
-
Notifications
You must be signed in to change notification settings - Fork 17
/
main.py
156 lines (110 loc) · 8.31 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
# ██████╗ ███████╗ ██╗ ██╗ ███████╗ ██████╗ ██████╗ ██████╗ ██████╗ ██████╗ ███████╗
# ██╔══██╗ ██╔════╝ ██║ ██║ ██╔════╝ ██╔══██╗ ██╔═══██╗ ██╔════╝ ██╔═══██╗ ██╔══██╗ ██╔════╝
# ██║ ██║ █████╗ ██║ ██║ ███████╗ ██║ ██║ ██║ ██║ ██║ ██║ ██║ ██║ ██║ █████╗
# ██║ ██║ ██╔══╝ ╚██╗ ██╔╝ ╚════██║ ██║ ██║ ██║ ██║ ██║ ██║ ██║ ██║ ██║ ██╔══╝
# ██████╔╝ ███████╗ ╚████╔╝ ███████║ ██████╔╝╚██████╔╝ ╚██████╗ ╚██████╔╝ ██████╔╝ ███████╗
# ╚═════╝ ╚══════╝ ╚═══╝ ╚══════╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝
# Made With 💓 By - Sree ( Devs Do Code )
# YouTube Channel: https://www.youtube.com/@devsdocode
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
- Support: https://buymeacoffee.com/devsdocode
- Patreon: https://patreon.com/DevsDoCode
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# For any questions or concerns, reach out to us via our social media handles.
# Our top choice for contact is Telegram: https://t.me/devsdocode
# You can also find us on other platforms listed above. We're here to help!
# - YouTube Channel: https://www.youtube.com/@DevsDoCode
# - Telegram Group: https://t.me/devsdocode
# - Discord Server: https://discord.gg/ehwfVtsAts
# - Instagram:
# - Personal: https://www.instagram.com/sree.shades_/
# - Channel: https://www.instagram.com/devsdocode_/
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
- Support: https://buymeacoffee.com/devsdocode
- Patreon: https://patreon.com/DevsDoCode
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# ------------------------------------------------------------------------------
# Dive into the world of coding with Devs Do Code - where passion meets programming!
# Make sure to hit that Subscribe button to stay tuned for exciting content!
# Pro Tip: For optimal performance and a seamless experience, we recommend using
# the default library versions demonstrated in this demo. Your coding journey just
# got even better! Happy coding!
# ----------------------------------------------------------------------------
from IMPORTS import *
# Main voice-assistant loop: listen, route the utterance to the right handler
# (image generation, system control, vision, call, website Q&A, or plain chat),
# then speak the response.
#
# One shared thread pool for the whole session.  The original code built a new
# ThreadPoolExecutor() for every submit() and never shut any of them down,
# leaking worker threads on each loop iteration.
_WAKE_WORD = "jarvis"
executor = concurrent.futures.ThreadPoolExecutor()

while True:
    speech = listener.listen()
    lowered = speech.lower()
    if lowered.startswith(_WAKE_WORD) or lowered.endswith(_WAKE_WORD):
        # Strip the wake word from whichever end it appeared at.  The original
        # always sliced speech[6:], which chopped real words off the FRONT of
        # the utterance whenever "jarvis" was at the end.
        if lowered.startswith(_WAKE_WORD):
            speech = speech[len(_WAKE_WORD):].strip()
        else:
            speech = speech[:-len(_WAKE_WORD)].strip()
        print("Updated Speech:", speech)

        # Fan out the three LLM calls concurrently on the shared pool.
        response_img_or_text = executor.submit(
            deepInfra_TEXT.generate,
            [{"role": "user", "content": "Text to Classify -->" + speech}],
            system_prompt=BISECTORS.image_requests_v3)
        response_classifier = executor.submit(
            deepInfra_TEXT.generate,
            [{"role": "user", "content": "Text to Classify -->" + speech}],
            system_prompt=BISECTORS.complex_task_classifier_v6, stream=False)
        default_response = executor.submit(
            deepInfra_TEXT.generate, history_manager.history,
            system_prompt=INSTRUCTIONS.human_response_v3_AVA, stream=False)
        concurrent.futures.wait([response_img_or_text, response_classifier, default_response])

        # Hoist the repeated .result().lower() calls out of the elif chain.
        classifier_result = response_classifier.result().lower()
        print("Response Classifier >> ", "\033[91m" + response_classifier.result() + "\033[0m")
        print("Image or Text Classifier >> ", "\033[91m" + response_img_or_text.result() + "\033[0m")

        if "yes" in response_img_or_text.result().lower():
            speak("Sure Sir, Generating Your Image")
            decohere_ai.generate(speech)
            continue
        elif all(x in classifier_result for x in ("vision", "website", "call", "youtube")):
            # NOTE(review): `all` only fires when every one of the four labels
            # appears at once; `any` may have been intended — confirm against
            # the classifier's prompt/output format.
            print("\033[91mConfused with Classification. Using Default Response\033[0m")
            speak(default_response.result())
        elif "system control" in classifier_result:
            # Announce in the background while the setting is applied, then
            # block on the announcement before moving on (same effect as the
            # original's `with ThreadPoolExecutor()` shutdown-wait).
            speak_future = executor.submit(speak, "Sure Sir. Setting the Required Settings")
            speech_lower = speech.lower()
            if "dark" in speech_lower or "light" in speech_lower:
                theme = 0 if "dark" in speech_lower else 1
                system_theme.WindowsThemeManager().set_theme(theme)
            elif any(alignment in speech_lower for alignment in ["left", "center", "centre", "right"]):
                alignment = 0 if "left" in speech_lower else 1
                taskbar.TaskbarCustomizer().set_alignment(alignment)
            elif "temperature" in speech_lower:
                taskbar.TaskbarCustomizer().set_temperature_display(1)
            speak_future.result()
        elif "vision" in classifier_result:
            # BUGFIX: the original wrote submit(speak("Analysing, Please Wait")),
            # which called speak() synchronously and submitted its None return
            # value to the pool.  Pass the callable and its argument separately.
            executor.submit(speak, "Analysing, Please Wait")
            image_path = camera_vision.realtime_vision()
            response_vision = deepInfra_VISION.generate(speech, system_prompt=INSTRUCTIONS.vison_realtime_v1, image_path=image_path)
            print("AI>>", response_vision)
            os.remove(image_path)  # delete the temporary camera capture
            speak(response_vision)
        elif "call" in classifier_result:
            speak("Sure Sir. Calling")
            # make_call.call()
        elif "website" in classifier_result:
            # Answer a question about whatever page is open in Chrome.
            # NOTE(review): prompt strings ("METEDATA", "Keep you responses")
            # contain typos but are live prompt text — left byte-identical.
            site_markdown = jenna_reader.fetch_website_content(chrome_latest_url.get_latest_chrome_url())
            response = openrouter.generate(f"METEDATA: {site_markdown}\n\nQUERY: {speech}", system_prompt="Keep you responses very short and concise")
            speak(response, voice="Salli")
        else:
            print("AI>>", default_response.result())
            speak(default_response.result())
    else:
        # No wake word: plain chat turn, persisted to conversation history.
        history_manager.store_history(history_manager.history + [{"role": "user", "content": speech}])
        print("\033[93mHuman >> {}\033[0m".format(speech))
        chat_response = Hugging_Face_TEXT.generate(speech)
        print("\n\033[92mJARVIS >> {}\033[0m\n".format(chat_response))
        history_manager.update_file(speech, chat_response)
        speak(chat_response)