Implementing processor manageability on an Orange Pi 5
I'm struggling to properly integrate this into my AI script. It appears to run — the CPU usage redlines — but then it times out. Currently I have no clue how to properly select between the CPU, NPU, and GPU, or how to manage the resource constraints. Here is my code if you want to try to make it better; it's two files: "huggingface_autodetect.py" and "chat_commands.py".
# NOTE(review): the stray leading ''' that wrapped this file turned the whole
# script into an (unterminated) string literal — removed so the code executes.

# Standard library
import functools
import os
import re
import subprocess
import sys
import threading
import time
import tracemalloc
import webbrowser
import tkinter as tk
from tkinter import filedialog, messagebox, ttk

# Third-party
import openai
import speech_recognition as sr
import torch
from gtts import gTTS
from transformers import AutoTokenizer, AutoModelForCausalLM
# Runtime state flags for the voice-command loop.
doListenToCommand = True   # False while the app is speaking (mutes the mic)
listening = False          # True while a capture is in flight
is_decoder = True

# Phrases that end the conversation and close the app.
despedida = ["Goodbye", "goodbye", "bye", "Bye", "See you later", "see you later"]

# Processor selector variables
# (this line had lost its leading '#', which was a SyntaxError)
use_npu = False
use_gpu = False
use_cpu = True

# Default to online (OpenAI) mode until the user picks Run Online/Offline;
# previously use_offline was only created by run_online()/run_offline(),
# so submit() raised NameError if neither menu item had been clicked.
use_offline = False
# --- Main window and chat widgets -------------------------------------------
# Order matters here: widgets are packed bottom-up, and the scrollbar is
# cross-wired to the Text widget after both exist.
window = tk.Tk()
window.title("Computer: AI")
window.geometry("400x400")
# Single-line entry for typed messages.
text_entry = tk.Entry(window, width=50)
text_entry.pack(side=tk.BOTTOM)
# Button delegates to submit() via a lambda (submit is defined later in the file).
submit_button = tk.Button(window, text="Submit", command=lambda: submit())
submit_button.pack(side=tk.BOTTOM)
# Chat transcript area.
text_output = tk.Text(window, height=300, width=300)
text_output.pack(side=tk.BOTTOM)
scrollbar = tk.Scrollbar(window)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
# Two-way binding between the Text widget and its scrollbar.
text_output.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=text_output.yview)
# NOTE(review): this is a 30B-parameter model — it needs tens of GB of RAM and
# will not fit on an Orange Pi 5.  Swap in a small (1-3B) chat model for
# on-device use; the redlined CPU followed by a timeout is what loading a
# too-large model looks like.
tokenizer = AutoTokenizer.from_pretrained("Aeala/GPT4-x-AlpacaDente2-30b")


@functools.lru_cache(maxsize=1)  # zero-arg loader: one cache slot is enough
def get_model():
    """Load the causal LM once and reuse it on every later call.

    ``low_cpu_mem_usage=True`` streams checkpoint shards into the model
    instead of materialising a second full copy of the weights in RAM,
    which matters on a memory-constrained SBC.
    """
    return AutoModelForCausalLM.from_pretrained(
        "Aeala/GPT4-x-AlpacaDente2-30b",
        low_cpu_mem_usage=True,
    )
# Set your OpenAI API key here (this line had lost its leading '#', which was
# a SyntaxError).  An empty key makes every online request fail.
openai.api_key = ""
def submit(event=None, text_input=None):
    """Handle one user utterance: get a reply (OpenAI or local model),
    speak it, and show it in the transcript.

    Parameters
    ----------
    event : tkinter event, optional
        Unused; present so the Entry widget can bind <Return> to this.
    text_input : str, optional
        Text from the voice recognizer; when None/empty, the text is read
        from the Entry widget instead.
    """
    global doListenToCommand
    global listening

    # Voice input takes priority over the Entry widget.
    if text_input:
        usuario = text_input
    else:
        usuario = text_entry.get()

    # Any goodbye phrase closes the application.
    if usuario in despedida:
        on_closing()
        return

    prompt = f"You are ChatGPT and answer my following message: {usuario}"

    # Fall back to online mode if neither Run menu item was chosen yet —
    # previously this raised NameError because use_offline was only created
    # inside run_online()/run_offline().
    offline = globals().get("use_offline", False)

    if not offline:
        response = openai.Completion.create(
            engine="text-davinci-003",
            prompt=prompt,
            max_tokens=100,
            n=1,
            stop=None,
            temperature=0.7,
            top_p=1.0,
            frequency_penalty=0.0,
            presence_penalty=0.0,
        )
        respuesta = response.choices[0].text.strip()
    else:
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = get_model()

        # Device selection fix: torch.cuda only addresses a CUDA GPU, so the
        # GPU flag (not the NPU flag) must gate it.  The RK3588 NPU is not
        # reachable through torch.device at all — it needs the RKNN toolkit;
        # until then NPU requests fall back to CPU.
        device = torch.device("cpu")
        if use_gpu and torch.cuda.is_available():
            device = torch.device("cuda")

        model = model.to(device)
        input_ids = input_ids.to(device)
        with torch.no_grad():  # inference only — skip autograd bookkeeping
            output = model.generate(input_ids, max_length=100, num_return_sequences=1)
        respuesta = tokenizer.decode(output[0], skip_special_tokens=True)
        # The model echoes the prompt; strip it so only the answer remains.
        respuesta = respuesta.replace(prompt, "").strip()

    texto = str(respuesta)
    tts = gTTS(texto, lang='en', tld='ie')
    tts.save("audio.mp3")
    play_audio()  # the original saved the mp3 but never played it

    text_output.insert(tk.END, "ChatGPT: " + respuesta + "\n")
    text_entry.delete(0, tk.END)

    # Mute the microphone briefly so the app does not hear its own reply.
    if doListenToCommand:
        doListenToCommand = False
        text_output.insert(tk.END, "Computer is now quiet...\n")
        window.update()
        time.sleep(1)
        doListenToCommand = True
def play_audio():
    """Play the freshly saved ``audio.mp3`` with the platform's native player."""
    platform = sys.platform
    if platform.startswith('darwin'):
        # macOS ships afplay out of the box.
        subprocess.call(["afplay", "audio.mp3"])
    elif platform.startswith('win32'):
        # Hand the file to whatever Windows associates with .mp3.
        os.startfile("audio.mp3")
    elif platform.startswith('linux'):
        # PulseAudio command-line player.
        subprocess.call(["paplay", "audio.mp3"])
def listen_to_command():
    """Background loop: capture microphone audio, transcribe it with Google
    Speech Recognition, and feed the text to submit().

    Runs forever in its daemon thread.  The original version called itself
    recursively after every capture (the call stack grows without bound) and
    *returned* whenever doListenToCommand was False — so the first time
    submit() muted the mic, the listener thread died permanently.  A
    while-loop that sleeps and re-checks fixes both problems.
    """
    global doListenToCommand
    global listening

    # Hoisted out of the loop: one recognizer instance is enough.
    r = sr.Recognizer()
    r.energy_threshold = 3000

    while True:
        if not doListenToCommand or listening:
            # Muted, or a capture is already in flight: wait and re-check
            # instead of exiting the thread.
            time.sleep(0.2)
            continue

        listening = True
        print("Listening...")
        try:
            with sr.Microphone() as source:
                audio = r.listen(source)
            try:
                command = r.recognize_google(audio)
                print("You said: " + command)
                text_output.insert(tk.END, "You: " + command + "\n")
                text_entry.delete(0, tk.END)
                submit(text_input=command)
            except sr.UnknownValueError:
                print("Speech recognition could not understand audio.")
            except sr.RequestError as e:
                print("Could not request results from Google Speech Recognition service:", str(e))
        finally:
            # Always release the flag, even if the microphone raises.
            listening = False
def on_closing():
    """Confirm with the user, then tear down the main window."""
    should_quit = messagebox.askokcancel("Quit", "Do you want to quit?")
    if should_quit:
        window.destroy()
# Ask for confirmation when the window's close button is pressed.
window.protocol("WM_DELETE_WINDOW", on_closing)
# --- Menu bar: File -> Exit, Run -> Run Online / Run Offline ----------------
menu_bar = tk.Menu(window)
file_menu = tk.Menu(menu_bar, tearoff=0)
file_menu.add_separator()
file_menu.add_command(label="Exit", command=on_closing)
menu_bar.add_cascade(label="File", menu=file_menu)
run_menu = tk.Menu(menu_bar, tearoff=0)
# The handlers just flip the use_offline flag; each click spawns a one-shot
# thread so the Tk event loop is never blocked.
run_menu.add_command(label="Run Online", command=lambda: threading.Thread(target=run_online).start())
run_menu.add_command(label="Run Offline", command=lambda: threading.Thread(target=run_offline).start())
menu_bar.add_cascade(label="Run", menu=run_menu)
window.config(menu=menu_bar)
def clear_output():
    """Wipe everything currently shown in the chat transcript widget."""
    start, end = "1.0", tk.END
    text_output.delete(start, end)
def run_online():
    """Switch the chat backend to the hosted OpenAI API."""
    global use_offline
    use_offline = False
def run_offline():
    """Switch the chat backend to the local Hugging Face model."""
    global use_offline
    use_offline = True
# Launch the background voice-listener thread; daemon=True means it dies with
# the main process instead of blocking interpreter exit.
start_listening_thread = threading.Thread(target=listen_to_command)
start_listening_thread.daemon = True
start_listening_thread.start()

# Enter the Tk event loop (blocks until the window is closed).
# NOTE(review): the stray trailing ''' that closed the forum code fence was
# removed — it was not valid Python.
window.mainloop()
import subprocess
import webbrowser
import re
import validators
import sys
def process_commands(passed_commands, command):
    """Dispatch one transcribed voice command.

    Parameters
    ----------
    passed_commands : module or object exposing the GUI pieces this file
        needs: ``text_output`` (a Text widget), ``tk`` (for ``tk.END``) and
        ``submit``.
    command : str
        The raw transcribed utterance.

    The original file defined ``process_commands`` twice; the second
    definition silently replaced the first, so the entire "open website"
    handler was dead code, and "Invalid command" was reported before the
    stop-listening / stop-program checks were even reached.  This merged
    version restores every handler and only reports "Invalid command" when
    nothing matched.
    """
    lowered = command.lower()
    handled = False

    # Wake word: forward the whole utterance to the chat backend.
    if "computer" in lowered:
        print("Activated Command: Computer")
        passed_commands.text_output.insert(
            passed_commands.tk.END, "Activated Command: Computer" + "\n")
        passed_commands.submit(text_input=command)
        handled = True

    # Open a website: everything after the phrase is treated as the URL.
    if "open website" in lowered:
        url = command.partition("open website")[2].strip()
        # Prepend a scheme if the user did not say one.
        if not url.startswith("http://") and not url.startswith("https://"):
            url = "http://" + url
        print("Trying to open website: " + url)
        if validators.url(url):
            webbrowser.open(url, new=0, autoraise=True)
            passed_commands.text_output.insert(
                passed_commands.tk.END, "Opening website: " + url + "\n")
        else:
            print("Invalid URL command. URL: " + url)
            passed_commands.text_output.insert(
                passed_commands.tk.END, "Invalid URL command. URL: " + url + "\n")
        return

    # Launch a program by name.
    if "run program" in lowered:
        app_name = command.partition("run program")[2].strip()
        print("Trying to open program: " + app_name)
        try:
            subprocess.Popen(app_name)
            passed_commands.text_output.insert(
                passed_commands.tk.END, "Opening program: " + app_name + "\n")
        except FileNotFoundError:
            print("Program not found: " + app_name)
            passed_commands.text_output.insert(
                passed_commands.tk.END, "Program not found: " + app_name + "\n")
        return

    # Stop listening to the microphone.
    if lowered == "stop listening":
        passed_commands.text_output.insert(
            passed_commands.tk.END, "Stopping the microphone." + "\n")
        # TODO(review): flip the caller's doListenToCommand flag here; the
        # original left this branch unimplemented ("What goes here?").
        return

    # Allow program exit via voice.
    if lowered == "stop program":
        passed_commands.text_output.insert(
            passed_commands.tk.END, "Stopping the program." + "\n")
        sys.exit()

    if not handled:
        print("Invalid command")
        passed_commands.text_output.insert(
            passed_commands.tk.END, "Invalid command" + "\n")
Sorry — it appears the triple-quote (''' ''') code fencing isn't working, so the two files above ran together.