diff --git a/README.md b/README.md
index 09170bf..eb831ed 100644
--- a/README.md
+++ b/README.md
@@ -12,6 +12,13 @@ InitialTerm is a terminal application that leverages the Ollama API to convert u
 
 **Please ensure that you review and understand the commands before executing them to prevent any unintended consequences.**
 
+### Features
+
+- **Ollama API Integration**: Converts natural-language queries into command-line commands using a locally running Ollama model.
+- **Cross-Platform Support**: Works on Windows, macOS, and Linux.
+- **Session Memory**: Keeps the conversation history for the current session, so follow-up queries have context.
+- **User Confirmation**: Asks for confirmation before executing any generated command.
+
 ---
 
 # InitialTerm
diff --git a/initialterm/main.py b/initialterm/main.py
index d1f7f9d..6dc7644 100644
--- a/initialterm/main.py
+++ b/initialterm/main.py
@@ -5,6 +5,8 @@
 import os
 import sys
 import logging
+import json
+import uuid
 
 # Configure logging
 logging.basicConfig(level=logging.DEBUG,
@@ -24,7 +26,49 @@ class Color:
     PINK = '\033[95m'
 
 
-def ollama_api_call(os_name, command, model_name):
+def get_session_file(os_name):
+    """
+    Gets the path to the session file.
+
+    Parameters:
+    os_name (str): The operating system name.
+
+    Returns:
+    str: The path to the session file.
+    """
+    session_dir = os.path.join(os.path.expanduser("~"), ".conversation")
+    if not os.path.exists(session_dir):
+        os.makedirs(session_dir)
+    session_id = uuid.uuid4().hex
+    return os.path.join(session_dir, f"session_{os_name.lower()}_{session_id}.json")
+
+def load_session(session_file):
+    """
+    Loads the session data from a JSON file.
+
+    Parameters:
+    session_file (str): The path to the session file.
+
+    Returns:
+    list: The session data.
+    """
+    if os.path.exists(session_file):
+        with open(session_file, 'r') as file:
+            return json.load(file)
+    return []
+
+def save_session(session_file, session_data):
+    """
+    Saves the session data to a JSON file.
+
+    Parameters:
+    session_file (str): The path to the session file.
+    session_data (list): The session data to save.
+    """
+    with open(session_file, 'w') as file:
+        json.dump(session_data, file, indent=4)
+
+def ollama_api_call(os_name, command, model_name, session_data):
     """
     Calls the Ollama API to convert user queries into command-line commands.
 
@@ -32,15 +76,17 @@ def ollama_api_call(os_name, command, model_name):
     os_name (str): The operating system name.
     command (str): The user query to convert.
     model_name (str): The model name to use for the API call.
+    session_data (list): The session data to include in the API call.
 
     Returns:
     str: The generated command-line command.
     """
     logging.debug(f"Calling Ollama API with OS: {os_name}, command: {command}, and model: {model_name}")
+    messages = session_data + [{'role': 'user', 'content': f'I am using the {os_name} operating system, which does not have any extensions installed, and I want to convert the user query: {command} to command-line / terminal code. Only output one line of terminal command, please. Do not add any other text, as the intention is to copy-paste the generated output directly into the terminal and run it.'}]
     stream = ollama.chat(
         model=model_name,
         options={'temperature': 0.1},
-        messages=[{'role': 'user', 'content': f'I am using {os_name} operating system which does not have any extentions instaalled and I want to Convert the user query: {command} to commandline / terminal code. Only output one line of terminal command please. Do not add any other text as the intention is to copy paste this generated output directly in terminal and run.'}],
+        messages=messages,
         stream=True,
     )
     logging.debug("Ollama API call completed")
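
The hunk above is the heart of the session-memory feature: the history is kept as a plain list of Ollama-style message dicts, persisted as pretty-printed JSON, and prepended to every new prompt by `ollama_api_call`. A minimal, self-contained sketch of that round-trip (the file path and sample messages are illustrative, not taken from the repository):

```python
# Sketch of the session round-trip implemented by get_session_file, load_session
# and save_session above. The real code names the file from the OS name plus a
# random UUID under ~/.conversation; a throwaway temp file is used here instead.
import json
import os
import tempfile

session_file = os.path.join(tempfile.gettempdir(), "session_demo.json")

# A new session starts empty (what load_session returns when no file exists).
session_data = []

# Each turn is stored as an Ollama-style chat message.
session_data.append({'role': 'user', 'content': 'list all files in the current directory'})
session_data.append({'role': 'assistant', 'content': 'ls -la'})

# Persist the history as pretty-printed JSON, as save_session does.
with open(session_file, 'w') as fh:
    json.dump(session_data, fh, indent=4)

# On the next query the history is reloaded and prepended to the new user
# message, so the model sees the earlier exchange as context.
with open(session_file) as fh:
    history = json.load(fh)

messages = history + [{'role': 'user', 'content': 'now show only hidden files'}]
print(json.dumps(messages, indent=2))
```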
@@ -53,10 +99,44 @@
     strdata = ''.join([chunk for chunk in stream_data]).replace("`", "").replace("```sh", "").replace("\n", "").replace("```bash", "")
 
     print(f"\n{Color.BLUE}Finished.\nGenerated: {strdata}\n{Color.RESET}")
+    session_data.append({'role': 'assistant', 'content': strdata.strip().replace('`', '')})
     return strdata.strip().replace('`', '')
 
+def ollama_api_correct_error(os_name, error_message, model_name, session_data):
+    """
+    Calls the Ollama API to correct errors in the command.
 
-def echo_and_execute(command, os_name, model_name):
+    Parameters:
+    os_name (str): The operating system name.
+    error_message (str): The error message to correct.
+    model_name (str): The model name to use for the API call.
+    session_data (list): The session data to include in the API call.
+
+    Returns:
+    str: The corrected command-line command.
+    """
+    logging.debug(f"Calling Ollama API to correct error with OS: {os_name}, error: {error_message}, and model: {model_name}")
+    messages = session_data + [{'role': 'user', 'content': f'I am using {os_name} operating system and encountered the following error while executing a command: {error_message}. Please provide a corrected command to resolve this error.'}]
+    stream = ollama.chat(
+        model=model_name,
+        options={'temperature': 0.1},
+        messages=messages,
+        stream=True,
+    )
+    logging.debug("Ollama API error correction call completed")
+
+    stream_data = []
+    for chunk in stream:
+        stream_data.append(chunk['message']['content'])
+        print(f"{Color.BLUE}{chunk['message']['content']}{Color.RESET}", end='', flush=True)
+
+    strdata = ''.join([chunk for chunk in stream_data]).replace("`", "").replace("```sh", "").replace("\n", "").replace("```bash", "")
+    print(f"\n{Color.BLUE}Finished.\nCorrected: {strdata}\n{Color.RESET}")
+
+    session_data.append({'role': 'assistant', 'content': strdata.strip().replace('`', '')})
+    return strdata.strip().replace('`', '')
+
+def echo_and_execute(command, os_name, model_name, session_data):
     """
     Executes a command generated by the Ollama API and handles user confirmation.
 
@@ -64,10 +144,11 @@
     command (str): The command to execute.
     os_name (str): The operating system name.
     model_name (str): The model name used for the API call.
+    session_data (list): The session data to include in the API call.
     """
     logging.info(f"Executing command: {command} on OS: {os_name}")
     try:
-        command_to_execue = ollama_api_call(os_name, command, model_name)
+        command_to_execue = ollama_api_call(os_name, command, model_name, session_data)
         confirm = input(f"Generated command is: {Color.CYAN}'{command_to_execue}', shall we continue? (Y/N):{Color.RESET}# ").strip().lower()
         if confirm.lower() in ['y', 'yes', 'yup']:
             result = subprocess.run(command_to_execue, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -83,6 +164,19 @@
             if error:
                 logging.error(f"Command error: {error}")
                 print(f"\n{Color.RED}Error: {error}{Color.RESET}")
+                corrected_command = ollama_api_correct_error(os_name, error, model_name, session_data)
+                confirm = input(f"Corrected command is: {Color.CYAN}'{corrected_command}', shall we continue? (Y/N):{Color.RESET}# ").strip().lower()
+                if confirm.lower() in ['y', 'yes', 'yup']:
+                    result = subprocess.run(corrected_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                    logging.info(f"Corrected command executed: {corrected_command}")
+                    output = result.stdout.decode().strip()
+                    error = result.stderr.decode().strip()
+                    if output:
+                        logging.debug(f"Corrected command output: {output}")
+                        print(f"\n{Color.GREEN}# Output:{Color.RESET}\n{Color.PINK}{output}{Color.RESET}")
+                    if error:
+                        logging.error(f"Corrected command error: {error}")
+                        print(f"\n{Color.RED}Error: {error}{Color.RESET}")
     except Exception as e:
         logging.exception(f"An exception occurred: {e}")
         print(f"{Color.RED}An exception occurred while executing the command: {e}{Color.RESET}", file=sys.stderr)
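
The error-handling hunk above amounts to a single retry: if the executed command writes anything to stderr, the error text is sent back to the model and the user is offered one corrected command. A rough, self-contained sketch of that flow, with the hypothetical `suggest_correction` standing in for `ollama_api_correct_error` and example commands:

```python
# Sketch of the single-retry flow added to echo_and_execute: run a command,
# and if stderr is non-empty, ask for a corrected command and run it once.
# suggest_correction is a stand-in for the model call; the commands are examples.
import subprocess

def suggest_correction(error_message: str) -> str:
    # Placeholder: the real code asks the Ollama model to fix the command.
    return "ls -la"

def run_once(cmd: str) -> tuple[str, str]:
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    return result.stdout.strip(), result.stderr.strip()

output, error = run_once("lss -la")  # deliberately misspelled command
if error:
    corrected = suggest_correction(error)
    if input(f"Run corrected command '{corrected}'? (y/n): ").strip().lower() == "y":
        output, error = run_once(corrected)

print(output or error)
```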
@@ -99,6 +193,9 @@ def custom_cmd(os_name, model_name):
     logging.info(f"Starting custom command prompt for {os_name} with model {model_name}")
     print(f"{Color.CYAN}Welcome to the Initial Terminal command prompt for {os_name} with model {model_name}!\n Ollama with {model_name} LLM running locally for inference\n{Color.RESET}\n Type quit/exit to exit")
 
+    session_file = get_session_file(os_name)
+    session_data = load_session(session_file)
+
     while True:
         try:
             input_str = input(f"{Color.GREEN}_____________\nCommand to execute :\n{Color.RESET}# ")
@@ -108,7 +205,9 @@
                 print(f"{Color.GREEN}Exiting the custom command prompt.{Color.RESET}")
                 break
 
-            echo_and_execute(input_str, os_name, model_name)
+            session_data.append({'role': 'user', 'content': input_str})
+            echo_and_execute(input_str, os_name, model_name, session_data)
+            save_session(session_file, session_data)
 
         except KeyboardInterrupt:
             logging.info("Exiting custom command prompt due to keyboard interrupt")
@@ -116,7 +215,7 @@
             break
 
 
-def start_custom_cmd(model_name='llama3.2:3b'):
+def start_custom_cmd(model_name='gemma3:latest'):
     """
     Initializes the application and starts the custom command prompt.
 
@@ -147,7 +246,7 @@
 
     parser = argparse.ArgumentParser()
     parser.add_argument('--spawn', action='store_true')
-    parser.add_argument('--model', type=str, default='llama3.2:3b', help='Specify the model name to use')
+    parser.add_argument('--model', type=str, default='gemma3:latest', help='Specify the model name to use')
     args = parser.parse_args()
 
     if not args.spawn:
diff --git a/start.sh b/start.sh
index f89779e..f519727 100755
--- a/start.sh
+++ b/start.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
-
+cd /media/tyson/TYSON/initialterm/
 # Activate the Python environment
-source ~/initialterm/.venv/bin/activate
+source /media/tyson/TYSON/initialterm/.venv/bin/activate
 
 # Start the initialterm command
-initialterm
\ No newline at end of file
+initialterm
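
One operational note on the switch to `gemma3:latest` in the hunks above: like any name passed via `--model`, the default must already be available in the local Ollama instance (pulled beforehand, e.g. with `ollama pull`), otherwise the chat call fails at runtime. A small sketch of how the flag interacts with the new default, mirroring the argparse lines in the last main.py hunk:

```python
# Sketch of the --model flag with the new default. 'gemma3:latest' is only a
# default string here; it is assumed to be a model already pulled into the
# local Ollama instance, or inference will fail when ollama.chat is called.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--spawn', action='store_true')
parser.add_argument('--model', type=str, default='gemma3:latest',
                    help='Specify the model name to use')

print(parser.parse_args([]).model)                          # gemma3:latest
print(parser.parse_args(['--model', 'llama3.2:3b']).model)  # llama3.2:3b
```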