import os

import yaml
from ollama import Client

# Load API credentials from the local YAML config file
with open("ollama-api.yaml", "r") as yamlfile:
    data = yaml.safe_load(yamlfile)
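
# NOTE (assumption): data[0]["token"] below implies that "ollama-api.yaml" is a YAML
# list whose first entry holds the API token, e.g.:
#
#   - token: "your-ollama-api-key"
#
# yaml.safe_load() then returns a list of dicts, so data[0]["token"] yields the key.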

# -----------------------------
# Ollama Client
# -----------------------------

client = Client(
    host="https://ollama.com",
    headers={"Authorization": f"Bearer {data[0]['token']}"}
)


# -----------------------------
# Ollama function
# -----------------------------
def ask_ollama(prompt):
    """Query GPT via the Ollama Python client."""
    try:
        messages = [{"role": "user", "content": prompt}]
        response = client.chat("gpt-oss:120b-cloud", messages=messages, stream=False)
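
        # NOTE (assumption): with stream=False, client.chat() is expected to return a
        # single response whose generated text sits under response["message"]["content"];
        # the checks below guard against a differently shaped payload.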
        # Safeguard: check that the expected keys exist before indexing
        if "message" in response and "content" in response["message"]:
            return response["message"]["content"]
        elif "content" in response:
            return response["content"]
        else:
            return "⚠️ Ollama returned no response."
    except Exception as e:
        return f"Error talking to GPT-oss: {str(e)}"


# -----------------------------
# Main text-processing function
# -----------------------------
def process_text(command):
    command_lower = command.lower()  # note: currently unused
    print("Command received:", command)
    return ask_ollama(command)


# -----------------------------
# Local test
# -----------------------------
if __name__ == "__main__":
    user_input = input("Enter your command: ")
    print(process_text(user_input))