[REFORMAT] Ran black reformat
This commit is contained in:
@@ -1,11 +1,11 @@
|
||||
# modules/nlp/service.py
|
||||
|
||||
from sqlalchemy.orm import Session
|
||||
from sqlalchemy import desc # Import desc for ordering
|
||||
from sqlalchemy import desc # Import desc for ordering
|
||||
from google import genai
|
||||
import json
|
||||
from datetime import datetime, timezone
|
||||
from typing import List # Import List
|
||||
from typing import List # Import List
|
||||
|
||||
# Import the new model and Enum
|
||||
from .models import ChatMessage, MessageSender
|
||||
@@ -14,7 +14,8 @@ from core.config import settings
|
||||
client = genai.Client(api_key=settings.GOOGLE_API_KEY)
|
||||
|
||||
### Base prompt for MAIA, used for initial user requests
|
||||
SYSTEM_PROMPT = """
|
||||
SYSTEM_PROMPT = (
|
||||
"""
|
||||
You are MAIA - My AI Assistant. Your job is to parse user requests into structured JSON commands and generate a user-facing response text.
|
||||
|
||||
Available functions/intents:
|
||||
@@ -109,8 +110,11 @@ MAIA:
|
||||
"response_text": "Okay, I've deleted task 2 from your list."
|
||||
}
|
||||
|
||||
The datetime right now is """+str(datetime.now(timezone.utc))+""".
|
||||
The datetime right now is """
|
||||
+ str(datetime.now(timezone.utc))
|
||||
+ """.
|
||||
"""
|
||||
)
|
||||
|
||||
### Prompt for MAIA to forward user request to AI
|
||||
SYSTEM_FORWARD_PROMPT = f"""
|
||||
@@ -123,6 +127,7 @@ Here is the user request:
|
||||
|
||||
# --- Chat History Service Functions ---
|
||||
|
||||
|
||||
def save_chat_message(db: Session, user_id: int, sender: MessageSender, text: str):
|
||||
"""Saves a chat message to the database."""
|
||||
db_message = ChatMessage(user_id=user_id, sender=sender, text=text)
|
||||
@@ -131,16 +136,21 @@ def save_chat_message(db: Session, user_id: int, sender: MessageSender, text: st
|
||||
db.refresh(db_message)
|
||||
return db_message
|
||||
|
||||
|
||||
def get_chat_history(db: Session, user_id: int, limit: int = 50) -> List[ChatMessage]:
    """Retrieve the last ``limit`` chat messages for a user, oldest first.

    Args:
        db: Active SQLAlchemy session.
        user_id: ID of the user whose history is fetched.
        limit: Maximum number of messages to return (default 50).

    Returns:
        Up to ``limit`` ChatMessage rows ordered oldest-to-newest,
        ready for display order.
    """
    return (
        db.query(ChatMessage)
        .filter(ChatMessage.user_id == user_id)
        .order_by(desc(ChatMessage.timestamp))
        .limit(limit)
        .all()[::-1]  # query yields newest-first; reverse to oldest-first
    )
|
||||
|
||||
|
||||
# --- Existing NLP Service Functions ---
|
||||
|
||||
|
||||
def process_request(request: str):
|
||||
"""
|
||||
Process the user request using the Google GenAI API.
|
||||
@@ -152,7 +162,7 @@ def process_request(request: str):
|
||||
config={
|
||||
"temperature": 0.3, # Less creativity, more factual
|
||||
"response_mime_type": "application/json",
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
# Parse the JSON response
|
||||
@@ -160,7 +170,9 @@ def process_request(request: str):
|
||||
parsed_response = json.loads(response.text)
|
||||
# Validate required fields
|
||||
if not all(k in parsed_response for k in ("intent", "params", "response_text")):
|
||||
raise ValueError("AI response missing required fields (intent, params, response_text)")
|
||||
raise ValueError(
|
||||
"AI response missing required fields (intent, params, response_text)"
|
||||
)
|
||||
return parsed_response
|
||||
except (json.JSONDecodeError, ValueError) as e:
|
||||
print(f"Error parsing AI response: {e}")
|
||||
@@ -169,9 +181,10 @@ def process_request(request: str):
|
||||
return {
|
||||
"intent": "error",
|
||||
"params": {},
|
||||
"response_text": "Sorry, I had trouble understanding that request or formulating a response. Could you please try rephrasing?"
|
||||
"response_text": "Sorry, I had trouble understanding that request or formulating a response. Could you please try rephrasing?",
|
||||
}
|
||||
|
||||
|
||||
def ask_ai(request: str):
    """Forward a free-form user question to the Gemini model.

    Args:
        request: Raw user request text, appended to the forward prompt.

    Returns:
        The model's plain-text response (``response.text``).
    """
    response = client.models.generate_content(
        model="gemini-2.0-flash",
        contents=SYSTEM_FORWARD_PROMPT + request,
    )
    return response.text
|
||||
|
||||
Reference in New Issue
Block a user