# NOTE(review): removed non-code residue ("Spaces / Sleeping / Sleeping") —
# a Hugging Face Spaces page header captured by the scraper, not part of the file.
"""
FastAPI Backend Server for AuthorCheck AI Detection
Uses DeBERTa model for AI-generated text detection
"""
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from typing import List, Optional
import uvicorn
from model_handler import AIDetectionModelHandler
import logging
# Configure logging
# Root-logger config applied at import time: INFO level, timestamped format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Initialize FastAPI app
app = FastAPI(
    title="AuthorCheck API",
    description="AI-powered text analysis and detection API",
    version="1.0.0"
)

# Configure CORS
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is a
# known CORS footgun — browsers refuse to send credentials to a wildcard
# origin, so the credentials flag is effectively dead here. Fine for local
# development; restrict origins before production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Initialize model handler
# Instantiated once at import time and shared by all request handlers.
# Presumably this loads the DeBERTa detector — confirm in model_handler.py.
model_handler = AIDetectionModelHandler()
# Request/Response Models

# Request body shared by the analysis and detection endpoints.
# Only `text` is required; the endpoints additionally enforce a 50-7000 word range.
class AnalysisRequest(BaseModel):
    text: str = Field(..., min_length=1, description="Text to analyze")
    # Which analyses to run; defaults to everything.
    analysisTypes: Optional[List[str]] = Field(
        default=["all"],
        description="Types of analysis to perform"
    )
    # Model selector; only "ai-detector" is visible in this file.
    model: Optional[str] = Field(
        default="ai-detector",
        description="Model to use for analysis"
    )
# One detected emotion: its label, numeric score, and a qualitative intensity label.
class EmotionData(BaseModel):
    emotion: str    # emotion label
    score: float    # model score for this emotion
    intensity: str  # qualitative intensity bucket (exact values set by model_handler)
# Sentiment section of the analysis response: per-emotion scores plus
# aggregate polarity statistics (optional — may be absent from handler output).
class AdvancedSentiment(BaseModel):
    emotions: List[EmotionData]
    confidence: float
    context: str
    avg_polarity: Optional[float] = None        # mean polarity, when computed
    polarity_variance: Optional[float] = None   # polarity spread, when computed
# One detected topic with its relevance score and supporting keywords.
class TopicData(BaseModel):
    topic: str
    relevance: float
    keywords: List[str]
# Writing-style section of the analysis response.
class WritingStyle(BaseModel):
    tone: str
    formality: str
    complexity: str
    style: List[str]    # style descriptors
    audience: str
    sentiment_consistency: Optional[str] = None  # optional; may be absent from handler output
# A single human-readable insight, optionally carrying an improvement suggestion.
class Insight(BaseModel):
    type: str
    title: str
    description: str
    suggestion: Optional[str] = None
# Plagiarism-risk summary: integer score, risk level label, and free-text detail.
class PlagiarismRisk(BaseModel):
    score: int
    level: str
    details: str
# Content-quality metrics — integer scores (presumably 0-100; set by model_handler).
class ContentQuality(BaseModel):
    overall: int
    clarity: int
    coherence: int
    engagement: int
    originality: int
# Full payload of the comprehensive analysis endpoint. The aiOrHuman* fields
# match the keys model_handler.analyze_text() is logged as returning.
class AnalysisResponse(BaseModel):
    advancedSentiment: AdvancedSentiment
    topics: List[TopicData]
    writingStyle: WritingStyle
    insights: List[Insight]
    plagiarismRisk: PlagiarismRisk
    contentQuality: ContentQuality
    aiOrHuman: str                 # classification label
    aiOrHumanConfidence: float     # confidence, logged as a percentage
    aiOrHumanExplanation: str
# API Endpoints

# NOTE(review): this handler had no route decorator, so FastAPI never
# registered it. Path inferred from the docstring — confirm against clients.
@app.get("/")
async def root():
    """Root endpoint - API health check.

    Returns:
        dict: static payload with service status, message, and version.
    """
    return {
        "status": "online",
        "message": "AuthorCheck API is running",
        "version": "1.0.0"
    }
# NOTE(review): this handler had no route decorator, so FastAPI never
# registered it. Path inferred from the function name — confirm against clients.
@app.get("/health")
async def health_check():
    """Health check endpoint.

    Returns:
        dict: "healthy" when the model is loaded, "degraded" when the API is
        up but the model is not, or "unhealthy" with the error string when the
        handler itself raises. Never propagates an exception to the client.
    """
    try:
        model_loaded = model_handler.is_loaded()
        return {
            "status": "healthy" if model_loaded else "degraded",
            "model_loaded": model_loaded,
            "model_type": "DeBERTa AI Detector"
        }
    except Exception as e:
        # Deliberate best-effort: a health probe should report failure, not 500.
        logger.error(f"Health check failed: {e}")
        return {
            "status": "unhealthy",
            "error": str(e)
        }
# NOTE(review): this handler had no route decorator, so FastAPI never
# registered it. Path inferred; response_model wired to the otherwise-unused
# AnalysisResponse model — confirm both against the frontend client.
@app.post("/api/analyze", response_model=AnalysisResponse)
async def analyze_text(request: AnalysisRequest):
    """
    Analyze text using the DeBERTa AI detection model.

    Returns comprehensive analysis including:
    - AI vs Human detection
    - Sentiment analysis
    - Topic detection
    - Writing style analysis
    - Content quality metrics

    Raises:
        HTTPException: 400 for empty text or a word count outside 50-7000;
            500 for any unexpected failure inside the model handler.
    """
    # Input validation lives outside the try block so HTTPExceptions propagate
    # directly and no `except HTTPException: raise` re-raise is needed.
    if not request.text or not request.text.strip():
        raise HTTPException(status_code=400, detail="Text cannot be empty")

    # Check text length for meaningful analysis (50-7000 words)
    word_count = len(request.text.split())
    if word_count < 50:
        raise HTTPException(
            status_code=400,
            detail="Text is too short for analysis. Please provide at least 50 words for accurate AI detection and sentiment analysis."
        )
    if word_count > 7000:
        raise HTTPException(
            status_code=400,
            detail="Text is too long for analysis. Maximum 7,000 words allowed."
        )

    # Keep the try body minimal: only the model call can fail unexpectedly.
    try:
        logger.info(f"Analyzing text of length: {len(request.text)}")
        analysis_result = model_handler.analyze_text(request.text)
        logger.info(f"Analysis complete: {analysis_result['aiOrHuman']} ({analysis_result['aiOrHumanConfidence']:.2f}%)")
        return analysis_result
    except Exception as e:
        logger.error(f"Analysis error: {e}", exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Analysis failed: {str(e)}"
        )
# NOTE(review): this handler had no route decorator, so FastAPI never
# registered it. Path inferred from the function name — confirm against clients.
@app.post("/api/detect")
async def detect_ai(request: AnalysisRequest):
    """
    Simple endpoint for AI detection only.
    Returns just the AI/Human classification.

    Raises:
        HTTPException: 400 for empty text or a word count outside 50-7000;
            500 for any unexpected failure inside the model handler.
    """
    # Input validation lives outside the try block so HTTPExceptions propagate
    # directly and no `except HTTPException: raise` re-raise is needed.
    if not request.text or not request.text.strip():
        raise HTTPException(status_code=400, detail="Text cannot be empty")

    # Check text length (50-7000 words)
    word_count = len(request.text.split())
    if word_count < 50:
        raise HTTPException(
            status_code=400,
            detail="Text is too short. Please provide at least 50 words."
        )
    if word_count > 7000:
        raise HTTPException(
            status_code=400,
            detail="Text is too long. Maximum 7,000 words allowed."
        )

    try:
        result = model_handler.detect_ai(request.text)
        # Echo a truncated preview of the input alongside the classification.
        return {
            "text": request.text[:100] + "..." if len(request.text) > 100 else request.text,
            "classification": result["classification"],
            "prediction": result.get("prediction", result["classification"]),
            "probability": result["probability"],
            "confidence": result["confidence"],
            "explanation": result["explanation"],
            "mixed_analysis": result.get("mixed_analysis"),
            "modelProcessingTime": result.get("modelProcessingTime")
        }
    except Exception as e:
        logger.error(f"Detection error: {e}", exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Detection failed: {str(e)}"
        )
# NOTE(review): this handler had no route decorator, so FastAPI never
# registered it. Path inferred from the function name — confirm against clients.
@app.get("/api/model-info")
async def model_info():
    """Get information about the loaded model.

    Raises:
        HTTPException: 500 when the model handler fails to report its info.
    """
    try:
        return model_handler.get_model_info()
    except Exception as e:
        logger.error(f"Model info error: {e}")
        raise HTTPException(status_code=500, detail=str(e))
# Run the server
if __name__ == "__main__":
    # reload=True auto-restarts on code changes — development convenience only.
    # The "app:app" import string requires this module to be named app.py;
    # host 0.0.0.0 binds all interfaces (expected for containerized deployment).
    uvicorn.run(
        "app:app",
        host="0.0.0.0",
        port=8000,
        reload=True,
        log_level="info"
    )