Coverage for app/main.py: 46%

89 statements  

« prev     ^ index     » next       coverage.py v7.12.0, created at 2025-11-30 09:36 +0100

1""" 

2FastAPI main application for SLM Code Engine 

3""" 

import logging
from contextlib import asynccontextmanager
from typing import Dict, Optional

from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse

from app import __version__
from app.config import settings
from app.core.orchestrator import Orchestrator
from app.models.schemas import (
    QueryRequest,
    QueryResponse,
    HealthResponse,
    TranslateRequest,
    BoilerplateRequest,
    FeedbackRequest,
    FeedbackResponse,
)

24 

# Configure logging
# NOTE: settings.log_level must be a valid logging level name (e.g. "INFO");
# getattr raises AttributeError at import time for an invalid value.
logging.basicConfig(
    level=getattr(logging, settings.log_level),
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Global orchestrator instance. It is None until the lifespan handler has
# created and initialized it, so every endpoint must check for None before
# use. Fix: the annotation must be Optional because the initial value is None.
orchestrator: Optional[Orchestrator] = None

34 

35 

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Lifecycle manager for the application.

    Runs once at startup (before ``yield``) to build and initialize the
    module-global Orchestrator, and once at shutdown (after ``yield``) to
    release its resources. Initialization failures are logged and re-raised
    so the server refuses to start.
    """
    global orchestrator

    logger.info("Starting SLM Code Engine...")

    try:
        # Initialize orchestrator
        orchestrator = Orchestrator()
        await orchestrator.initialize()
        logger.info("Orchestrator initialized successfully")

        # Hand control to the running application; execution resumes here
        # at shutdown (or if the app raised while running).
        yield

    except Exception as e:
        # NOTE(review): this also catches exceptions that propagate through
        # the yield while the app is running, not only init errors — the
        # "Failed to initialize" message can therefore be misleading.
        logger.error(f"Failed to initialize: {e}")
        raise

    finally:
        # Cleanup runs on both clean shutdown and failed startup; guard
        # against Orchestrator() itself having failed (orchestrator is None).
        logger.info("Shutting down SLM Code Engine...")
        if orchestrator:
            await orchestrator.shutdown()

60 

61 

# Create FastAPI app; startup/shutdown is driven by the lifespan manager above.
app = FastAPI(
    title="SLM Code Engine",
    description="Local AI-powered code assistant using Small Language Models",
    version=__version__,
    lifespan=lifespan
)

# CORS middleware
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True
# makes Starlette echo back any Origin header, effectively disabling the
# CORS origin check for credentialed requests — restrict origins before
# deploying to production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure appropriately for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

78 

79 

@app.get("/", response_model=Dict[str, str])
async def root():
    """Landing endpoint: report service name, version, status and docs URL."""
    info = {
        "name": "SLM Code Engine",
        "version": __version__,
        "status": "running",
    }
    info["docs"] = "/docs"
    return info

89 

90 

@app.get("/health", response_model=HealthResponse)
async def health_check():
    """Health check: report readiness plus loaded models and automata."""
    if not orchestrator:
        raise HTTPException(status_code=503, detail="Orchestrator not initialized")

    snapshot = await orchestrator.get_status()
    is_ready = snapshot["ready"]

    return HealthResponse(
        status="healthy" if is_ready else "initializing",
        version=__version__,
        models_loaded=snapshot.get("models_loaded", {}),
        automata_available=snapshot.get("automata_available", []),
    )

105 

106 

@app.post("/api/v1/query", response_model=QueryResponse)
async def process_query(request: QueryRequest):
    """Primary code-processing endpoint.

    Dispatches the request to the orchestrator. Supported tasks:
    fix (fix code errors), explain (explain code or errors), refactor,
    test (generate unit tests), translate (between languages), format,
    and boilerplate (generate boilerplate code).

    Failures are reported in-band: the response has success=False and an
    error message rather than an HTTP error status.
    """
    if not orchestrator:
        raise HTTPException(status_code=503, detail="Orchestrator not initialized")

    try:
        logger.info(f"Processing {request.task} request for {request.language}")

        outcome = await orchestrator.process(
            task=request.task,
            code=request.code,
            language=request.language,
            context=request.context,
            trace=request.trace,
        )
        return QueryResponse(**outcome)

    except Exception as e:
        logger.error(f"Error processing query: {e}", exc_info=True)
        return QueryResponse(
            success=False,
            task=request.task,
            error=str(e),
            used_automata=False,
            used_slm=False,
            pipeline=[],
            total_duration_ms=0,
        )

148 

149 

@app.post("/api/v1/translate", response_model=QueryResponse)
async def translate_code(request: TranslateRequest):
    """Translate code from one programming language to another.

    Failures are reported in-band via a QueryResponse with success=False.
    """
    if not orchestrator:
        raise HTTPException(status_code=503, detail="Orchestrator not initialized")

    try:
        outcome = await orchestrator.translate(
            code=request.code,
            source_lang=request.source_language,
            target_lang=request.target_language,
            preserve_comments=request.preserve_comments,
        )
        return QueryResponse(**outcome)

    except Exception as e:
        logger.error(f"Error translating code: {e}", exc_info=True)
        return QueryResponse(
            success=False,
            task="translate",
            error=str(e),
            used_automata=False,
            used_slm=False,
            pipeline=[],
            total_duration_ms=0,
        )

177 

178 

@app.post("/api/v1/boilerplate", response_model=QueryResponse)
async def generate_boilerplate(request: BoilerplateRequest):
    """Generate boilerplate code for a named template type and language.

    Failures are reported in-band via a QueryResponse with success=False.
    """
    if not orchestrator:
        raise HTTPException(status_code=503, detail="Orchestrator not initialized")

    try:
        outcome = await orchestrator.generate_boilerplate(
            template_type=request.template_type,
            language=request.language,
            name=request.name,
            options=request.options,
        )
        return QueryResponse(**outcome)

    except Exception as e:
        logger.error(f"Error generating boilerplate: {e}", exc_info=True)
        return QueryResponse(
            success=False,
            task="boilerplate",
            error=str(e),
            used_automata=False,
            used_slm=False,
            pipeline=[],
            total_duration_ms=0,
        )

206 

207 

# TODO(review): this import sits mid-module in the original layout; kept in
# place here to preserve import-time behavior, but it belongs in the top
# import block.
from app.storage.feedback import FeedbackLogger


@app.post("/api/v1/feedback", response_model=FeedbackResponse)
async def log_feedback(request: FeedbackRequest):
    """Record positive user feedback on an interaction.

    The stored feedback is used to improve the model over time. Duplicate
    submissions are detected by the logger and signalled via entry_created.
    Raises HTTP 500 if the feedback cannot be persisted.
    """
    try:
        store = FeedbackLogger()
        entry_created = store.log_feedback(
            task=request.task.value,
            language=request.language.value,
            request_code=request.request_code,
            response_code=request.response_code,
            response_explanation=request.response_explanation,
        )

        message = (
            "Feedback logged successfully. Thank you!"
            if entry_created
            else "This feedback was already recorded."
        )
        return FeedbackResponse(
            success=True,
            message=message,
            entry_created=entry_created,
        )

    except Exception as e:
        logger.error(f"Error logging feedback: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to log feedback: {str(e)}")

241 

242 

@app.exception_handler(Exception)
async def global_exception_handler(request, exc):
    """Last-resort handler: log the exception and return a generic 500.

    The exception text is exposed to the client only in debug mode.
    """
    logger.error(f"Unhandled exception: {exc}", exc_info=True)
    if settings.debug:
        detail = str(exc)
    else:
        detail = "An error occurred"
    return JSONResponse(
        status_code=500,
        content={"error": "Internal server error", "detail": detail},
    )

254 

255 

if __name__ == "__main__":
    import uvicorn

    # Launch the ASGI server with settings-driven configuration.
    # NOTE(review): uvicorn ignores `workers` when `reload` is enabled, so
    # in debug mode only a single reloading worker runs — confirm intended.
    server_opts = {
        "host": settings.api_host,
        "port": settings.api_port,
        "reload": settings.debug,
        "workers": settings.api_workers,
    }
    uvicorn.run("app.main:app", **server_opts)