From ad262522695bb5ac913d7af60e87b6f28f477e7e Mon Sep 17 00:00:00 2001
From: Dita Aji Pratama
Date: Tue, 18 Feb 2025 16:44:03 +0700
Subject: [PATCH] Feature for QnA with AI

---
 handler.py         | 18 +++++++++++++++++-
 modules/api/qna.py | 31 +++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+), 1 deletion(-)
 create mode 100755 modules/api/qna.py

diff --git a/handler.py b/handler.py
index 64ae435..5b5b62e 100644
--- a/handler.py
+++ b/handler.py
@@ -5,11 +5,14 @@
 # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 # You should have received a copy of the GNU General Public License along with this program. If not, see https://www.gnu.org/licenses/.
 
-from bottle import Bottle, route
+from bottle import Bottle, route, request, response
 from config import directory
 
+import json
+
 import templates.plain.main as template_public
 
 import modules.public.home as public_home
+import modules.api.qna as api_qna
 
 app = Bottle()
@@ -21,3 +24,16 @@ def index():
         }
     }
     return public_home.main().html(params)
+
+@app.route('/api/synthesisgen/ask', method=['OPTIONS', 'POST'])
+def api_qna_ask():
+    try:
+        if request.method == 'OPTIONS':
+            return None  # reply to the OPTIONS preflight with an empty body
+        else:
+            response.content_type = 'application/json'
+            params = request.json
+            return json.dumps(api_qna.qna().ask(params), indent=2).encode()
+    except Exception as e:
+        print(str(e), flush=True)
+        return json.dumps({}, indent=2).encode()
diff --git a/modules/api/qna.py b/modules/api/qna.py
new file mode 100755
index 0000000..7eb0c66
--- /dev/null
+++ b/modules/api/qna.py
@@ -0,0 +1,31 @@
+import ollama
+
+from scripts import loggorilla, synthesai
+
+class qna:
+
+    def __init__(self):
+        pass
+
+    def ask(self, params):
+        # Build a two-message chat (system role + user question), send it to
+        # a local Ollama model, and return the reply plus the full history.
+        APIADDR  = "ASK"
+        sysrole  = params['sysrole' ]
+        question = params['question']
+        model    = "deepseek-r1:7b"
+        history  = [
+            { "role" : "system", "content" : sysrole  },
+            { "role" : "user",   "content" : question }
+        ]
+        loggorilla.prcss(APIADDR, "Processing AI")
+        result = ollama.chat(model=model, messages=history, stream=False)
+        # for chunk in result:
+        #     print(chunk['message']['content'], end='', flush=True)
+        loggorilla.prcss(APIADDR, "Processing AI finish")
+        loggorilla.fyinf(APIADDR, result['message']['content'])
+        history.append({ "role" : "assistant", "content" : result['message']['content'] })
+        return {
+            "content" : result['message']['content'],
+            "history" : history
+        }
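
For reviewers: a minimal sketch of how a client could exercise the new
endpoint. The host and port are assumptions (the patch does not show how the
Bottle app is served), and the requests package is not a dependency of this
project; only the URL path, the sysrole/question payload keys, and the
content/history response keys come from the patch itself.

    # hypothetical client for POST /api/synthesisgen/ask
    # assumes the app from handler.py is listening on http://localhost:8080
    import requests

    payload = {
        "sysrole":  "You are a helpful assistant.",  # becomes the system message
        "question": "What is synthesis gas?"         # becomes the user message
    }
    reply = requests.post("http://localhost:8080/api/synthesisgen/ask", json=payload)
    data  = reply.json()
    print(data["content"])  # the model's answer
    print(data["history"])  # system/user/assistant messages from qna.ask()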
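
The commented-out loop in modules/api/qna.py suggests a streaming mode was
tried. A hedged sketch of what a streaming variant of ask() could look like,
not part of this patch: with stream=True, ollama.chat() yields partial chunks
instead of one final result, so the full reply has to be accumulated before
it can be appended to the history.

    # hypothetical streaming variant of qna.ask(); not part of this patch
    import ollama

    def ask_streaming(sysrole, question, model="deepseek-r1:7b"):
        history = [
            { "role" : "system", "content" : sysrole  },
            { "role" : "user",   "content" : question }
        ]
        content = ""
        for chunk in ollama.chat(model=model, messages=history, stream=True):
            piece = chunk['message']['content']  # partial text, as in the commented loop
            content += piece
            print(piece, end='', flush=True)
        history.append({ "role" : "assistant", "content" : content })
        return { "content" : content, "history" : history }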