Diffstat (limited to 'scratch')
-rw-r--r--  scratch/semgrep/semgrep.el  116
-rw-r--r--  scratch/semgrep/server.py    92
2 files changed, 208 insertions, 0 deletions
diff --git a/scratch/semgrep/semgrep.el b/scratch/semgrep/semgrep.el
new file mode 100644
index 0000000..5752a20
--- /dev/null
+++ b/scratch/semgrep/semgrep.el
@@ -0,0 +1,116 @@
+;;; semgrep.el --- Semantic search -*- lexical-binding: t; -*-
+;;
+;; Copyright (C) 2023 Óscar Nájera
+;;
+;; Author: Óscar Nájera <hi@oscarnajera.com>
+;; Maintainer: Óscar Nájera <hi@oscarnajera.com>
+;; Created: November 07, 2023
+;; Modified: November 07, 2023
+;; Version: 0.0.1
+;; Keywords: tools matching
+;; Homepage: https://github.com/titan/semgrep
+;; Package-Requires: ((emacs "27.1"))
+;;
+;; This file is not part of GNU Emacs.
+;;
+;;; Commentary:
+;;
+;; Paragraph-level semantic search over my org-roam database.  The
+;; companion server.py stores paragraph embeddings in a chromadb
+;; collection and answers queries over HTTP on localhost:8080.
+;;
+;;; Code:
+(require 'url)
+(require 'url-http)
+(require 'json)
+(require 'cl-lib)
+(require 'org-id)
+(require 'org-element)
+(require 'org-roam-db)
+(require 'dash)
+
+(defun semgrep--connect (method data)
+  "POST DATA to the local semgrep server under the JSON key METHOD.
+METHOD is `:store' or `:query'.  Return the parsed JSON response."
+ (let ((url-request-method "POST")
+ (url-request-extra-headers '(("Content-Type" . "application/json")))
+ (url-request-data (encode-coding-string
+ (json-serialize `(,method ,data))
+ 'utf-8)))
+ (with-current-buffer
+ (url-retrieve-synchronously "http://localhost:8080")
+ (goto-char url-http-end-of-headers)
+ (json-read))))
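+
+;; Wire-format sketch (the companion server.py reads exactly these keys):
+;;   (semgrep--connect :query "emacs lisp")
+;;   ;; sends {"query": "emacs lisp"} and returns the chromadb result as
+;;   ;; an alist with `distances', `documents' and `metadatas'.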
+
+(defun semgrep--get-node-id (paragraph &optional default)
+  "Return the first node-property value (normally the ID) above PARAGRAPH.
+Fall back to DEFAULT when the enclosing element has no node properties."
+ (thread-first
+ (org-element-map
+ (org-element-property :parent paragraph)
+ 'node-property
+ (lambda (np)
+ (org-element-property :value np)))
+ (car)
+ (org-string-nw-p)
+ (or default)))
+
+(defun semgrep--prepare-paragraph (file-id)
+  "Return a function turning an Org paragraph element into a plist for the server.
+FILE-ID is the buffer-level node ID, used when a paragraph has none of its own."
+ (lambda (paragraph)
+ (list
+ :document (substring-no-properties (org-element-interpret-data paragraph))
+ :metadata (list :start-point
+ (org-element-property :begin paragraph)
+ :node-id
+ (semgrep--get-node-id paragraph file-id)))))
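+;; Sketch of the produced plist, for a paragraph at position 123 in node "abc":
+;;   (:document "Some text ..." :metadata (:start-point 123 :node-id "abc"))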
+
+(defun semgrep--add-buffer ()
+  "Send every paragraph of the current Org buffer to the semgrep server."
+  (interactive)
+  (if (derived-mode-p 'org-mode)
+      (-some-->
+          (org-element-map
+              (org-element-parse-buffer)
+              'paragraph
+            (semgrep--prepare-paragraph (org-id-get (point-min))))
+        (cl-coerce it 'vector)
+        (semgrep--connect :store it))
+    (user-error "This command only works in Org mode buffers")))
+
+(defun semgrep--roam-data (entries)
+  "Fetch (ID TITLE) rows from org-roam for the node IDs mentioned in ENTRIES.
+ENTRIES is a list of result-metadata alists as returned by the server."
+ (thread-last
+ (cl-mapcar (lambda (meta)
+ (alist-get 'node-id meta))
+ entries)
+ (delete-dups)
+ (vconcat)
+ (org-roam-db-query [:select [id title]
+ :from nodes
+ :where (in id $v1)])))
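+;; E.g. entries whose `node-id' is "abc" yield rows like (("abc" "Node title")).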
+
+(defun semgrep-search (text)
+  "Search the semgrep server for paragraphs semantically close to TEXT.
+Display the hits, linked to their org-roam nodes, in *Semantic Search*."
+  (interactive (list (or (thing-at-point 'paragraph)
+                         (read-from-minibuffer "What are you looking for? "))))
+ (-let (((&alist 'distances 'documents 'metadatas)
+ (semgrep--connect :query text)))
+ (with-current-buffer (get-buffer-create "*Semantic Search*")
+ (erase-buffer)
+ (org-mode)
+ (insert "#+title: Looking for:\n" text "\n")
+ (cl-mapc
+ (lambda (entry-distances entry-document entry-metadatas)
+ (let ((data (semgrep--roam-data entry-metadatas)))
+ (cl-mapc
+ (lambda (d paragraph meta)
+ (let* ((node-id (or (alist-get 'node-id meta) ""))
+ (title (cadr (assoc node-id data #'string=))))
+               ;; Distance 0 is the query paragraph itself; skip it.
+               (unless (zerop d)
+ (insert
+ (format "* [[id:%s][%s]]\n" node-id title)
+ "- Distance :: " (number-to-string d) "\n"
+ "- point :: " (number-to-string (or (alist-get 'start-point meta) -1)) "\n"
+ (string-trim paragraph) ?\n))))
+ entry-distances entry-document entry-metadatas)))
+ distances documents metadatas)
+ (goto-char (point-min))
+ (display-buffer (current-buffer)))))
+
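+;; Typical interactive use, once server.py is running:
+;;   (semgrep--add-buffer)                     ; index the current Org buffer
+;;   (semgrep-search "note taking workflows")  ; query the collection
+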
+;; Bulk import of the whole org-roam corpus:
+;; (org-roam-dolist-with-progress (file (org-roam-list-files))
+;; "importing to semantic search"
+;; (org-roam-with-file file nil
+;; (semgrep--add-buffer)))
+
+(provide 'semgrep)
+;;; semgrep.el ends here
diff --git a/scratch/semgrep/server.py b/scratch/semgrep/server.py
new file mode 100644
index 0000000..6a8648b
--- /dev/null
+++ b/scratch/semgrep/server.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+from http.server import BaseHTTPRequestHandler, HTTPServer
+import chromadb
+import collections
+import hashlib
+import json
+
+
+def checksum(string):
+    """Return the first 32 hex digits of STRING's SHA-256 digest, used as a document ID."""
+    sha256 = hashlib.sha256()
+ sha256.update(string.encode("utf-8"))
+ return sha256.hexdigest()[:32]
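+
+# SHA-256 is stable across runs (unlike Python's built-in hash()), so the
+# same paragraph text always maps to the same collection ID.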
+
+
+def ensure_list(data):
+    """Wrap a bare string in a single-element list; pass a list of strings through."""
+    if isinstance(data, str):
+ return [data]
+ if isinstance(data, list):
+ if all(isinstance(l, str) for l in data):
+ return data
+ raise ValueError("Data must be a list of strings")
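+
+# ensure_list("hi")       -> ["hi"]
+# ensure_list(["a", "b"]) -> ["a", "b"]
+# ensure_list(5) and ensure_list(["a", 5]) raise ValueError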
+
+
+class MyRequestHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        content_length = int(self.headers["Content-Length"])
+        post_data = self.rfile.read(content_length).decode("utf-8")
+
+        try:
+            data = json.loads(post_data)
+        except ValueError:
+            # Malformed body: answer 400 and stop here instead of falling
+            # through with `data` unbound.
+            self.send_response(400)
+            self.send_header("Content-Type", "text/plain")
+            self.end_headers()
+            self.wfile.write(b"Invalid JSON data")
+            return
+
+        if query := data.get("query"):
+            response = collection.query(query_texts=ensure_list(query))
+        elif paragraphs := data.get("store"):
+            documents, metadata = drop_duplicates(paragraphs)
+            collection.add(
+                documents=documents,
+                metadatas=metadata,
+                ids=[checksum(doc) for doc in documents],
+            )
+            response = {"added": documents}
+        else:
+            self.send_response(400)
+            self.send_header("Content-Type", "text/plain")
+            self.end_headers()
+            self.wfile.write(f"Unknown method. Sent: {list(data.keys())}".encode("utf-8"))
+            return
+
+        self.send_response(200)
+        self.send_header("Content-Type", "application/json")
+        self.end_headers()
+        self.wfile.write(json.dumps(response).encode("utf-8"))
+
+
+def run_server(port=8080):
+ server_address = ("", port)
+ httpd = HTTPServer(server_address, MyRequestHandler)
+ print(f"Server running on port {port}")
+ httpd.serve_forever()
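+
+# Manual smoke test, assuming the server is running on the default port:
+#   curl -d '{"query": "note taking"}' http://localhost:8080
+#   curl -d '{"store": [{"document": "hi", "metadata": {"node-id": "x"}}]}' http://localhost:8080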
+
+
+def drop_duplicates(paragraphs):
+    """Remove repeated documents (and their metadata), keeping first occurrences."""
+    data = [entry["document"] for entry in paragraphs]
+    metadata = [entry["metadata"] for entry in paragraphs]
+ dups = (x for x, count in collections.Counter(data).items() if count > 1)
+ to_drop = []
+ for no in dups:
+ to_drop.extend([i for i, d in enumerate(data) if d == no][1:])
+ to_drop.sort(reverse=True)
+ for index in to_drop:
+ data.pop(index)
+ metadata.pop(index)
+ return data, metadata
+
+
+def test():
+ sample = [
+ {"document": "Hello", "metadata": 5},
+ {"document": "World", "metadata": 8},
+ {"document": "Hello", "metadata": 6},
+ {"document": "Good", "metadata": 3},
+ {"document": "World", "metadata": 9},
+ ]
+
+ assert drop_duplicates(sample) == (["Hello", "World", "Good"], [5, 8, 3])
+
+
+if __name__ == "__main__":
+    # `python server.py` persists the chromadb collection under ./semgrep.
+    client = chromadb.PersistentClient(path="./semgrep")
+    collection = client.get_or_create_collection("org-roam")
+    run_server()