Externalized items to new glade files and classes #10

@@ -76,9 +76,10 @@ class Plugin(PluginBase):
         path = self._fm_state.tab.get_current_directory()
         # NOTE: -h = human readable, -d = depth assigned to 1
         command = ["du", "-h", "-d", "1", path]
-        proc = subprocess.Popen(command, stdout=subprocess.PIPE)
+        proc = subprocess.Popen(command, stdout=subprocess.PIPE, encoding="utf-8")
         raw_data = proc.communicate()[0]
-        data = raw_data.decode("utf-8").strip()  # NOTE: Will return data AFTER completion (if any)
+        # NOTE: Will return data AFTER completion (if any)
+        data = raw_data.strip()
         parts = data.split("\n")

         # NOTE: Last entry is current dir. Move to top of list and pop off...

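As a quick sanity check on the hunk above: once Popen is given encoding="utf-8", communicate() already returns text, so the .decode("utf-8") step goes away. A minimal sketch of the resulting parsing, with a made-up directory and output:

    import subprocess

    path = "/tmp"  # hypothetical directory, for illustration only
    proc = subprocess.Popen(["du", "-h", "-d", "1", path],
                            stdout=subprocess.PIPE, encoding="utf-8")
    raw_data = proc.communicate()[0]          # already str, no .decode() needed
    parts = raw_data.strip().split("\n")
    # du prints the queried directory's own total last, e.g. "24K\t/tmp"
    print(parts[-1])
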
@@ -4,6 +4,7 @@ import subprocess
 import signal
 import json
 import shlex
+from datetime import datetime

 # Lib imports
 import gi

@@ -40,6 +41,8 @@ class FileSearchMixin:
         # NOTE: Freeze IPC consumption
         self.pause_fifo_update = True
         self.search_query = ""
+        dt = datetime.now()
+        self.fsearch_time_stamp = datetime.timestamp(dt)  # NOTE: Get timestamp

         # NOTE: Kill the former process
         if self._list_proc:

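The two added lines stamp the moment this particular query was launched; the same stamp later gates incoming IPC results so that output from an older, not-yet-dead search process is dropped. A rough sketch of the idea, with illustrative names only (not the mixin's real API):

    from datetime import datetime

    query_launched_at = datetime.timestamp(datetime.now())   # stamped when a new query starts

    def accept(message_ts: float) -> bool:
        # Only messages from a worker started after the latest query are kept.
        return message_ts > query_launched_at

    stale_ts = query_launched_at - 5.0   # a worker launched before this query
    print(accept(stale_ts))              # False: its results are discarded
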
@@ -5,6 +5,7 @@ import subprocess
 import signal
 import json
 import shlex
+from datetime import datetime
 libgcc_s = ctypes.CDLL('libgcc_s.so.1')

 # Lib imports

@@ -42,6 +43,8 @@ class GrepSearchMixin:
         # NOTE: Freeze IPC consumption
         self.pause_fifo_update = True
         self.grep_query = ""
+        dt = datetime.now()
+        self.grep_time_stamp = datetime.timestamp(dt)  # NOTE: Get timestamp

         # NOTE: Kill the former process
         if self._grep_proc:

@@ -42,6 +42,7 @@ class Plugin(IPCServer, FileSearchMixin, GrepSearchMixin, PluginBase):
         # where self.name should not be needed for message comms
         self._GLADE_FILE = f"{self.path}/search_dialog.glade"

+        self.update_list_ui_buffer = ()
         self._search_dialog = None
         self._active_path = None
         self.file_list_parent = None

@@ -51,7 +52,8 @@ class Plugin(IPCServer, FileSearchMixin, GrepSearchMixin, PluginBase):
         self._grep_proc = None
         self._list_proc = None
         self.pause_fifo_update = False
-        self.update_list_ui_buffer = ()
+        self.grep_time_stamp = None
+        self.fsearch_time_stamp = None
         self.grep_query = ""
         self.search_query = ""

@@ -60,14 +60,22 @@ class IPCServer:
         msg = conn.recv()

         if "SEARCH|" in msg:
-            file = msg.split("SEARCH|")[1].strip()
-            if file:
-                GLib.idle_add(self._load_file_ui, file, priority=GLib.PRIORITY_LOW)
+            ts, file = msg.split("SEARCH|")[1].strip().split("|", 1)
+            try:
+                timestamp = float(ts)
+                if timestamp > self.fsearch_time_stamp and file:
+                    GLib.idle_add(self._load_file_ui, file, priority=GLib.PRIORITY_LOW)
+            except Exception as e:
+                ...

         if "GREP|" in msg:
-            data = msg.split("GREP|")[1].strip()
-            if data:
-                GLib.idle_add(self._load_grep_ui, data, priority=GLib.PRIORITY_LOW)
+            ts, data = msg.split("GREP|")[1].strip().split("|", 1)
+            try:
+                timestamp = float(ts)
+                if timestamp > self.grep_time_stamp and data:
+                    GLib.idle_add(self._load_grep_ui, data, priority=GLib.PRIORITY_LOW)
+            except Exception as e:
+                ...


         conn.close()

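With the timestamp added, the wire format is effectively SEARCH|<timestamp>|<json payload> (and GREP|<timestamp>|<json payload>). A small sketch of taking such a message apart, using an invented payload:

    import json

    msg = "SEARCH|1700000000.25|" + json.dumps(["/tmp/example.py", "example.py"])  # hypothetical message
    ts, payload = msg.split("SEARCH|")[1].strip().split("|", 1)
    timestamp = float(ts)          # a malformed stamp raises here; the handler's
                                   # try/except simply drops such messages
    target, name = json.loads(payload)
    print(timestamp, target, name)
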
@@ -5,11 +5,11 @@
 import os
 import traceback
 import argparse
-import threading
+import subprocess
 import json
 import base64
 import time
-import pickle
+from datetime import datetime
 from setproctitle import setproctitle
 from multiprocessing.connection import Client

@@ -26,18 +26,10 @@ _ipc_authkey = b'' + bytes(f'solarfm-search_grep-ipc', 'utf-8')
 filter = (".cpp", ".css", ".c", ".go", ".html", ".htm", ".java", ".js", ".json", ".lua", ".md", ".py", ".rs", ".toml", ".xml", ".pom") + \
          (".txt", ".text", ".sh", ".cfg", ".conf", ".log")

-# NOTE: Threads WILL NOT die with parent's destruction.
-def threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
-    return wrapper
-
-# NOTE: Threads WILL die with parent's destruction.
-def daemon_threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
-    return wrapper
+# NOTE: Create timestamp of when this launched. Is used in IPC to see if
+# we are stale and that new call didn't fully kill this or older processes.
+dt = datetime.now()
+ts = datetime.timestamp(dt)


 def send_ipc_message(message) -> None:

@@ -55,84 +47,51 @@ def file_search(path, query):
             for file in _files:
                 if query in file.lower():
                     target = os.path.join(_path, file)
-                    data = f"SEARCH|{json.dumps([target, file])}"
+                    data = f"SEARCH|{ts}|{json.dumps([target, file])}"
                     send_ipc_message(data)
     except Exception as e:
         print("Couldn't traverse to path. Might be permissions related...")
         traceback.print_exc()


-def _search_for_string(file, query):
-    b64_file = base64.urlsafe_b64encode(file.encode('utf-8')).decode('utf-8')
-    grep_result_set = {}
-    padding = 15
-
-    with open(file, 'rb') as fp:
-        # NOTE: I know there's an issue if there's a very large file with content
-        # all on one line will lower and dupe it. And, yes, it will only
-        # return one instance from the file.
-        try:
-            for i, raw in enumerate(fp):
-                line = None
-                llower = raw.lower()
-                if not query in llower:
-                    continue
-
-                if len(raw) > 72:
-                    start = 0
-                    end = len(raw) - 1
-                    index = llower.index(query)
-                    sindex = llower.index(query) - 15 if index >= 15 else abs(start - index) - index
-                    eindex = sindex + 15 if end > (index + 15) else abs(index - end) + index
-                    line = raw[sindex:eindex]
-                else:
-                    line = raw
-
-                b64_line = base64.urlsafe_b64encode(line).decode('utf-8')
-                if f"{b64_file}" in grep_result_set.keys():
-                    grep_result_set[f"{b64_file}"][f"{i+1}"] = b64_line
-                else:
-                    grep_result_set[f"{b64_file}"] = {}
-                    grep_result_set[f"{b64_file}"] = {f"{i+1}": b64_line}
-
-        except Exception as e:
-            ...
-
-    try:
-        data = f"GREP|{json.dumps(grep_result_set)}"
-        send_ipc_message(data)
-    except Exception as e:
-        ...
-
-
-@daemon_threaded
-def _search_for_string_threaded(file, query):
-    _search_for_string(file, query)
-
-
-def grep_search(path, query):
-    try:
-        for file in os.listdir(path):
-            target = os.path.join(path, file)
-            if os.path.isdir(target):
-                grep_search(target, query)
-            else:
-                if target.lower().endswith(filter):
-                    size = os.path.getsize(target)
-                    if not size > 5000:
-                        _search_for_string(target, query)
-                    else:
-                        _search_for_string_threaded(target, query)
-
-    except Exception as e:
-        print("Couldn't traverse to path. Might be permissions related...")
-        traceback.print_exc()
+def grep_search(target=None, query=None):
+    if not query or not target:
+        return
+
+    # NOTE: -n = provide line numbers, -R = Search recursive in given target
+    # -i = insensitive, -F = don't do regex parsing. (Treat as raw string)
+    command = ["grep", "-n", "-R", "-i", "-F", query, target]
+    proc = subprocess.Popen(command, stdout=subprocess.PIPE, encoding="utf-8")
+    raw_data = proc.communicate()[0].strip()
+    proc_data = raw_data.split("\n")  # NOTE: Will return data AFTER completion (if any)
+    collection = {}
+
+    for line in proc_data:
+        file, line_no, data = line.split(":", 2)
+        b64_file = base64.urlsafe_b64encode(file.encode('utf-8')).decode('utf-8')
+        b64_data = base64.urlsafe_b64encode(data.encode('utf-8')).decode('utf-8')
+
+        if b64_file in collection.keys():
+            collection[f"{b64_file}"][f"{line_no}"] = b64_data
+        else:
+            collection[f"{b64_file}"] = {}
+            collection[f"{b64_file}"] = { f"{line_no}": b64_data}
+
+    try:
+        data = f"GREP|{ts}|{json.dumps(collection, separators=(',', ':'), indent=4)}"
+        send_ipc_message(data)
+    except Exception as e:
+        ...
+
+    collection = {}


 def search(args):
     if args.type == "file_search":
         file_search(args.dir, args.query.lower())

     if args.type == "grep_search":
-        grep_search(args.dir, args.query.lower().encode("utf-8"))
+        grep_search(args.dir, args.query.encode("utf-8"))


 if __name__ == "__main__":

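For reference, a rough sketch of how a single line of grep -n -R -i -F output flows through the new collection logic; the path and match text are invented:

    import base64, json

    line = "/tmp/project/main.py:42:    result = do_search(query)"   # hypothetical grep output line
    file, line_no, data = line.split(":", 2)
    b64_file = base64.urlsafe_b64encode(file.encode("utf-8")).decode("utf-8")
    b64_data = base64.urlsafe_b64encode(data.encode("utf-8")).decode("utf-8")

    collection = {}
    collection.setdefault(b64_file, {})[line_no] = b64_data   # same nested shape the loop builds
    print(json.dumps(collection, separators=(",", ":"), indent=4))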