diff --git a/Experiments/borealis_overlay.py b/Experiments/borealis_overlay.py
new file mode 100644
index 0000000..74b9d8f
--- /dev/null
+++ b/Experiments/borealis_overlay.py
@@ -0,0 +1,542 @@
+#!/usr/bin/env python3
+
+import sys
+import time
+import re
+import numpy as np
+import cv2
+import pytesseract
+
+try:
+    import winsound
+    HAS_WINSOUND = True
+except ImportError:
+    HAS_WINSOUND = False
+
+from PyQt5.QtWidgets import QApplication, QWidget
+from PyQt5.QtCore import Qt, QRect, QPoint, QTimer
+from PyQt5.QtGui import QPainter, QPen, QColor, QFont
+from PIL import Image, ImageGrab, ImageFilter
+
+from rich.console import Console, Group
+from rich.table import Table
+from rich.progress import Progress, BarColumn, TextColumn
+from rich.text import Text
+from rich.live import Live
+
+# =============================================================================
+# Global Config
+# =============================================================================
+
+pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
+
+POLLING_RATE_MS = 500
+MAX_DATA_POINTS = 8
+
+# Defaults used for the Region size.
+DEFAULT_WIDTH = 180
+DEFAULT_HEIGHT = 130
+HANDLE_SIZE = 8
+LABEL_HEIGHT = 20
+
+GREEN_HEADER_STYLE = "bold green"
+
+BEEP_INTERVAL_SECONDS = 1.0  # Beep at most once per second
+
+# Status-bar auto-locator logic (to be made more robust later)
+TEMPLATE_PATH = "G:\\Nextcloud\\Projects\\Scripting\\bars_template.png"  # Path to the bars template image
+MATCH_THRESHOLD = 0.4  # Correlation threshold for a "good" template match
+
+# =============================================================================
+# Helper Functions
+# =============================================================================
+
+def beep_hp_warning():
+    """
+    Beep only if at least BEEP_INTERVAL_SECONDS have elapsed since the last beep.
+    """
+    current_time = time.time()
+    if (beep_hp_warning.last_beep_time is None or
+            (current_time - beep_hp_warning.last_beep_time >= BEEP_INTERVAL_SECONDS)):
+        beep_hp_warning.last_beep_time = current_time
+        if HAS_WINSOUND:
+            # frequency=376 Hz, duration=100 ms
+            winsound.Beep(376, 100)
+        else:
+            # Fall back to the terminal bell
+            print('\a', end='')
+
+beep_hp_warning.last_beep_time = None
+
+
+def locate_bars_opencv(template_path, threshold=MATCH_THRESHOLD):
+    """
+    Attempt to locate the bars via OpenCV template matching:
+      1) Grab the full screen using PIL.ImageGrab.
+      2) Convert to a NumPy array in BGR order for cv2.
+      3) Load the template from `template_path`.
+      4) Use cv2.matchTemplate to find the best match location.
+      5) If the max correlation >= threshold, return (x, y, w, h).
+      6) Else return None.
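+
+    Example (illustrative; TEMPLATE_PATH below is machine-specific):
+        match = locate_bars_opencv(TEMPLATE_PATH)
+        if match is not None:
+            x, y, w, h = match  # top-left corner and template size, in screen pixels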
+    """
+    # 1) Capture the full screen
+    screenshot_pil = ImageGrab.grab()
+    screenshot_np = np.array(screenshot_pil)  # RGB array of shape (H, W, 3)
+    # Convert RGB to BGR for OpenCV
+    screenshot_bgr = cv2.cvtColor(screenshot_np, cv2.COLOR_RGB2BGR)
+
+    # 2) Load the template from file
+    template_bgr = cv2.imread(template_path, cv2.IMREAD_COLOR)
+    if template_bgr is None:
+        print(f"[WARN] Could not load template file: {template_path}")
+        return None
+
+    # 3) Template matching
+    result = cv2.matchTemplate(screenshot_bgr, template_bgr, cv2.TM_CCOEFF_NORMED)
+
+    # 4) Find the best match
+    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
+    # Template height/width (shape is rows, cols, channels)
+    th, tw, _ = template_bgr.shape
+
+    if max_val >= threshold:
+        # max_loc is the top-left corner of the best match
+        found_x, found_y = max_loc
+        return (found_x, found_y, tw, th)
+    else:
+        return None
+
+
+def format_duration(seconds):
+    if seconds is None:
+        return "???"
+    seconds = int(seconds)
+    hours = seconds // 3600
+    leftover = seconds % 3600
+    mins = leftover // 60
+    secs = leftover % 60
+    if hours > 0:
+        return f"{hours}h {mins}m {secs}s"
+    else:
+        return f"{mins}m {secs}s"
+
+
+def sanitize_experience_string(raw_text):
+    text_no_percent = raw_text.replace('%', '')
+    text_no_spaces = text_no_percent.replace(' ', '')
+    cleaned = re.sub(r'[^0-9\.]', '', text_no_spaces)
+    match = re.search(r'\d+(?:\.\d+)?', cleaned)
+    if not match:
+        return None
+    val = float(match.group(0))
+    # Clamp to the valid EXP range
+    if val < 0:
+        val = 0
+    elif val > 100:
+        val = 100
+    return round(val, 4)
+
+
+def format_experience_value(value):
+    # Clamp, then zero-pad the integer part to two digits (e.g. 7.5 -> "07.5000")
+    if value < 0:
+        value = 0
+    elif value > 100:
+        value = 100
+    float_4 = round(value, 4)
+    raw_str = f"{float_4:.4f}"
+    int_part, dec_part = raw_str.split('.')
+    if int_part == "100":
+        pass
+    elif len(int_part) == 1 and int_part != "0":
+        int_part = "0" + int_part
+    elif int_part == "0":
+        int_part = "00"
+    return f"{int_part}.{dec_part}"
+
+# -----------------------------------------------------------------------------
+# Region Class
+# -----------------------------------------------------------------------------
+class Region:
+    """
+    Defines a draggable/resizable screen region for OCR capture.
+    """
+    def __init__(self, x, y, label="Region", color=QColor(0, 0, 255)):
+        self.x = x
+        self.y = y
+        self.w = DEFAULT_WIDTH
+        self.h = DEFAULT_HEIGHT
+        self.label = label
+        self.color = color
+        self.visible = True
+        self.data = ""
+
+    def rect(self):
+        return QRect(self.x, self.y, self.w, self.h)
+
+    def label_rect(self):
+        return QRect(self.x, self.y - LABEL_HEIGHT, self.w, LABEL_HEIGHT)
+
+    def resize_handles(self):
+        return [
+            QRect(self.x - HANDLE_SIZE // 2, self.y - HANDLE_SIZE // 2, HANDLE_SIZE, HANDLE_SIZE),
+            QRect(self.x + self.w - HANDLE_SIZE // 2, self.y - HANDLE_SIZE // 2, HANDLE_SIZE, HANDLE_SIZE),
+            QRect(self.x - HANDLE_SIZE // 2, self.y + self.h - HANDLE_SIZE // 2, HANDLE_SIZE, HANDLE_SIZE),
+            QRect(self.x + self.w - HANDLE_SIZE // 2, self.y + self.h - HANDLE_SIZE // 2, HANDLE_SIZE, HANDLE_SIZE),
+        ]
+
+# -----------------------------------------------------------------------------
+# OverlayCanvas Class
+# -----------------------------------------------------------------------------
+class OverlayCanvas(QWidget):
+    """
+    Renders the overlay and handles region dragging/resizing.
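+
+    Resize handles are indexed in the order produced by Region.resize_handles():
+    0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right.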
+    """
+    def __init__(self, regions, parent=None):
+        super().__init__(parent)
+        self.regions = regions
+        self.edit_mode = True
+        self.selected_region = None
+        self.selected_handle = None
+        self.drag_offset = QPoint()
+
+    def paintEvent(self, event):
+        painter = QPainter(self)
+        for region in self.regions:
+            if region.visible:
+                pen = QPen(region.color)
+                pen.setWidth(3)
+                painter.setPen(pen)
+                painter.drawRect(region.x, region.y, region.w, region.h)
+
+                painter.setFont(QFont("Arial", 12, QFont.Bold))
+                painter.setPen(region.color)
+                painter.drawText(region.x, region.y - 5, region.label)
+
+                if self.edit_mode:
+                    for handle in region.resize_handles():
+                        painter.fillRect(handle, region.color)
+
+    def mousePressEvent(self, event):
+        if not self.edit_mode:
+            return
+        if event.button() == Qt.LeftButton:
+            for region in reversed(self.regions):
+                for i, handle in enumerate(region.resize_handles()):
+                    if handle.contains(event.pos()):
+                        self.selected_region = region
+                        self.selected_handle = i
+                        return
+                if region.label_rect().contains(event.pos()):
+                    self.selected_region = region
+                    self.selected_handle = None
+                    self.drag_offset = event.pos() - QPoint(region.x, region.y)
+                    return
+                if region.rect().contains(event.pos()):
+                    self.selected_region = region
+                    self.selected_handle = None
+                    self.drag_offset = event.pos() - QPoint(region.x, region.y)
+                    return
+
+    def mouseMoveEvent(self, event):
+        if not self.edit_mode or self.selected_region is None:
+            return
+
+        if self.selected_handle is None:
+            # Dragging the whole region
+            self.selected_region.x = event.x() - self.drag_offset.x()
+            self.selected_region.y = event.y() - self.drag_offset.y()
+        else:
+            sr = self.selected_region
+            if self.selected_handle == 0:    # top-left
+                sr.w += sr.x - event.x()
+                sr.h += sr.y - event.y()
+                sr.x = event.x()
+                sr.y = event.y()
+            elif self.selected_handle == 1:  # top-right
+                sr.w = event.x() - sr.x
+                sr.h += sr.y - event.y()
+                sr.y = event.y()
+            elif self.selected_handle == 2:  # bottom-left
+                sr.w += sr.x - event.x()
+                sr.h = event.y() - sr.y
+                sr.x = event.x()
+            elif self.selected_handle == 3:  # bottom-right
+                sr.w = event.x() - sr.x
+                sr.h = event.y() - sr.y
+
+            sr.w = max(sr.w, 10)
+            sr.h = max(sr.h, 10)
+
+        self.update()
+
+    def mouseReleaseEvent(self, event):
+        if not self.edit_mode:
+            return
+        if event.button() == Qt.LeftButton:
+            self.selected_region = None
+            self.selected_handle = None
+
+# -----------------------------------------------------------------------------
+# BorealisOverlay Class
+# -----------------------------------------------------------------------------
+class BorealisOverlay(QWidget):
+    """
+    Single-region overlay for player stats (HP/MP/FP/EXP) with:
+      - Automatic placement via OpenCV template matching at startup
+      - Periodic OCR scanning
+      - Low-HP beep
+      - Rich Live updates in the terminal
+    """
+    def __init__(self, live=None):
+        super().__init__()
+        screen_geo = QApplication.primaryScreen().geometry()
+        self.setGeometry(screen_geo)
+        self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
+        self.setAttribute(Qt.WA_TranslucentBackground, True)
+
+        # Try to find the bars automatically; fall back to the default location.
+        initial_x, initial_y = 250, 50
+        region_w, region_h = DEFAULT_WIDTH, DEFAULT_HEIGHT
+
+        match_result = locate_bars_opencv(TEMPLATE_PATH, MATCH_THRESHOLD)
+        if match_result is not None:
+            found_x, found_y, w, h = match_result
+            print(f"Character status located at ({found_x}, {found_y}) with confidence >= {MATCH_THRESHOLD}.")
+            initial_x, initial_y = found_x, found_y
+            # Optionally override the region size with the template size
+            region_w, region_h = w, h
+        else:
+            print("Could not auto-locate the character status window. Set your theme to Masquerade, Interface Scale to 140%, and browser zoom level to 110%. Using the default region.")
+
+        region = Region(initial_x, initial_y, label="Character Status")
+        region.w = region_w
+        region.h = region_h
+        self.regions = [region]
+
+        self.canvas = OverlayCanvas(self.regions, self)
+        self.canvas.setGeometry(self.rect())
+
+        # Tesseract
+        self.engine = pytesseract
+
+        # History of EXP data points
+        self.points = []
+
+        self.live = live
+
+        # Timer for periodic OCR scanning
+        self.timer = QTimer(self)
+        self.timer.timeout.connect(self.collect_ocr_data)
+        self.timer.start(POLLING_RATE_MS)
+
+    def set_live(self, live):
+        self.live = live
+
+    def collect_ocr_data(self):
+        for region in self.regions:
+            if region.visible:
+                screenshot = ImageGrab.grab(
+                    bbox=(region.x, region.y, region.x + region.w, region.y + region.h)
+                )
+                processed = self.preprocess_image(screenshot)
+                text = pytesseract.image_to_string(processed, config='--psm 4 --oem 1')
+                region.data = text.strip()
+
+        if self.live is not None:
+            renderable = self.build_renderable()
+            self.live.update(renderable)
+
+    def preprocess_image(self, image):
+        gray = image.convert("L")
+        scaled = gray.resize((gray.width * 3, gray.height * 3))
+        thresh = scaled.point(lambda p: 255 if p > 200 else 0)
+        return thresh.filter(ImageFilter.MedianFilter(3))
+
+    def parse_all_stats(self, raw_text):
+        raw_lines = raw_text.splitlines()
+        lines = [l.strip() for l in raw_lines if l.strip()]
+        stats_dict = {
+            "hp": (0, 1),
+            "mp": (0, 1),
+            "fp": (0, 1),
+            "exp": None
+        }
+        # Expect four non-empty lines: HP, MP, FP, EXP
+        if len(lines) < 4:
+            return stats_dict
+
+        hp_match = re.search(r"(\d+)\s*/\s*(\d+)", lines[0])
+        if hp_match:
+            stats_dict["hp"] = (int(hp_match.group(1)), int(hp_match.group(2)))
+
+        mp_match = re.search(r"(\d+)\s*/\s*(\d+)", lines[1])
+        if mp_match:
+            stats_dict["mp"] = (int(mp_match.group(1)), int(mp_match.group(2)))
+
+        fp_match = re.search(r"(\d+)\s*/\s*(\d+)", lines[2])
+        if fp_match:
+            stats_dict["fp"] = (int(fp_match.group(1)), int(fp_match.group(2)))
+
+        exp_val = sanitize_experience_string(lines[3])
+        stats_dict["exp"] = exp_val
+        return stats_dict
+
+    def update_points(self, new_val):
+        now = time.time()
+        if self.points:
+            _, last_v = self.points[-1]
+            if abs(new_val - last_v) < 1e-6:
+                return
+            # EXP dropped (level-up or OCR glitch): restart the history
+            if new_val < last_v:
+                self.points.clear()
+        self.points.append((now, new_val))
+        if len(self.points) > MAX_DATA_POINTS:
+            self.points.pop(0)
+
+    def compute_time_to_100(self):
+        n = len(self.points)
+        if n < 2:
+            return None
+        first_t, first_v = self.points[0]
+        last_t, last_v = self.points[-1]
+        diff_v = last_v - first_v
+        if diff_v <= 0:
+            return None
+
+        steps = n - 1
+        total_time = last_t - first_t
+        if total_time <= 0:
+            return None
+
+        avg_change = diff_v / steps
+        remain = 100.0 - last_v
+        if remain <= 0:
+            return None
+
+        avg_time = total_time / steps
+        rate_per_s = avg_change / avg_time if avg_time > 0 else 0
+        if rate_per_s <= 0:
+            return None
+
+        return int(remain / rate_per_s)
+
+    def build_renderable(self):
+        raw_text = self.regions[0].data
+        stats = self.parse_all_stats(raw_text)
+        hp_cur, hp_max = stats["hp"]
+        mp_cur, mp_max = stats["mp"]
+        fp_cur, fp_max = stats["fp"]
+        exp_val = stats["exp"]
+
+        # HP beep logic
+        if hp_max > 0:
+            hp_ratio = hp_cur / hp_max
+            if 0 < hp_ratio <= 0.40:
+                beep_hp_warning()
+
+        if exp_val is not None:
+            self.update_points(exp_val)
+        current_exp = self.points[-1][1] if self.points else 0.0
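+
+        # Worked example of the ETA math above (hypothetical numbers): two
+        # samples 30 s apart at 10.0% and 10.5% EXP give avg_change = 0.5%/kill
+        # and rate_per_s = 0.5 / 30, so compute_time_to_100() returns
+        # int(89.5 / (0.5 / 30)) = 5370 s, rendered as "1h 29m 30s".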
+        # Title
+        title_text = Text("Project Borealis\n", style="bold white")
+        subtitle_text = Text("Flyff Information Overlay\n\n", style="dim")
+
+        # HP / MP / FP bars
+        bar_progress = Progress(
+            "{task.description}",
+            BarColumn(bar_width=30),
+            TextColumn(" {task.completed}/{task.total} ({task.percentage:>5.2f}%)"),
+            transient=False,
+            auto_refresh=False
+        )
+        # NOTE: style/complete_style below are stored as extra task fields by
+        # Rich; the bar appearance itself is controlled by BarColumn.
+        bar_progress.add_task("[bold red]HP[/bold red]", total=hp_max, completed=hp_cur,
+                              style="red", complete_style="red")
+        bar_progress.add_task("[bold blue]MP[/bold blue]", total=mp_max, completed=mp_cur,
+                              style="blue", complete_style="blue")
+        bar_progress.add_task("[bold green]FP[/bold green]", total=fp_max, completed=fp_cur,
+                              style="green", complete_style="green")
+        bar_progress.refresh()
+
+        # Historical EXP table
+        table = Table(show_header=True, header_style=GREEN_HEADER_STYLE, style=None)
+        table.add_column("Historical EXP", justify="center", style="green")
+        table.add_column("Time Since Last Kill", justify="center", style="green")
+        table.add_column("Average EXP Per Kill", justify="center", style="green")
+        table.add_column("Average Time Between Kills", justify="center", style="green")
+
+        n = len(self.points)
+        if n == 0:
+            table.add_row("N/A", "N/A", "N/A", "N/A")
+        elif n == 1:
+            _, v0 = self.points[0]
+            exp_str = f"[green]{format_experience_value(v0)}%[/green]"
+            table.add_row(exp_str, "N/A", "N/A", "N/A")
+        else:
+            for i in range(1, n):
+                t_cur, v_cur = self.points[i]
+                t_prev, v_prev = self.points[i - 1]
+                delta_v = v_cur - v_prev
+                delta_str = f"{delta_v:+.4f}%"
+                exp_main = format_experience_value(v_cur)
+                exp_str = f"[green]{exp_main}%[/green] [dim]({delta_str})[/dim]"
+
+                delta_t = t_cur - t_prev
+                t_since_str = f"{delta_t:.1f}s"
+
+                diff_v = v_cur - self.points[0][1]
+                steps = i
+                avg_exp_str = f"{diff_v / steps:.4f}%"
+
+                total_time = t_cur - self.points[0][0]
+                avg_kill_time = total_time / steps
+                avg_time_str = f"{avg_kill_time:.1f}s"
+
+                table.add_row(exp_str, t_since_str, avg_exp_str, avg_time_str)
+
+        # Predicted time to level
+        secs_left = self.compute_time_to_100()
+        time_str = format_duration(secs_left)
+
+        time_bar = Progress(
+            TextColumn("[bold white]Predicted Time to Level:[/bold white] "),
+            BarColumn(bar_width=30, complete_style="magenta"),
+            TextColumn(" [green]{task.percentage:>5.2f}%[/green] "),
+            TextColumn(f"[magenta]{time_str}[/magenta] until 100%", justify="right"),
+            transient=False,
+            auto_refresh=False
+        )
+        time_bar.add_task("", total=100, completed=current_exp)
+        time_bar.refresh()
+
+        return Group(
+            title_text,
+            subtitle_text,
+            bar_progress,
+            table,
+            time_bar
+        )
+
+# -----------------------------------------------------------------------------
+# main
+# -----------------------------------------------------------------------------
+def main():
+    """
+    1) Attempt to locate the HP/MP/FP/EXP bars via OpenCV template matching.
+    2) Position the overlay region accordingly if found, else use the default.
+    3) Start PyQt, periodically OCR the region, and update the Rich Live display in the terminal.
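+
+    Usage (run from the repository root):
+        python Experiments/borealis_overlay.py
+
+    Note: the Live display is refreshed from the Qt side; the QTimer fires
+    collect_ocr_data(), which calls live.update() every POLLING_RATE_MS.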
+    """
+    app = QApplication(sys.argv)
+    window = BorealisOverlay()
+    window.setWindowTitle("Project Borealis Overlay (HP/MP/FP/EXP)")
+    window.show()
+
+    console = Console()
+
+    with Live(console=console, refresh_per_second=4) as live:
+        window.set_live(live)
+        exit_code = app.exec_()
+
+    sys.exit(exit_code)
+
+if __name__ == "__main__":
+    main()
diff --git a/Modules/__pycache__/data_collector.cpython-312.pyc b/Modules/__pycache__/data_collector.cpython-312.pyc
index 9d4d97e..f99d78d 100644
Binary files a/Modules/__pycache__/data_collector.cpython-312.pyc and b/Modules/__pycache__/data_collector.cpython-312.pyc differ
diff --git a/Modules/data_collector.py b/Modules/data_collector.py
index 1a041c4..5f9df55 100644
--- a/Modules/data_collector.py
+++ b/Modules/data_collector.py
@@ -25,6 +25,7 @@
 regions = {}
 app_instance = None
 
+
 def _ensure_qapplication():
     """
     Ensures that QApplication is initialized before creating widgets.
@@ -34,12 +35,14 @@
         app_instance = QApplication(sys.argv)
         threading.Thread(target=app_instance.exec_, daemon=True).start()
 
-def create_ocr_region(region_id, x=250, y=50, w=DEFAULT_WIDTH, h=DEFAULT_HEIGHT):
+
+def create_ocr_region(region_id, x=250, y=50, w=DEFAULT_WIDTH, h=DEFAULT_HEIGHT, color=(255, 255, 0)):
     """
     Creates an OCR region with a visible, resizable box on the screen.
+    The color parameter allows customization (default yellow; blue for overlays).
     """
-
-    _ensure_qapplication()  # Ensure QApplication is running first
+
+    _ensure_qapplication()
 
     collector_mutex.lock()
     if region_id in regions:
@@ -48,10 +51,11 @@ def create_ocr_region(region_id, x=250, y=50, w=DEFAULT_WIDTH, h=DEFAULT_HEIGHT)
     regions[region_id] = {
         'bbox': [x, y, w, h],
         'raw_text': "",
-        'widget': OCRRegionWidget(x, y, w, h, region_id)
+        'widget': OCRRegionWidget(x, y, w, h, region_id, color)
     }
     collector_mutex.unlock()
 
+
 def get_raw_text(region_id):
     collector_mutex.lock()
     if region_id not in regions:
@@ -61,10 +65,12 @@
     collector_mutex.unlock()
     return text
 
+
 def start_collector():
     t = threading.Thread(target=_update_ocr_loop, daemon=True)
     t.start()
 
+
 def _update_ocr_loop():
     while True:
         collector_mutex.lock()
@@ -79,25 +85,102 @@
             x, y, w, h = bbox
             screenshot = ImageGrab.grab(bbox=(x, y, x + w, y + h))
             processed = _preprocess_image(screenshot)
-            raw_text = pytesseract.image_to_string(processed, config='--psm 4 --oem 1')
+            raw_text = pytesseract.image_to_string(processed, config='--psm 6 --oem 1')
 
             collector_mutex.lock()
             if rid in regions:
                 regions[rid]['raw_text'] = raw_text
             collector_mutex.unlock()
 
-#            print(f"OCR Text for {rid}: {raw_text}")  # SHOW RAW OCR OUTPUT IN TERMINAL FOR DEBUGGING
-
         time.sleep(0.7)
 
+
 def _preprocess_image(image):
     gray = image.convert("L")
     scaled = gray.resize((gray.width * 3, gray.height * 3))
     thresh = scaled.point(lambda p: 255 if p > 200 else 0)
     return thresh.filter(ImageFilter.MedianFilter(3))
 
+
+def find_word_positions(region_id, word, offset_x=0, offset_y=0, margin=5):
+    """
+    Finds positions of a specific word within the OCR region.
+    Applies user-defined offset and margin adjustments.
+    Returns a list of bounding-box coordinates relative to the OCR box.
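+
+    Example (illustrative; mirrors the Identification Overlay node):
+        boxes = find_word_positions("identification_overlay", "Aibatt", margin=5)
+        draw_identification_boxes("identification_overlay", boxes, color=(0, 0, 255))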
+    """
+    collector_mutex.lock()
+    if region_id not in regions:
+        collector_mutex.unlock()
+        return []
+
+    bbox = regions[region_id]['bbox']
+    collector_mutex.unlock()
+
+    # Extract OCR region position and size
+    x, y, w, h = bbox
+    left, top, right, bottom = x, y, x + w, y + h
+
+    if right <= left or bottom <= top:
+        print(f"[ERROR] Invalid OCR region bounds: {bbox}")
+        return []
+
+    try:
+        image = ImageGrab.grab(bbox=(left, top, right, bottom))
+        processed = _preprocess_image(image)
+
+        # Original and processed image sizes
+        orig_width, orig_height = image.size
+        proc_width, proc_height = processed.size
+
+        # Scale factors from processed-image coordinates back to the original screenshot
+        scale_x = orig_width / proc_width
+        scale_y = orig_height / proc_height
+
+        data = pytesseract.image_to_data(processed, config='--psm 6 --oem 1', output_type=pytesseract.Output.DICT)
+
+        word_positions = []
+        for i in range(len(data['text'])):
+            # re.escape guards against regex metacharacters in the search term
+            if re.search(rf"\b{re.escape(word)}\b", data['text'][i], re.IGNORECASE):
+                # Scale the detected coordinates back to region-relative positions
+                x_scaled = int(data['left'][i] * scale_x)
+                y_scaled = int(data['top'][i] * scale_y)
+                w_scaled = int(data['width'][i] * scale_x)
+                h_scaled = int(data['height'][i] * scale_y)
+
+                # Apply the user-configured margin
+                x_margin = max(0, x_scaled - margin)
+                y_margin = max(0, y_scaled - margin)
+                w_margin = w_scaled + (margin * 2)
+                h_margin = h_scaled + (margin * 2)
+
+                # Apply the user-configured offset
+                x_final = x_margin + offset_x
+                y_final = y_margin + offset_y
+
+                word_positions.append((x_final, y_final, w_margin, h_margin))
+
+        return word_positions
+    except Exception as e:
+        print(f"[ERROR] Failed to capture OCR region: {e}")
+        return []
+
+
+def draw_identification_boxes(region_id, positions, color=(0, 0, 255)):
+    """
+    Draws non-interactive rectangles at the specified positions within the given OCR region.
+    """
+    collector_mutex.lock()
+    if region_id in regions and 'widget' in regions[region_id]:
+        widget = regions[region_id]['widget']
+        widget.set_draw_positions(positions, color)
+    collector_mutex.unlock()
+
+
 class OCRRegionWidget(QWidget):
-    def __init__(self, x, y, w, h, region_id):
+    def __init__(self, x, y, w, h, region_id, color):
         super().__init__()
 
         self.setGeometry(x, y, w, h)
@@ -108,23 +191,41 @@ class OCRRegionWidget(QWidget):
         self.drag_offset = None
         self.selected_handle = None
         self.region_id = region_id
+        self.box_color = QColor(*color)
+        self.draw_positions = []
 
         self.show()
 
     def paintEvent(self, event):
         painter = QPainter(self)
-        pen = QPen(QColor(255, 255, 0))  # COLOR OF THE BOX ITSELF
-        pen.setWidth(5)  # WIDTH OF THE BOX BORDER
+        pen = QPen(self.box_color)
+        pen.setWidth(5)
         painter.setPen(pen)
 
         # Draw main rectangle
         painter.drawRect(0, 0, self.width(), self.height())
 
+        # Draw detected word overlays
+        pen.setWidth(2)
+        pen.setColor(QColor(0, 0, 255))
+        painter.setPen(pen)
+
+        for x, y, w, h in self.draw_positions:
+            painter.drawRect(x, y, w, h)
+
         # Draw resize handles
-        painter.setBrush(QColor(255, 255, 0))  # COLOR OF THE RESIZE HANDLES
+        painter.setBrush(self.box_color)
         for handle in self._resize_handles():
             painter.drawRect(handle)
 
+    def set_draw_positions(self, positions, color):
+        """
+        Update the positions where identification boxes should be drawn.
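+
+        `positions` is a list of (x, y, w, h) tuples in widget-local
+        coordinates; `color` is an (R, G, B) tuple that becomes the
+        widget's box color.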
+        """
+        self.draw_positions = positions
+        self.box_color = QColor(*color)
+        self.update()
+
     def _resize_handles(self):
         w, h = self.width(), self.height()
         return [
@@ -174,7 +275,3 @@
         if self.region_id in regions:
             regions[self.region_id]['bbox'] = [new_x, new_y, self.width(), self.height()]
         collector_mutex.unlock()
-
-    def mouseReleaseEvent(self, event):
-        self.selected_handle = None
-        self.drag_offset = None
diff --git a/Nodes/Flyff/__pycache__/identification_overlay.cpython-312.pyc b/Nodes/Flyff/__pycache__/identification_overlay.cpython-312.pyc
new file mode 100644
index 0000000..0498144
Binary files /dev/null and b/Nodes/Flyff/__pycache__/identification_overlay.cpython-312.pyc differ
diff --git a/Nodes/General Purpose/__pycache__/identification_overlay.cpython-312.pyc b/Nodes/General Purpose/__pycache__/identification_overlay.cpython-312.pyc
new file mode 100644
index 0000000..5e05c3d
Binary files /dev/null and b/Nodes/General Purpose/__pycache__/identification_overlay.cpython-312.pyc differ
diff --git a/Nodes/General Purpose/identification_overlay.py b/Nodes/General Purpose/identification_overlay.py
new file mode 100644
index 0000000..1510ab6
--- /dev/null
+++ b/Nodes/General Purpose/identification_overlay.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+"""
+Identification Overlay Node:
+- Creates an OCR region in data_collector with a blue overlay.
+- Detects instances of a specified word and draws adjustable overlay boxes.
+- Offset and margin are user-configurable at runtime.
+"""
+
+from OdenGraphQt import BaseNode
+from PyQt5.QtCore import QTimer
+from Modules import data_manager, data_collector
+
+
+class IdentificationOverlayNode(BaseNode):
+    __identifier__ = "bunny-lab.io.identification_overlay_node"
+    NODE_NAME = "Identification Overlay"
+
+    def __init__(self):
+        super(IdentificationOverlayNode, self).__init__()
+
+        # User-configurable options
+        self.add_text_input("search_term", "Search Term", text="Aibatt")
+        self.add_text_input("offset_value", "Offset Value (X,Y)", text="0,0")
+        self.add_text_input("margin", "Margin", text="5")
+
+        self.region_id = "identification_overlay"
+        data_collector.create_ocr_region(self.region_id, x=250, y=50, w=300, h=200, color=(0, 0, 255))
+
+        data_collector.start_collector()
+        self.set_name("Identification Overlay")
+
+        # Timer for updating overlays
+        self.timer = QTimer()
+        self.timer.timeout.connect(self.update_overlay)
+        self.timer.start(500)  # Update every 500 ms
+
+    def update_overlay(self):
+        """
+        Updates the overlay with detected word positions.
+        """
+        search_term = self.get_property("search_term")
+        offset_text = self.get_property("offset_value")
+        margin_text = self.get_property("margin")
+
+        # Parse the user-defined offset
+        try:
+            offset_x, offset_y = map(int, offset_text.split(","))
+        except ValueError:
+            offset_x, offset_y = 0, 0  # Fall back to no offset on invalid input
+
+        # Parse the user-defined margin
+        try:
+            margin = int(margin_text)
+        except ValueError:
+            margin = 5  # Fall back to the default margin on invalid input
+
+        if not search_term:
+            return
+
+        # Get detected word positions
+        detected_positions = data_collector.find_word_positions(self.region_id, search_term, offset_x, offset_y, margin)
+
+        # Draw the detected word boxes
+        data_collector.draw_identification_boxes(self.region_id, detected_positions, color=(0, 0, 255))
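+
+
+# Minimal standalone sketch of the same pipeline (illustrative; bypasses the
+# node graph, so no OdenGraphQt is required):
+#
+#   from Modules import data_collector
+#   data_collector.create_ocr_region("demo", x=250, y=50, w=300, h=200, color=(0, 0, 255))
+#   data_collector.start_collector()
+#   boxes = data_collector.find_word_positions("demo", "Aibatt", margin=5)
+#   data_collector.draw_identification_boxes("demo", boxes, color=(0, 0, 255))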