From ce392d7a0404f368c0a1f65ac8187e0a15d7a8a3 Mon Sep 17 00:00:00 2001
From: Nicole Rappe <nicole.rappe@bunny-lab.io>
Date: Wed, 26 Feb 2025 02:08:00 -0700
Subject: [PATCH] Further optimize identification_overlay node OCR
 processing: run on raw captures, drop coordinate scaling

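The overlay now hands the raw screen capture straight to the OCR engine
instead of running it through _preprocess_image() first. Because OCR sees
the unscaled image, the boxes returned by Tesseract and EasyOCR are already
in screen-space pixels, so the scale_x / scale_y remapping is removed along
with the preprocessing pass.
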
---
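Notes:
    The hot path now reduces to grab -> OCR -> regex filter. A minimal
    sketch of the equivalent GPU flow (the bbox and target word below are
    made up for illustration; reader_gpu mirrors the module-level EasyOCR
    reader in data_collector.py):

        import re
        import numpy as np
        from PIL import ImageGrab
        import easyocr

        # Assumed to match the module-level reader in data_collector.py
        reader_gpu = easyocr.Reader(['en'], gpu=True)

        # Capture a raw (unpreprocessed) region of the screen
        image_np = np.array(ImageGrab.grab(bbox=(0, 0, 800, 600)))

        # EasyOCR returns (bbox, text, confidence); bbox corners are
        # already in capture-space pixels, so no rescaling is needed
        for (bbox, text, _conf) in reader_gpu.readtext(image_np):
            if re.search(r"\bHealth\b", text, re.IGNORECASE):
                (x_min, y_min), (x_max, y_max) = bbox[0], bbox[2]
                print(int(x_min), int(y_min),
                      int(x_max - x_min), int(y_max - y_min))
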
 Modules/data_collector.py                     |  41 ++++++++----------
 1 file changed, 19 insertions(+), 22 deletions(-)

diff --git a/Modules/data_collector.py b/Modules/data_collector.py
index 0f598df..82d9760 100644
--- a/Modules/data_collector.py
+++ b/Modules/data_collector.py
@@ -113,9 +113,9 @@ def _preprocess_image(image):
 
 def find_word_positions(region_id, word, offset_x=0, offset_y=0, margin=5, ocr_engine="CPU"):
     """
-    Finds positions of a specific word within the OCR region.
-    Applies user-defined offset and margin adjustments.
-    Uses Tesseract (CPU) or EasyOCR (GPU) depending on the selected engine.
+    Finds positions of a specific word within the OCR region, operating
+    directly on the raw screen capture (no preprocessing) for speed.
+    Uses Tesseract (CPU) or EasyOCR (GPU) depending on the selected engine.
     """
     collector_mutex.lock()
     if region_id not in regions:
@@ -134,45 +134,42 @@ def find_word_positions(region_id, word, offset_x=0, offset_y=0, margin=5, ocr_e
         return []
 
     try:
+        # Capture raw screen image (NO preprocessing)
         image = ImageGrab.grab(bbox=(left, top, right, bottom))
-        processed = _preprocess_image(image)
 
-        # Get original and processed image sizes
+        # Get original image size
         orig_width, orig_height = image.size
-        proc_width, proc_height = processed.size
-
-        # Scale factor between processed image and original screenshot
-        scale_x = orig_width / proc_width
-        scale_y = orig_height / proc_height
 
         word_positions = []
 
         if ocr_engine == "CPU":
-            # Use Tesseract (CPU)
-            data = pytesseract.image_to_data(processed, config='--psm 6 --oem 1', output_type=pytesseract.Output.DICT)
+            # Run Tesseract directly on the raw PIL image; with no preprocessing, box coordinates are already in screen space
+            data = pytesseract.image_to_data(image, config='--psm 6 --oem 1', output_type=pytesseract.Output.DICT)
 
             for i in range(len(data['text'])):
                 if re.search(rf"\b{word}\b", data['text'][i], re.IGNORECASE):
-                    x_scaled = int(data['left'][i] * scale_x)
-                    y_scaled = int(data['top'][i] * scale_y)
-                    w_scaled = int(data['width'][i] * scale_x)
-                    h_scaled = int(data['height'][i] * scale_y)
+                    x_scaled = int(data['left'][i])
+                    y_scaled = int(data['top'][i])
+                    w_scaled = int(data['width'][i])
+                    h_scaled = int(data['height'][i])
 
                     word_positions.append((x_scaled + offset_x, y_scaled + offset_y, w_scaled + (margin * 2), h_scaled + (margin * 2)))
 
         else:
-            # Use EasyOCR (GPU) - Convert PIL image to NumPy array
-            image_np = np.array(processed)
+            # Convert PIL image to NumPy array for EasyOCR
+            image_np = np.array(image)
+
+            # Run EasyOCR on the GPU; bboxes are already in screen-space pixels
             results = reader_gpu.readtext(image_np)
 
             for (bbox, text, _) in results:
                 if re.search(rf"\b{word}\b", text, re.IGNORECASE):
                     (x_min, y_min), (x_max, y_max) = bbox[0], bbox[2]
 
-                    x_scaled = int(x_min * scale_x)
-                    y_scaled = int(y_min * scale_y)
-                    w_scaled = int((x_max - x_min) * scale_x)
-                    h_scaled = int((y_max - y_min) * scale_y)
+                    x_scaled = int(x_min)
+                    y_scaled = int(y_min)
+                    w_scaled = int(x_max - x_min)
+                    h_scaled = int(y_max - y_min)
 
                     word_positions.append((x_scaled + offset_x, y_scaled + offset_y, w_scaled + (margin * 2), h_scaled + (margin * 2)))
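
A hypothetical call site for the patched function (the region id, word,
and margin below are made up; "region_1" would need to exist in the
module's regions dict):

    # Illustrative only; not part of the patch
    boxes = find_word_positions("region_1", "Health", margin=5,
                                ocr_engine="GPU")
    for (x, y, w, h) in boxes:
        print(f"match at ({x}, {y}), size {w}x{h}")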