Enabled live view in flyff_character_status_node.py without requiring OCR to be enabled for screen sharing.
parent 7d39baad90
commit 7991aa751f
@@ -71,7 +71,7 @@ def fp_api():
         "fp_total": get_data()["fp_total"]
     })

-@app.route('/status_screenshot')
+@app.route('/flyff/status')
 def status_screenshot():
     """
     Returns an HTML page that displays the stored screenshot and
@@ -80,23 +80,23 @@ def status_screenshot():
     html = """
     <html>
       <head>
-        <title>Live Flyff Character Status</title>
+        <title>Borealis - Live Status</title>
         <script>
           // Reload the <img> every second
           setInterval(function(){
             var img = document.getElementById('status_img');
-            img.src = '/status_screenshot_data?random=' + Math.random();
+            img.src = '/flyff/status_rawdata?random=' + Math.random();
           }, 1000);
         </script>
       </head>
       <body>
-        <img id="status_img" src="/status_screenshot_data" />
+        <img id="status_img" src="/flyff/status_rawdata" />
       </body>
     </html>
     """
     return html

-@app.route('/status_screenshot_data')
+@app.route('/flyff/status_rawdata')
 def status_screenshot_data():
     """
     Serves the raw PNG bytes (decoded from base64) used by <img> in /status_screenshot.
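The body of status_screenshot_data() is cut off in the hunk above, and the Flask server file's name is not shown in this extract. For context, a minimal sketch of what that handler's body could look like, assuming data_manager exposes a get_status_screenshot() counterpart to the set_status_screenshot() call made by the node; the getter name and the empty-image fallback are assumptions, not part of this commit. The hunks that follow belong to flyff_character_status_node.py.

# Sketch only: `app` and `data_manager` are the existing objects in the Flask server module.
import base64
from flask import Response

@app.route('/flyff/status_rawdata')
def status_screenshot_data():
    """Serve the stored screenshot as raw PNG bytes for the <img> tag above."""
    image_b64 = data_manager.get_status_screenshot()  # assumed getter
    if not image_b64:
        return Response(status=204)  # nothing captured yet
    png_bytes = base64.b64decode(image_b64)
    return Response(png_bytes, mimetype='image/png')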
@@ -2,9 +2,8 @@
 """
 Flyff Character Status Node:
  - Creates an OCR region in data_collector.
- - Periodically grabs raw text from that region and updates status.
- - ALSO: Captures a screenshot from the same region, converts to base64,
-   and updates data_manager so the Flask server can serve it.
+ - Periodically captures a screenshot and updates Flask.
+ - If OCR is enabled, it extracts character status and updates the data_manager.
 """

 import re
@@ -13,7 +12,7 @@ from io import BytesIO

 from OdenGraphQt import BaseNode
 from PyQt5.QtWidgets import QMessageBox
-from PyQt5.QtCore import QTimer  # Corrected import
+from PyQt5.QtCore import QTimer

 # Import the existing modules
 from Modules import data_manager, data_collector
@@ -30,6 +29,10 @@ class FlyffCharacterStatusNode(BaseNode):
             raise Exception("Duplicate Character Status Node.")
         data_manager.character_status_collector_exists = True

+        # Add the Data Collection dropdown menu
+        self.add_combo_menu("data_collection", "Data Collection", items=["Disabled", "Enabled"])
+        self.set_property("data_collection", "Disabled")  # Default to Disabled
+
         self.add_text_input("hp", "HP", text="HP: 0/0")
         self.add_text_input("mp", "MP", text="MP: 0/0")
         self.add_text_input("fp", "FP", text="FP: 0/0")
@@ -90,14 +93,25 @@ class FlyffCharacterStatusNode(BaseNode):

     def process_input(self):
         """
-        Called periodically to update character status from OCR,
-        and also capture the screenshot to display via Flask.
+        Called periodically to capture a screenshot and update character status (if enabled).
         """
-        # 1) Update the text-based status (same as before).
+        # Always capture the screenshot, regardless of OCR status
+        screenshot_img = data_collector.capture_region_as_image(self.region_id)
+        if screenshot_img:
+            buf = BytesIO()
+            screenshot_img.save(buf, format='PNG')
+            image_b64 = base64.b64encode(buf.getvalue()).decode('utf-8')
+            data_manager.set_status_screenshot(image_b64)
+
+        # If OCR is disabled, return early (skip OCR processing)
+        if self.get_property("data_collection") == "Disabled":
+            return
+
+        # Process OCR if enabled
         raw_text = data_collector.get_raw_text(self.region_id)
         hp_c, hp_t, mp_c, mp_t, fp_c, fp_t, exp_v = self.parse_character_stats(raw_text)

-        # Update data_manager with the parsed values
+        # Update data_manager with parsed values
         data_manager.set_data_bulk({
             "hp_current": hp_c,
             "hp_total": hp_t,
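How process_input() gets called is not visible in this diff; the QTimer import retained above suggests a simple polling timer that the node presumably wires to self.process_input elsewhere in the file. A standalone illustration of that pattern, assuming a one-second interval to match the page's <img> reload:

# Illustration of the QTimer polling pattern; not the node's actual wiring.
import sys
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)

def poll():
    # Stand-in for FlyffCharacterStatusNode.process_input()
    print("capture screenshot; run OCR only if data_collection == 'Enabled'")

timer = QTimer()
timer.timeout.connect(poll)   # fire poll() on every timeout
timer.start(1000)             # 1000 ms; assumed to match the page's 1 s reload
sys.exit(app.exec_())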
@@ -113,12 +127,3 @@ class FlyffCharacterStatusNode(BaseNode):
         self.set_property("mp", f"MP: {mp_c}/{mp_t}")
         self.set_property("fp", f"FP: {fp_c}/{fp_t}")
         self.set_property("exp", f"EXP: {exp_v}%")
-
-        # 2) Capture the screenshot used by OCR and store as base64
-        screenshot_img = data_collector.capture_region_as_image(self.region_id)
-        if screenshot_img:
-            # Convert PIL image to base64
-            buf = BytesIO()
-            screenshot_img.save(buf, format='PNG')
-            image_b64 = base64.b64encode(buf.getvalue()).decode('utf-8')
-            data_manager.set_status_screenshot(image_b64)
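The data_manager module that links the node to the Flask routes is not part of this commit. A minimal sketch of the screenshot storage it would need for the flow above, assuming a module-level string guarded by a lock; set_status_screenshot() is the name called in process_input(), while get_status_screenshot() is the assumed getter used in the Flask sketch earlier:

# Sketch of the screenshot-storage side of Modules/data_manager.py (assumed, not shown here).
import threading

_status_screenshot_b64 = ""          # base64-encoded PNG written by the node
_screenshot_lock = threading.Lock()  # node and Flask server run on different threads

def set_status_screenshot(image_b64: str) -> None:
    global _status_screenshot_b64
    with _screenshot_lock:
        _status_screenshot_b64 = image_b64

def get_status_screenshot() -> str:
    with _screenshot_lock:
        return _status_screenshot_b64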