#!/usr/bin/env python3
import os
import sys
import time
import json
import subprocess
import logging
import re
import requests
from datetime import datetime
from typing import Tuple, Dict, Optional, List, Any
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger("huntarr-tdarr")
###################################
# Configuration
###################################
class Config:
def __init__(self):
# Where your Tdarr Node log is located (Required)
self.tdarr_node_log_path = os.environ.get("TDARR_NODE_LOG_PATH", "")
# ------------- Tautulli (Required) -------------
self.tautulli_api_key = os.environ.get("TAUTULLI_API_KEY", "")
self.tautulli_url = os.environ.get("TAUTULLI_URL", "")
# ----------- Tdarr Settings (Required) -------------
self.tdarr_alter_workers_str = os.environ.get("TDARR_ALTER_WORKERS", "")
self.tdarr_alter_workers = self.tdarr_alter_workers_str.lower() == "true" if self.tdarr_alter_workers_str else None
# Parse TDARR_DEFAULT_LIMIT as required
self.tdarr_default_limit_str = os.environ.get("TDARR_DEFAULT_LIMIT", "")
try:
self.tdarr_default_limit = int(self.tdarr_default_limit_str) if self.tdarr_default_limit_str else None
except ValueError:
self.tdarr_default_limit = None
self.tdarr_api_url = os.environ.get("TDARR_API_URL", "")
self.container_name = os.environ.get("CONTAINER_NAME", "tdarr_node")
# ------------ Worker Scaling Settings ------------
try:
self.offset_threshold = int(os.environ.get("OFFSET_THRESHOLD", "1"))
except ValueError:
self.offset_threshold = 1
logger.warning("Invalid OFFSET_THRESHOLD value, using default: 1")
# ------------ Node Killer Settings ------------
try:
self.transcode_threshold = int(os.environ.get("TRANSCODE_THRESHOLD", "1"))
except ValueError:
self.transcode_threshold = 1
logger.warning("Invalid TRANSCODE_THRESHOLD value, using default: 1")
# ----------- Other -------------
try:
self.wait_seconds = int(os.environ.get("WAIT_SECONDS", "10"))
except ValueError:
self.wait_seconds = 10
logger.warning("Invalid WAIT_SECONDS value, using default: 10")
try:
self.basic_check = int(os.environ.get("BASIC_CHECK", "3"))
except ValueError:
self.basic_check = 3
logger.warning("Invalid BASIC_CHECK value, using default: 3")
try:
self.restart_delay = int(os.environ.get("RESTART_DELAY", "30"))
except ValueError:
self.restart_delay = 30
logger.warning("Invalid RESTART_DELAY value, using default: 30")
# Validate required configuration
self.validate()
def validate(self) -> None:
"""Validate that required configuration is provided"""
errors = []
# Required core variables
if not self.tdarr_node_log_path:
errors.append("TDARR_NODE_LOG_PATH is required")
if self.tdarr_alter_workers is None:
errors.append("TDARR_ALTER_WORKERS is required (must be 'true' or 'false')")
if self.tdarr_default_limit is None:
errors.append("TDARR_DEFAULT_LIMIT is required (must be a number)")
# Required API variables
if not self.tautulli_api_key:
errors.append("TAUTULLI_API_KEY is required")
if not self.tautulli_url:
errors.append("TAUTULLI_URL is required")
if not self.tdarr_api_url:
errors.append("TDARR_API_URL is required")
if errors:
for error in errors:
logger.error(error)
sys.exit(1)
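# Example environment (illustrative only -- the variable names are exactly the
# ones this Config reads; the paths, URLs, and numbers below are assumptions,
# not recommended values):
#
#   TDARR_NODE_LOG_PATH=/app/logs/Tdarr_Node_Log.txt
#   TAUTULLI_API_KEY=abcdef1234567890
#   TAUTULLI_URL=http://tautulli:8181/api/v2
#   TDARR_ALTER_WORKERS=true
#   TDARR_DEFAULT_LIMIT=3
#   TDARR_API_URL=http://tdarr:8265
#   CONTAINER_NAME=tdarr_node
#   OFFSET_THRESHOLD=1
#   TRANSCODE_THRESHOLD=1
#   WAIT_SECONDS=10
#   BASIC_CHECK=3
#   RESTART_DELAY=30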
class HuntarrTdarr:
def __init__(self, config: Config):
self.config = config
self.tdarr_node_id = ""
self.total_count = 0
self.last_operation = ""
self.last_gpu_limit = 0
self.consecutive_duplicates = 0
def find_latest_node_id(self) -> str:
"""Find the latest nodeID from the Tdarr Node log file"""
if not os.path.isfile(self.config.tdarr_node_log_path):
return ""
try:
with open(self.config.tdarr_node_log_path, 'r') as f:
content = f.read()
matches = re.findall(r'"nodeID":\s*"([^"]+)"', content)
if matches:
return matches[-1]
except Exception as e:
logger.error(f"Error reading node log file: {e}")
return ""
def ensure_node_id_loaded(self) -> bool:
"""Ensure that the nodeID is loaded"""
if self.tdarr_node_id:
return True
logger.info(f"Attempting to retrieve nodeID from {self.config.tdarr_node_log_path}")
node_id = self.find_latest_node_id()
if not node_id:
logger.error(f"Could not find any nodeID in {self.config.tdarr_node_log_path}")
return False
self.tdarr_node_id = node_id
logger.info(f"Found nodeID: {self.tdarr_node_id}")
return True
def refresh_node_id_if_changed(self) -> None:
"""Check if the nodeID has changed and update it"""
latest = self.find_latest_node_id()
if not latest:
logger.warning("Could not find any 'nodeID' lines in the log to refresh.")
return
if latest != self.tdarr_node_id:
logger.info(f"NOTICE: nodeID changed from [{self.tdarr_node_id}] -> [{latest}]. Updating.")
self.tdarr_node_id = latest
else:
logger.info(f"NOTICE: nodeID is still the same [{self.tdarr_node_id}].")
def check_tautulli_connection(self) -> bool:
"""Check connection to Tautulli"""
logger.info(f"Checking Tautulli at: {self.config.tautulli_url}")
try:
response = requests.get(f"{self.config.tautulli_url}?apikey={self.config.tautulli_api_key}&cmd=get_activity")
response.raise_for_status()
response.json() # Validate JSON
logger.info(f"Tautulli OK: {self.config.tautulli_url}")
return True
except Exception as e:
logger.warning(f"WARNING: Could not connect or invalid JSON: {self.config.tautulli_url} - {e}")
return False
def fetch_transcode_counts_from_tautulli(self) -> Tuple[int, int]:
"""Fetch transcoding counts from Tautulli"""
try:
response = requests.get(f"{self.config.tautulli_url}?apikey={self.config.tautulli_api_key}&cmd=get_activity")
response.raise_for_status()
data = response.json()
# Only count sessions that are transcoding video, not just audio
local_count = 0
remote_count = 0
if (data and 'response' in data and 'data' in data['response'] and
'sessions' in data['response']['data']):
for session in data['response']['data']['sessions']:
if (session.get('transcode_decision') == 'transcode' and
session.get('video_decision') == 'transcode'):
if session.get('ip_address', '').startswith('10.0.0.'):
local_count += 1
else:
remote_count += 1
return (local_count, remote_count)
except Exception as e:
logger.warning(f"Error fetching Tautulli data: {e}")
return (0, 0)
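# The parser above relies only on this subset of Tautulli's get_activity
# response; all other fields are ignored, and the example values are
# assumptions. Note that "local" vs "remote" is inferred from a hardcoded
# 10.0.0.x LAN prefix on each session's ip_address.
#
#   {
#     "response": {
#       "data": {
#         "sessions": [
#           {"transcode_decision": "transcode",
#            "video_decision": "transcode",
#            "ip_address": "10.0.0.25"}
#         ]
#       }
#     }
#   }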
def is_plex_transcoding_over_threshold(self) -> bool:
"""Check if Plex transcoding is over the threshold"""
logger.info("Checking Plex transcodes...")
local, remote = self.fetch_transcode_counts_from_tautulli()
self.total_count = local + remote
logger.info(f"Found {local} local & {remote} remote => total={self.total_count}, threshold={self.config.transcode_threshold}")
# Return True if watchers >= threshold
return self.total_count >= self.config.transcode_threshold
def is_container_running(self) -> bool:
"""Check if the Tdarr container is running"""
try:
result = subprocess.run(
["docker", "inspect", "-f", "{{.State.Running}}", self.config.container_name],
capture_output=True,
text=True,
check=False
)
return result.stdout.strip() == "true"
except Exception as e:
logger.error(f"Error checking container status: {e}")
return False
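# `docker inspect -f '{{.State.Running}}' <name>` prints "true" or "false" for
# an existing container; if the container does not exist the command fails and
# stdout is empty, which the check above treats the same as "not running".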
def run(self) -> None:
"""Main execution loop"""
# Initial setup
self.ensure_node_id_loaded()
# Check Tautulli connection
if not self.check_tautulli_connection():
logger.error("ERROR: Tautulli not reachable. Exiting.")
sys.exit(1)
self.set_initial_gpu_workers()
# Main loop
while True:
try:
if self.is_plex_transcoding_over_threshold():
# Transcoding is over threshold
if self.config.tdarr_alter_workers:
# Adjust workers mode
operation = f"reduce_workers_{self.total_count}"
try:
response = requests.post(
f"{self.config.tdarr_api_url}/api/v2/poll-worker-limits",
headers={"Content-Type": "application/json"},
json={"data": {"nodeID": self.tdarr_node_id}}
)
response.raise_for_status()
data = response.json()
current_limit = data.get('workerLimits', {}).get('transcodegpu', 0)
if operation == self.last_operation and current_limit == self.last_gpu_limit:
self.consecutive_duplicates += 1
if self.consecutive_duplicates > 2:
logger.info(f"Skipping duplicate worker adjustment (done {self.consecutive_duplicates} times already)")
time.sleep(self.config.wait_seconds)
continue
else:
self.consecutive_duplicates = 0
self.last_operation = operation
self.last_gpu_limit = current_limit
logger.info("Threshold exceeded. Reducing GPU workers.")
self.adjust_tdarr_workers(self.total_count)
time.sleep(self.config.wait_seconds)
except Exception as e:
logger.error(f"Error in worker adjustment: {e}")
time.sleep(self.config.wait_seconds)
else:
# Kill container mode
operation = "kill_container"
if operation == self.last_operation:
self.consecutive_duplicates += 1
if self.consecutive_duplicates > 2:
logger.info(f"Skipping duplicate container management (done {self.consecutive_duplicates} times already)")
time.sleep(self.config.wait_seconds)
continue
else:
self.consecutive_duplicates = 0
self.last_operation = operation
if self.is_container_running():
logger.info(f"Threshold exceeded: Killing {self.config.container_name}")
subprocess.run(["docker", "kill", self.config.container_name], check=False)
else:
logger.info(f"{self.config.container_name} is already stopped.")
time.sleep(self.config.wait_seconds)
else:
# Below threshold
if self.config.tdarr_alter_workers:
# Adjust workers mode
operation = f"adjust_workers_{self.total_count}"
try:
response = requests.post(
f"{self.config.tdarr_api_url}/api/v2/poll-worker-limits",
headers={"Content-Type": "application/json"},
json={"data": {"nodeID": self.tdarr_node_id}}
)
response.raise_for_status()
data = response.json()
current_limit = data.get('workerLimits', {}).get('transcodegpu', 0)
if operation == self.last_operation and current_limit == self.last_gpu_limit:
self.consecutive_duplicates += 1
if self.consecutive_duplicates > 2:
logger.info(f"Skipping duplicate worker adjustment (done {self.consecutive_duplicates} times already)")
time.sleep(self.config.basic_check)
continue
else:
self.consecutive_duplicates = 0
self.last_operation = operation
self.last_gpu_limit = current_limit
self.adjust_tdarr_workers(self.total_count)
except Exception as e:
logger.error(f"Error in worker adjustment: {e}")
# Check container state regardless of mode
operation = "start_container"
if operation == self.last_operation and self.is_container_running():
self.consecutive_duplicates += 1
if self.consecutive_duplicates > 2:
logger.info(f"Skipping duplicate container check (done {self.consecutive_duplicates} times already)")
time.sleep(self.config.basic_check)
continue
else:
self.consecutive_duplicates = 0
self.last_operation = operation
if not self.is_container_running():
# In node kill mode, wait RESTART_DELAY seconds before starting container
if not self.config.tdarr_alter_workers:
logger.info(f"Below threshold in node kill mode -> Waiting {self.config.restart_delay} seconds before starting container {self.config.container_name}.")
initial_watchers = self.get_total_watchers()
delay = self.config.restart_delay
interval = 5
elapsed = 0
continue_outer_loop = False
while elapsed < delay and not continue_outer_loop:
time.sleep(interval)
elapsed += interval
current_watchers = self.get_total_watchers()
if current_watchers >= self.config.transcode_threshold:
logger.info(f"Watcher count increased to {current_watchers} during delay. Skipping container start.")
continue_outer_loop = True
if continue_outer_loop:
continue
logger.info(f"Below threshold -> Starting container {self.config.container_name}.")
subprocess.run(["docker", "start", self.config.container_name], check=False)
else:
logger.info(f"Container {self.config.container_name} is already running.")
time.sleep(self.config.basic_check)
except Exception as e:
logger.error(f"Unexpected error in main loop: {e}")
time.sleep(self.config.basic_check)
def adjust_tdarr_workers(self, watchers: int) -> None:
"""Adjust the number of Tdarr GPU workers based on transcoding load"""
if not self.ensure_node_id_loaded():
return
# Calculate how many watchers are above the offset
watchers_over_offset = 0
if self.config.offset_threshold == 0:
watchers_over_offset = watchers
else:
watchers_over_offset = watchers - self.config.offset_threshold + 1
if watchers_over_offset < 0:
watchers_over_offset = 0
# Desired = TDARR_DEFAULT_LIMIT - watchersOverOffset
desired = self.config.tdarr_default_limit - watchers_over_offset
if desired < 0:
desired = 0
logger.info(f"watchers={watchers} => watchersOverOffset={watchers_over_offset} => desiredWorkers={desired}")
# Poll current worker limits
try:
response = requests.post(
f"{self.config.tdarr_api_url}/api/v2/poll-worker-limits",
headers={"Content-Type": "application/json"},
json={"data": {"nodeID": self.tdarr_node_id}}
)
response.raise_for_status()
data = response.json()
current = data.get('workerLimits', {}).get('transcodegpu')
if current is None:
logger.error(f"ERROR: Could not retrieve current GPU worker limit for nodeID='{self.tdarr_node_id}'. Will re-check log for a new ID.")
self.refresh_node_id_if_changed()
return
logger.info(f"Current GPU worker limit: {current}")
diff = desired - current
if diff == 0:
logger.info(f"Already at the desired GPU worker limit ({desired}).")
return
if diff > 0:
step = "increase"
logger.info(f"Need to increase by {diff}")
# Modified increase branch using real Tautulli watcher count
logger.info("Need to increase workers. Initiating delay...")
original_watchers = self.get_total_watchers()
initial_watchers_over_offset = 0
if self.config.offset_threshold == 0:
initial_watchers_over_offset = original_watchers
else:
initial_watchers_over_offset = original_watchers - self.config.offset_threshold + 1
if initial_watchers_over_offset < 0:
initial_watchers_over_offset = 0
initial_desired = self.config.tdarr_default_limit - initial_watchers_over_offset
if initial_desired < 0:
initial_desired = 0
logger.info(f"Before delay: Tautulli watchers={original_watchers}, initial desired workers={initial_desired}")
delay = self.config.restart_delay
interval = 5
elapsed = 0
while elapsed < delay:
time.sleep(interval)
elapsed += interval
during_watchers = self.get_total_watchers()
during_watchers_over_offset = 0
if self.config.offset_threshold == 0:
during_watchers_over_offset = during_watchers
else:
during_watchers_over_offset = during_watchers - self.config.offset_threshold + 1
if during_watchers_over_offset < 0:
during_watchers_over_offset = 0
current_desired = self.config.tdarr_default_limit - during_watchers_over_offset
if current_desired < 0:
current_desired = 0
logger.info(f"During delay: Tautulli watchers={during_watchers}, desired workers={current_desired} (initial desired was {initial_desired})")
if current_desired < initial_desired:
logger.info(f"Desired workers dropped from {initial_desired} to {current_desired} during delay. Cancelling worker increase.")
return
# Final confirmation: get final desired from Tautulli
final_watchers = self.get_total_watchers()
final_watchers_over_offset = 0
if self.config.offset_threshold == 0:
final_watchers_over_offset = final_watchers
else:
final_watchers_over_offset = final_watchers - self.config.offset_threshold + 1
if final_watchers_over_offset < 0:
final_watchers_over_offset = 0
final_desired = self.config.tdarr_default_limit - final_watchers_over_offset
if final_desired < 0:
final_desired = 0
# Get current GPU worker limit again
response = requests.post(
f"{self.config.tdarr_api_url}/api/v2/poll-worker-limits",
headers={"Content-Type": "application/json"},
json={"data": {"nodeID": self.tdarr_node_id}}
)
response.raise_for_status()
new_data = response.json()
new_current = new_data.get('workerLimits', {}).get('transcodegpu', 0)
logger.info(f"Final confirmation: desired workers={final_desired}, current workers={new_current}")
if final_desired <= new_current:
logger.info("No longer need to increase workers after final confirmation. Cancelling the increase.")
return
# Apply the changes
for i in range(final_desired - new_current):
requests.post(
f"{self.config.tdarr_api_url}/api/v2/alter-worker-limit",
headers={"Content-Type": "application/json"},
json={"data": {"nodeID": self.tdarr_node_id, "process": "increase", "workerType": "transcodegpu"}}
)
time.sleep(1)
logger.info("GPU worker limit adjustment complete.")
else:
# Decrease workers
step = "decrease"
diff = -diff
logger.info(f"Need to decrease by {diff}")
for i in range(diff):
requests.post(
f"{self.config.tdarr_api_url}/api/v2/alter-worker-limit",
headers={"Content-Type": "application/json"},
json={"data": {"nodeID": self.tdarr_node_id, "process": "decrease", "workerType": "transcodegpu"}}
)
time.sleep(1)
logger.info("GPU worker limit adjustment complete.")
except Exception as e:
logger.error(f"Error adjusting Tdarr workers: {e}")
def set_initial_gpu_workers(self) -> None:
"""Set initial GPU workers on startup"""
if not self.config.tdarr_alter_workers:
return
logger.info(f"Setting initial GPU workers to default limit: {self.config.tdarr_default_limit} on startup")
if not self.ensure_node_id_loaded():
logger.error("ERROR: Could not get nodeID, can't set initial GPU workers")
time.sleep(5)
return
try:
response = requests.post(
f"{self.config.tdarr_api_url}/api/v2/poll-worker-limits",
headers={"Content-Type": "application/json"},
json={"data": {"nodeID": self.tdarr_node_id}}
)
response.raise_for_status()
data = response.json()
current_limit = data.get('workerLimits', {}).get('transcodegpu')
if current_limit is not None:
diff = self.config.tdarr_default_limit - current_limit
if diff != 0:
step = ""
count = 0
if diff > 0:
step = "increase"
count = diff
logger.info(f"Need to increase by {diff} to reach default limit")
else:
step = "decrease"
count = -diff
logger.info(f"Need to decrease by {-diff} to reach default limit")
for i in range(count):
requests.post(
f"{
self.config.tdarr_api_url}/api/v2/alter-worker-limit",
headers={"Content-Type": "application/json"},
json={"data": {"nodeID": self.tdarr_node_id, "process": step, "workerType": "transcodegpu"}}
)
time.sleep(1)
logger.info(f"Initial GPU worker limit set to {self.config.tdarr_default_limit}")
else:
logger.info(f"GPU workers already at desired default limit: {current_limit}")
else:
logger.error("ERROR: Could not get current GPU worker limit")
except Exception as e:
logger.error(f"Error setting initial GPU workers: {e}")
def get_total_watchers(self) -> int:
"""Get total number of transcoding watchers"""
local, remote = self.fetch_transcode_counts_from_tautulli()
return local + remote
if __name__ == "__main__":
try:
config = Config()
huntarr = HuntarrTdarr(config)
huntarr.run()
except KeyboardInterrupt:
logger.info("Process interrupted. Exiting.")
sys.exit(0)
except Exception as e:
logger.error(f"Critical error: {e}")
sys.exit(1)
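# Example invocation (values are placeholders, not recommendations):
#
#   TDARR_NODE_LOG_PATH=/app/logs/Tdarr_Node_Log.txt \
#   TAUTULLI_URL=http://tautulli:8181/api/v2 TAUTULLI_API_KEY=abcdef123456 \
#   TDARR_API_URL=http://tdarr:8265 TDARR_ALTER_WORKERS=true \
#   TDARR_DEFAULT_LIMIT=3 python3 huntarr.py
#
# Because the script shells out to `docker kill` / `docker start`, it has to
# run somewhere the Docker CLI can reach the daemon that owns CONTAINER_NAME
# (for example with /var/run/docker.sock mounted, if huntarr itself runs in a
# container -- a deployment assumption, not something this file configures).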