-
-
Notifications
You must be signed in to change notification settings - Fork 15
Expand file tree
/
Copy pathtimeline.py
More file actions
2033 lines (1788 loc) · 71 KB
/
timeline.py
File metadata and controls
2033 lines (1788 loc) · 71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
from mastodon import MastodonError
import time
import speak
import sound
import threading
import os
import wx
from GUI import main
class TimelineSettings(object):
    """Persisted per-account, per-timeline preference flags.

    Matched against live timelines by (account_id, tl) in timeline.__init__
    to restore the user's mute/read/hide choices.
    """
    def __init__(self, account, tl):
        # Identity of the timeline these settings belong to.
        self.account_id = account
        self.tl = tl
        # Behavior flags, all off by default.
        self.mute = False
        self.read = False
        self.hide = False
class timeline(object):
    """One column of statuses/notifications for a single account.

    Selects the right fetch function for the timeline type, restores saved
    per-timeline settings, and starts the initial load (from cache when
    available) on a background thread.
    """
    def __init__(self, account, name, type, data=None, user=None, status=None, silent=False):
        """Create a timeline and begin loading it.

        Args:
            account: The owning account object.
            name: Display name of the timeline (e.g. "Home", "Sent").
            type: Timeline kind ("home", "mentions", "user", "list",
                "search", "conversation", ...). Note: shadows the builtin,
                kept for interface compatibility.
            data: Type-specific payload — search query, list id, username
                or a dict (for "user"/"remote_user"), status id, etc.
            user: Optional pre-resolved user object for "user" timelines.
            status: Seed status for "conversation" timelines.
            silent: When True, suppress the "open" sound.
        """
        self.members = []
        self.account = account
        self.app = account.app
        self.status = status
        self.name = name
        self.removable = False
        self.initial = True
        self.statuses = []
        self.type = type
        self.data = data
        self.user = user
        self.index = 0
        self.page = 0
        self.mute = False
        self.read = False
        self.hide = False
        self._loading = False # Flag to prevent concurrent load operations
        self._stop_loading_all = False # Flag to stop load_all_previous
        self._loading_all_active = False # Flag to track if load_all_previous is running
        # Timeline position sync (for home timeline with Mastodon)
        self._position_moved = False # Track if user navigated since last load
        self._last_synced_id = None # Last position synced with server
        # Set of status IDs for O(1) duplicate checking
        self._status_ids = set()
        # Lock for thread-safe duplicate checking and status addition
        self._status_lock = threading.RLock()
        # Gap tracking for cache - when API refresh doesn't fully connect to cached items
        # List of gaps, each gap is a dict with 'max_id' (where to load from)
        self._gaps = []
        self._last_load_time = None # Timestamp of last successful load (for gap detection)
        self._gap_idle_threshold = 600 # Seconds of idle time before gap detection triggers (10 minutes)
        # Per-timeline streaming support
        self._stream_thread = None
        self._stream_started = False
        self._stream_lock = threading.Lock()
        # Restore previously saved mute/read/hide flags for this timeline.
        for i in self.app.timeline_settings:
            if i.account_id == self.account.me.id and i.tl == self.name:
                self.mute = i.mute
                self.read = i.read
                self.hide = i.hide
        if self.type == "user" and self.name != "Sent" or self.type == "conversation" or self.type == "search" or self.type == "list":
            if not silent:
                sound.play(self.account, "open")
            self.removable = True
        # Set up the API function and kwargs based on timeline type
        # Use maximum limit (100) for Bluesky, otherwise use user preference
        if getattr(self.account.prefs, 'platform_type', '') == 'bluesky':
            fetch_limit = 100 # Bluesky max
        else:
            fetch_limit = self.app.prefs.count
        self.update_kwargs = {"limit": fetch_limit}
        self.prev_kwargs = {"limit": fetch_limit}
        # Use platform backend methods where available
        if self.type == "home":
            if hasattr(self.account, '_platform') and self.account._platform:
                self.func = self.account._platform.get_home_timeline
            else:
                self.func = self.account.api.timeline_home
        elif self.type == "mentions":
            # Use platform backend - returns statuses extracted from notifications
            self.func = self.account.get_mentions
        elif self.type == "notifications":
            if hasattr(self.account, '_platform') and self.account._platform:
                self.func = self.account._platform.get_notifications
            else:
                self.func = self.account.api.notifications
        elif self.type == "conversations":
            # Check if platform supports DMs
            if hasattr(self.account, 'supports_feature') and not self.account.supports_feature('direct_messages'):
                # Platform doesn't support DMs - hide this timeline
                self.hide = True
                self.func = lambda **kwargs: []
            else:
                self.func = self.account.api.conversations
        elif self.type == "favourites":
            if hasattr(self.account, '_platform') and self.account._platform:
                self.func = self.account._platform.get_favourites
            else:
                self.func = self.account.api.favourites
            self.removable = True
        elif self.type == "bookmarks":
            if hasattr(self.account, '_platform') and self.account._platform:
                self.func = self.account._platform.get_bookmarks
            else:
                self.func = self.account.api.bookmarks
            self.removable = True
        elif self.type == "user":
            # Extract username and filter from data (data can be string or dict with username/filter)
            if isinstance(self.data, dict):
                username = self.data.get('username')
                user_filter = self.data.get('filter')
            else:
                username = self.data
                user_filter = None
            # If we don't have a user object, look it up by username
            if not self.user and username:
                looked_up = self.account.app.lookup_user_name(self.account, username)
                if looked_up and looked_up != -1:
                    self.user = looked_up
            # Now get the user_id - prefer user object, fall back to username (may fail)
            user_id = self.user.id if self.user else username
            if hasattr(self.account, '_platform') and self.account._platform:
                # Use default args to capture values at definition time
                if user_filter:
                    self.func = lambda uid=user_id, uf=user_filter, **kwargs: self.account._platform.get_user_statuses(uid, filter=uf, **kwargs)
                else:
                    self.func = lambda uid=user_id, **kwargs: self.account._platform.get_user_statuses(uid, **kwargs)
            else:
                self.func = lambda **kwargs: self.account.api.account_statuses(id=self.user.id if self.user else self.data, **kwargs)
        elif self.type == "list":
            # Check if platform supports lists
            if hasattr(self.account, 'supports_feature') and not self.account.supports_feature('lists'):
                self.hide = True
                self.func = lambda **kwargs: []
            elif hasattr(self.account, '_platform') and self.account._platform:
                self.func = lambda **kwargs: self.account._platform.get_list_timeline(self.data, **kwargs)
            else:
                self.func = lambda **kwargs: self.account.api.timeline_list(id=self.data, **kwargs)
            # Fetch list members for streaming (in background to not block startup)
            def fetch_members():
                try:
                    members = self.account.api.list_accounts(id=self.data)
                    self.members = [m.id for m in members]
                except:
                    pass
            threading.Thread(target=fetch_members, daemon=True).start()
        elif self.type == "search":
            self.func = lambda **kwargs: self._search_statuses(**kwargs)
        elif self.type == "feed":
            # Bluesky custom feed
            if hasattr(self.account, '_platform') and self.account._platform:
                self.func = lambda **kwargs: self.account._platform.get_feed_timeline(self.data, **kwargs)
            else:
                self.func = lambda **kwargs: []
            self.removable = True
        elif self.type == "local":
            # Mastodon local timeline
            if hasattr(self.account, '_platform') and self.account._platform:
                self.func = self.account._platform.get_local_timeline
            else:
                self.func = self.account.api.timeline_local
            self.removable = True
        elif self.type == "federated":
            # Mastodon federated/public timeline
            if hasattr(self.account, '_platform') and self.account._platform:
                self.func = self.account._platform.get_public_timeline
            else:
                self.func = self.account.api.timeline_public
            self.removable = True
        elif self.type == "instance":
            # Remote instance local timeline
            if hasattr(self.account, '_platform') and self.account._platform:
                self.func = lambda **kwargs: self.account._platform.get_instance_timeline(self.data, **kwargs)
            else:
                self.func = lambda **kwargs: []
            self.removable = True
            if not silent:
                sound.play(self.account, "open")
        elif self.type == "remote_user":
            # Remote user timeline from another instance
            if hasattr(self.account, '_platform') and self.account._platform:
                # Store in instance variables to avoid closure issues
                self._remote_url = self.data.get('url', '') if isinstance(self.data, dict) else ''
                self._remote_username = self.data.get('username', '') if isinstance(self.data, dict) else ''
                self._remote_filter = self.data.get('filter') if isinstance(self.data, dict) else None
                self.func = self._load_remote_user
            else:
                self.func = lambda **kwargs: []
            self.removable = True
            if not silent:
                sound.play(self.account, "open")
        elif self.type == "pinned":
            # User's pinned posts
            if hasattr(self.account, '_platform') and self.account._platform:
                self.func = lambda **kwargs: self.account._platform.get_pinned_statuses(**kwargs)
            else:
                self.func = lambda **kwargs: self.account.api.account_statuses(id=self.account.me.id, pinned=True, **kwargs)
            self.removable = True
        elif self.type == "scheduled":
            # User's scheduled posts
            if hasattr(self.account, '_platform') and self.account._platform:
                self.func = lambda **kwargs: self.account._platform.get_scheduled_statuses(**kwargs)
            else:
                self.func = lambda **kwargs: self.account.api.scheduled_statuses(**kwargs)
            self.removable = True
        elif self.type == "quotes":
            # Quotes of a specific status (Mastodon 4.5+)
            status_id = self.data
            self.func = lambda sid=status_id, **kwargs: self.account.api._Mastodon__api_request('GET', f'/api/v1/statuses/{sid}/quotes')
            self.removable = True
        # Load saved filter settings if any
        from GUI.timeline_filter import get_saved_filter
        saved_filter = get_saved_filter(self.account, self)
        if saved_filter:
            self._filter_settings = saved_filter
            self._unfiltered_statuses = []
            self._is_filtered = True
        if self.type != "conversation":
            # Check if we should load from cache first
            if self._should_use_cache() and self._load_from_cache():
                # Cache loaded successfully - spawn background refresh thread
                threading.Thread(target=self._refresh_after_cache, daemon=True).start()
            else:
                # No cache or cache disabled - normal load
                threading.Thread(target=self.load, daemon=True).start()
        else:
            self.load_conversation()
def _load_remote_user(self, **kwargs):
"""Helper to load remote user timeline"""
if hasattr(self.account, '_platform') and self.account._platform:
if self._remote_filter:
return self.account._platform.get_remote_user_timeline(self._remote_url, self._remote_username, filter=self._remote_filter, **kwargs)
return self.account._platform.get_remote_user_timeline(self._remote_url, self._remote_username, **kwargs)
return []
def _search_statuses(self, **kwargs):
"""Helper to search and return only statuses"""
# Extract only valid search parameters (avoid passing unsupported kwargs)
limit = kwargs.get('limit', 40)
max_id = kwargs.get('max_id')
# Use platform backend if available
if hasattr(self.account, '_platform') and self.account._platform:
# Only pass max_id if it's actually set (not None)
if max_id:
return self.account._platform.search_statuses(self.data, limit=limit, max_id=max_id)
return self.account._platform.search_statuses(self.data, limit=limit)
# Fallback to Mastodon API - handle versions that don't support limit
search_kwargs = {'q': self.data, 'result_type': 'statuses'}
if max_id:
search_kwargs['max_id'] = max_id
try:
# Try with limit first (Mastodon.py 2.8.0+)
result = self.account.api.search_v2(limit=limit, **search_kwargs)
except TypeError:
# Fall back without limit for older Mastodon.py versions
result = self.account.api.search_v2(**search_kwargs)
if hasattr(result, 'statuses'):
return result.statuses
return result.get('statuses', [])
@property
def supports_streaming(self):
    """Whether this timeline can receive live updates via streaming.

    Bluesky has no streaming support. Of the Mastodon timelines, only
    list, local and federated timelines stream, plus hashtag searches.
    Remote-instance timelines can't stream (most instances require auth).
    """
    if getattr(self.account.prefs, 'platform_type', '') == 'bluesky':
        return False
    if self.type in ('list', 'local', 'federated'):
        return True
    # A search timeline is streamable only when the query is a hashtag.
    return bool(self.type == 'search' and self.data and str(self.data).startswith('#'))
@property
def stream_endpoint(self):
    """The SSE streaming URL for this timeline, or None when unsupported."""
    if not self.supports_streaming:
        return None
    base_url = self.account.prefs.instance_url
    if self.type == 'list':
        return f"{base_url}/api/v1/streaming/list?list={self.data}"
    if self.type == 'local':
        return f"{base_url}/api/v1/streaming/public/local"
    if self.type == 'federated':
        return f"{base_url}/api/v1/streaming/public"
    if self.type == 'search' and self.data and str(self.data).startswith('#'):
        # Hashtag search - the stream parameter takes the bare tag name.
        tag = str(self.data).lstrip('#')
        return f"{base_url}/api/v1/streaming/hashtag?tag={tag}"
    return None
def start_stream(self):
    """Spawn the background streaming worker for this timeline.

    No-op when the timeline type can't stream, streaming is disabled in
    preferences, or a worker is already running/claimed.
    """
    if not self.supports_streaming:
        return
    if not self.app.prefs.streaming:
        return
    with self._stream_lock:
        # Bail out if a previous start already claimed the stream, or the
        # old worker thread hasn't finished exiting yet.
        already_running = self._stream_thread is not None and self._stream_thread.is_alive()
        if self._stream_started or already_running:
            return
        self._stream_started = True
        worker = threading.Thread(target=self._run_stream, daemon=True)
        self._stream_thread = worker
        worker.start()
def stop_stream(self):
    """Signal the streaming worker to shut down.

    Only flips the flag under the lock; the worker notices that
    _stream_started became False on its next loop check and exits itself.
    """
    with self._stream_lock:
        self._stream_started = False
def _run_stream(self):
    """Run the streaming connection for this timeline.

    Worker-thread loop: opens an SSE connection to stream_endpoint, parses
    event/data line pairs, and dispatches each complete event to
    _handle_stream_event. Reconnects with exponential backoff on errors and
    gives up entirely after 10 consecutive failures. Exits when
    _stream_started is cleared or a newer worker thread replaces this one.
    """
    import requests
    import json
    from mastodon import AttribAccessDict
    from platforms.mastodon.models import mastodon_status_to_universal
    # Remember our own thread id so a superseding worker can be detected.
    thread_id = threading.current_thread().ident
    consecutive_errors = 0
    base_delay = 5
    max_delay = 300
    def convert_to_attrib_dict(obj):
        """Recursively convert dicts to AttribAccessDict for attribute access."""
        if isinstance(obj, dict):
            converted = {k: convert_to_attrib_dict(v) for k, v in obj.items()}
            return AttribAccessDict(**converted)
        elif isinstance(obj, list):
            return [convert_to_attrib_dict(item) for item in obj]
        return obj
    while self._stream_started:
        try:
            # Check if we're still the active stream thread
            if self._stream_thread is None or self._stream_thread.ident != thread_id:
                return
            stream_url = self.stream_endpoint
            if not stream_url:
                return
            # Only list timelines require authentication
            # Public streams (local, federated, hashtag, instance) don't need auth
            from version import APP_NAME, APP_VERSION
            headers = {
                "Accept": "text/event-stream",
                "User-Agent": f"{APP_NAME}/{APP_VERSION}",
            }
            if self.type == 'list':
                headers["Authorization"] = f"Bearer {self.account.prefs.access_token}"
            with requests.get(stream_url, headers=headers, stream=True, timeout=300) as response:
                response.raise_for_status()
                # Connected successfully - reset the backoff counter.
                consecutive_errors = 0
                event_type = None
                data_lines = []
                for line in response.iter_lines():
                    if not self._stream_started:
                        return
                    if self._stream_thread is None or self._stream_thread.ident != thread_id:
                        return
                    if line:
                        line = line.decode('utf-8')
                        if line.startswith('event:'):
                            event_type = line[6:].strip()
                        elif line.startswith('data:'):
                            # Multi-line data fields accumulate until the blank line.
                            data_lines.append(line[5:].strip())
                    else:
                        # Empty line = end of event
                        if event_type and data_lines:
                            data_str = '\n'.join(data_lines)
                            try:
                                data = json.loads(data_str)
                                self._handle_stream_event(event_type, data, convert_to_attrib_dict)
                            except json.JSONDecodeError:
                                pass
                            # Reset parser state for the next event.
                            event_type = None
                            data_lines = []
        except requests.exceptions.Timeout:
            # Idle timeout - reconnect promptly without counting it as an error.
            time.sleep(2)
            continue
        except Exception:
            consecutive_errors += 1
            if consecutive_errors >= 10:
                # Too many errors, give up
                self._stream_started = False
                return
            # Exponential backoff: 5s doubling up to 300s.
            delay = min(base_delay * (2 ** (consecutive_errors - 1)), max_delay)
            time.sleep(delay)
def _handle_stream_event(self, event_type, data, convert_func):
    """Handle a streaming event for this timeline.

    Runs on the streaming worker thread; all timeline mutations are
    marshalled onto the UI thread with wx.CallAfter. Supported events:
    'update' (new status), 'delete' (status removed) and 'status.update'
    (status edited). Anything else is ignored.

    Args:
        event_type: SSE event name from the stream.
        data: Decoded JSON payload (dict, or a bare id string for deletes).
        convert_func: Converts plain dicts to AttribAccessDict recursively.
    """
    import wx
    from platforms.mastodon.models import mastodon_status_to_universal
    try:
        if event_type == 'update':
            status = convert_func(data)
            uni_status = mastodon_status_to_universal(status)
            if uni_status:
                # Reuse the normal load path so sounds/announcements apply.
                wx.CallAfter(lambda s=uni_status: self.load(items=[s]))
        elif event_type == 'delete':
            status_id = str(data)
            def do_delete():
                for i, s in enumerate(self.statuses):
                    if hasattr(s, 'id') and str(s.id) == status_id:
                        # Shift the cursor so the selection stays on the same item.
                        if i < self.index:
                            self.index = max(0, self.index - 1)
                        elif i == self.index and self.index >= len(self.statuses) - 1:
                            self.index = max(0, len(self.statuses) - 2)
                        self.statuses.pop(i)
                        self._status_ids.discard(status_id)
                        self.invalidate_display_cache()
                        if self == self.account.currentTimeline and self.account == self.app.currentAccount:
                            main.window.refreshList()
                        break
            wx.CallAfter(do_delete)
        elif event_type == 'status.update':
            status = convert_func(data)
            uni_status = mastodon_status_to_universal(status)
            if uni_status:
                def do_update():
                    # Replace the edited status in place, keeping its position.
                    for i, s in enumerate(self.statuses):
                        if hasattr(s, 'id') and str(s.id) == str(uni_status.id):
                            self.statuses[i] = uni_status
                            self.invalidate_display_cache()
                            if self == self.account.currentTimeline and self.account == self.app.currentAccount:
                                main.window.refreshList()
                            break
                wx.CallAfter(do_update)
    except Exception:
        pass # Silently ignore stream handler errors
def read_items(self, items):
    """Announce newly arrived items through the screen reader.

    With more than one account configured, the announcement is prefixed by
    the account handle. Four or more items are summarised as a count;
    fewer are read out in full via self.prepare().
    """
    pref = ""
    if len(self.app.accounts) > 1:
        pref = self.account.me.acct + ": "
    if len(items) >= 4:
        speak.speak(pref + str(len(items)) + " new in " + self.name)
    else:
        speak.speak(pref + ", ".join(self.prepare(items)))
def _status_passes_server_filter(self, status):
"""Check if a status should be shown based on server-side filters.
Returns True if the status should be shown, False if it should be hidden.
Checks the 'filtered' attribute for any filters with filter_action='hide'.
"""
filtered = getattr(status, 'filtered', None)
if not filtered:
return True
# Check each filter result - if any has action="hide", hide the post
for result in filtered:
filter_obj = getattr(result, 'filter', None)
if filter_obj:
action = getattr(filter_obj, 'filter_action', 'warn')
if action == 'hide':
return False
return True
def _add_status_with_filter(self, status, to_front=False):
    """Insert a status into the timeline, honouring server and local filters.

    Args:
        status: The status to add.
        to_front: Insert at the head of the list instead of appending.
    Returns:
        True when the status ended up in the visible list, False when a
        filter suppressed it.
    """
    from GUI.timeline_filter import should_show_status
    with self._status_lock:
        # Record the ID even for filtered posts so duplicates are caught.
        if hasattr(status, 'id'):
            self._status_ids.add(str(status.id))
        # Server-side filters with action "hide" suppress the post entirely.
        if not self._status_passes_server_filter(status):
            return False
        def place(target):
            # Honour the requested insertion end.
            if to_front:
                target.insert(0, status)
            else:
                target.append(status)
        # Keep the unfiltered backing list in sync when one exists.
        if hasattr(self, '_unfiltered_statuses'):
            place(self._unfiltered_statuses)
        # A local (client-side) filter may hide the post from the visible list.
        settings = getattr(self, '_filter_settings', None)
        if settings and not should_show_status(status, settings, self.app, account=self.account):
            return False
        place(self.statuses)
        self.invalidate_display_cache()
        return True
def has_status(self, status_id):
    """Return True if status_id is already present in this timeline (O(1))."""
    key = str(status_id)
    with self._status_lock:
        return key in self._status_ids
def try_add_status_id(self, status_id):
    """Atomically record a status ID, reporting whether it was new.

    Returns True when the ID was absent and has now been added, False when
    it was already tracked (i.e. a duplicate).
    """
    key = str(status_id)
    with self._status_lock:
        if key in self._status_ids:
            return False
        self._status_ids.add(key)
        return True
def has_gap(self):
    """Return True when at least one timeline gap still needs filling."""
    return bool(self._gaps)
def gap_count(self):
    """Return how many unfilled gaps this timeline currently tracks."""
    return len(self._gaps)
def _should_detect_gaps(self):
"""Check if gap detection should apply to this timeline type.
Gap detection is currently disabled due to false positives
(e.g., network outages causing huge idle times that trigger
false gap detection on any full page refresh).
"""
# Disabled for now - the heuristic-based approach causes too many false positives
return False
# ============ Cache Methods ============
def _get_cache(self):
"""Get the timeline cache from the platform backend, if available."""
if hasattr(self.account, '_platform') and self.account._platform:
return getattr(self.account._platform, 'timeline_cache', None)
return None
def _should_use_cache(self):
"""Check if this timeline should use caching."""
# Check global cache enabled
if not self.app.prefs.timeline_cache_enabled:
return False
# Check if cache is available
cache = self._get_cache()
if not cache or not cache.is_available():
return False
# Cacheable timeline types
cacheable_types = {
'home', 'mentions', 'notifications', 'favourites', 'bookmarks',
'user', 'list', 'search', 'feed',
'local', 'federated', 'instance', 'remote_user', 'pinned'
}
# Sent is a special user timeline we should cache
if self.type == 'user' and self.name == 'Sent':
return True
return self.type in cacheable_types
def _get_timeline_data_key(self):
"""Get the data key for this timeline for caching."""
if self.type in ('user', 'list', 'search', 'feed', 'instance', 'remote_user'):
return self.data
return None
def _get_item_type(self):
"""Get the item type for this timeline (status or notification)."""
if self.type == 'notifications':
return 'notification'
return 'status'
def get_cache_key(self):
    """Return the (type, name, data_key) tuple identifying this timeline in
    the cache (used for cleanup), or None when no cache backend exists."""
    cache = self._get_cache()
    if not cache:
        return None
    data_key = cache._get_timeline_key(self.type, self.name, self._get_timeline_data_key())
    return (self.type, self.name, data_key)
def clear_cache(self):
    """Drop every cached item for this timeline, when a cache is usable."""
    cache = self._get_cache()
    if not cache:
        return
    if cache.is_available():
        cache.clear_timeline(self.type, self.name, self._get_timeline_data_key())
def _load_from_cache(self):
    """Load timeline items from cache (synchronous).

    Populates self.statuses (and the unfiltered backing list when a local
    filter is active), restores the saved cursor position, primes since_id
    for the follow-up API refresh, and fires the usual ready/UI side
    effects so the timeline is immediately usable.

    Returns True if cache was loaded, False otherwise.
    """
    cache = self._get_cache()
    if not cache:
        return False
    try:
        items, metadata = cache.load_timeline(
            self.type,
            self.name,
            self._get_timeline_data_key(),
            self._get_item_type()
        )
        if not items:
            return False
        # Check if filter is active
        filter_active = hasattr(self, '_filter_settings') and self._filter_settings
        if filter_active:
            from GUI.timeline_filter import should_show_status
        # For notifications, check if we should filter out mentions from cache
        filter_mentions_from_notifications = False
        if self.type == "notifications":
            include_mentions = getattr(self.account.prefs, 'mentions_in_notifications', False)
            filter_mentions_from_notifications = not include_mentions
        # Load items into timeline
        for item in items:
            if item is None:
                continue
            # Filter mentions from notifications cache if setting is disabled
            if filter_mentions_from_notifications:
                notif_type = getattr(item, 'type', None)
                if notif_type == 'mention':
                    continue
            # Check server-side filter action - hide posts completely if filter_action="hide"
            if not self._status_passes_server_filter(item):
                continue
            # Track ID for O(1) duplicate checking
            if hasattr(item, 'id'):
                self._status_ids.add(str(item.id))
            # If filter is active, add to unfiltered list and only add to visible if it passes filter
            if filter_active:
                self._unfiltered_statuses.append(item)
                if should_show_status(item, self._filter_settings, self.app, account=self.account):
                    self.statuses.append(item)
            else:
                self.statuses.append(item)
        # Set up since_id for next refresh
        # Don't use since_id for timelines that use internal pagination IDs
        # (favourites, bookmarks, scheduled use internal IDs, not status IDs)
        if metadata.get('since_id') and items and self.type not in ('favourites', 'bookmarks', 'scheduled'):
            self.update_kwargs['since_id'] = metadata['since_id']
        # Clear any stale gaps from cache (gap detection is currently disabled)
        self._gaps = []
        # Set last load time to now
        import time
        self._last_load_time = time.time()
        # Store position ID for restore after API refresh
        # ID-based restore is more robust than index when new items arrive
        self._cached_position_id = metadata.get('last_position_id')
        # Set initial position (will be corrected after API refresh using ID)
        saved_index = metadata.get('last_index', 0)
        if self.statuses and saved_index >= 0 and saved_index < len(self.statuses):
            self.index = saved_index
        elif not self.statuses:
            # All items filtered out or empty cache - keep index at 0
            self.index = 0
        elif not self.app.prefs.reversed:
            self.index = 0
        else:
            # Reversed list: newest items live at the end, so start there.
            self.index = len(self.statuses) - 1
        # Invalidate display cache
        self.invalidate_display_cache()
        # Mark initial load as complete (so API refresh is treated as update, not initial)
        self.initial = False
        # Notify account that this timeline's initial load is complete
        if hasattr(self.account, '_on_timeline_initial_load_complete'):
            self.account._on_timeline_initial_load_complete()
        # Start streaming for this timeline if supported
        self.start_stream()
        # Play ready sound if this is the last timeline
        if self.account.timelines and self == self.account.timelines[-1] and not self.account.ready:
            self.account.ready = True
            sound.play(self.account, "ready")
        # Update UI
        if self.account == self.app.currentAccount and self.account.currentTimeline == self:
            wx.CallAfter(main.window.refreshList)
        return True
    except Exception as e:
        # A corrupt/incompatible cache must never break startup: log and
        # fall back to a normal network load.
        import traceback
        print(f"Cache load error for {self.name}: {e}")
        traceback.print_exc()
        return False
def _refresh_after_cache(self):
    """Background refresh after loading from cache.

    Runs on a worker thread: performs a normal API load (treated as an
    update because _load_from_cache already cleared self.initial), then
    restores the user's reading position by item ID, which survives the
    index shifts caused by newly arrived items.
    """
    # Do a normal load (will fetch new items from API)
    # Since initial=False after cache load, this will be treated as an update
    self.load()
    # Restore position by saved ID after refresh
    # This is more robust than index-based restore since new items shift positions
    position_restored = False
    # For notifications/mentions, use the account prefs saved ID
    if self.type in ("notifications", "mentions"):
        position_restored = self.sync_local_position()
    # For other timelines, use the cached position ID
    elif hasattr(self, '_cached_position_id') and self._cached_position_id:
        # Find the item with this ID and set index
        for i, status in enumerate(self.statuses):
            if str(status.id) == str(self._cached_position_id):
                self.index = i
                position_restored = True
                break
        if not position_restored:
            # Position ID not found - item may have been deleted or aged out
            print(f"Position restore: ID {self._cached_position_id} not found in {self.name} ({len(self.statuses)} items)")
        # Clean up
        del self._cached_position_id
    if position_restored and self.app.currentAccount == self.account and self.account.currentTimeline == self:
        # Reflect the restored position in the visible list (UI thread).
        wx.CallAfter(main.window.list2.SetSelection, self.index)
def _cache_timeline(self):
"""Save current timeline items to cache (called after API load)."""
if not self._should_use_cache():
return
cache = self._get_cache()
if not cache:
return
# Use unfiltered statuses if filter is active, otherwise use visible statuses
# This ensures filtered-out items are still cached for when filter is changed/removed
source_statuses = getattr(self, '_unfiltered_statuses', None) or self.statuses
# Don't cache if no items
if not source_statuses:
return
try:
# Get cache limit
cache_limit = self.app.prefs.timeline_cache_limit
# Get items to cache - always cache newest items regardless of reversed setting
# When reversed=False: newest at start, so take [:limit]
# When reversed=True: newest at end, so take [-limit:]
if self.app.prefs.reversed:
items_to_cache = source_statuses[-cache_limit:] if len(source_statuses) > cache_limit else source_statuses[:]
else:
items_to_cache = source_statuses[:cache_limit]
# Get the ID at current position for robust restore (use visible statuses for position)
position_id = None
if self.index >= 0 and self.index < len(self.statuses):
position_id = str(self.statuses[self.index].id)
# Save to cache with gap info and current position
cache.save_timeline(
self.type,
self.name,
self._get_timeline_data_key(),
items_to_cache,
self._get_item_type(),
limit=cache_limit,
gaps=self._gaps if self._gaps else None,
last_index=self.index,
last_position_id=position_id
)
except Exception as e:
print(f"Cache save error for {self.name}: {e}")
def load_conversation(self):
    """Load the full thread around self.status into this timeline.

    Prefers the platform's get_status_context() (full thread; works better
    for Bluesky) and falls back to recursively walking in_reply_to_id for
    plain Mastodon. The cursor is placed on the source status; threads are
    always shown oldest-first regardless of the global `reversed` setting.
    """
    status = self.status
    # For boosts, thread the reblogged status so we get replies to the
    # original post, not the boost wrapper.
    actual_status = status
    if hasattr(status, 'reblog') and status.reblog:
        actual_status = status.reblog
    # For mentions, .id is the notification id; the real status id is
    # stored in _original_status_id.
    status_id = getattr(actual_status, '_original_status_id', None) or actual_status.id
    source_status_id = str(status_id)  # Track source post to focus on it

    def load_recursive(default_position):
        # Fallback: recursive reply-chain walk. Returns the index of the
        # source status in self.statuses, or default_position if absent.
        self.process_status(actual_status)
        self.invalidate_display_cache()
        for i, s in enumerate(self.statuses):
            if str(getattr(s, 'id', '')) == source_status_id:
                return i
        return default_position

    source_position = 0
    if hasattr(self.account, '_platform') and self.account._platform:
        try:
            context = self.account._platform.get_status_context(status_id)
            ancestors = context.get('ancestors', [])
            descendants = context.get('descendants', [])
            # Build thread: ancestors -> source status -> descendants
            for ancestor in ancestors:
                self.statuses.append(ancestor)
                if hasattr(ancestor, 'id'):
                    self._status_ids.add(str(ancestor.id))
            # The source post sits right after all of its ancestors.
            source_position = len(ancestors)
            self.statuses.append(actual_status)
            if hasattr(actual_status, 'id'):
                self._status_ids.add(str(actual_status.id))
            for descendant in descendants:
                self.statuses.append(descendant)
                if hasattr(descendant, 'id'):
                    self._status_ids.add(str(descendant.id))
            self.invalidate_display_cache()
        except Exception:
            # Context lookup failed mid-way: fall back to the recursive walk,
            # keeping any position already derived from appended ancestors.
            source_position = load_recursive(source_position)
    else:
        # Fall back to recursive method for the Mastodon API.
        source_position = load_recursive(source_position)
    # Conversation threads are always displayed in chronological order
    # (oldest first) regardless of the global reversed setting, so the
    # index maps directly onto the stored list.
    self.index = source_position
    if self.statuses:
        # Clamp to a valid position.
        self.index = max(0, min(self.index, len(self.statuses) - 1))
    if self.account.currentTimeline == self:
        wx.CallAfter(main.window.refreshList)
    sound.play(self.account, "search")
    # Notify initial load complete
    if self.initial:
        self.initial = False
        if hasattr(self.account, '_on_timeline_initial_load_complete'):
            self.account._on_timeline_initial_load_complete()
        # Start streaming for this timeline if supported
        # NOTE(review): original indentation was lost; placing start_stream()
        # inside the initial-load branch — confirm against upstream file.
        self.start_stream()
def play(self, items=None):
    """Play the arrival sound appropriate for this timeline.

    items: newly arrived items, used by the notifications/mentions
    timelines to pick a more specific sound (mentions/messages).
    """
    account = self.account
    tl_type = self.type
    if tl_type == "user":
        # Prefer a per-user sound file; fall back to the generic one.
        custom = "sounds/" + account.prefs.soundpack + "/" + self.user.acct + ".ogg"
        if os.path.exists(custom):
            sound.play(account, self.user.acct)
        else:
            sound.play(account, "user")
        return
    if tl_type == "search":
        sound.play(account, "search")
        return
    if tl_type == "list":
        sound.play(account, "list")
        return
    if tl_type == "notifications":
        if items:
            # When the mentions timeline is hidden, mention notifications
            # land here, so give them their own (or the DM) sound.
            mentions_hidden = any(tl.type == "mentions" and tl.hide
                                  for tl in account.timelines)
            if mentions_hidden:
                has_mention = False
                has_direct_mention = False
                for item in items:
                    if getattr(item, 'type', None) != 'mention':
                        continue
                    has_mention = True
                    status = getattr(item, 'status', None)
                    if status and getattr(status, 'visibility', None) == 'direct':
                        has_direct_mention = True
                        break
                if has_direct_mention:
                    sound.play(account, "messages")
                    return
                if has_mention:
                    sound.play(account, "mentions")
                    return
        sound.play(account, "notification")
        return
    if tl_type == "mentions":
        # Direct messages get the messages sound instead of the mention one.
        if items:
            for item in items:
                if getattr(item, 'visibility', None) == 'direct':
                    sound.play(account, "messages")
                    return
        sound.play(account, self.name)
        return
    sound.play(account, self.name)
def process_status(self, status):
# Process parents FIRST to maintain chronological order (oldest first)
try:
if hasattr(status, "in_reply_to_id") and status.in_reply_to_id is not None:
# Check if this is a remote status
if hasattr(status, '_instance_url') and status._instance_url:
parent = self._lookup_remote_status(status._instance_url, status.in_reply_to_id)
else:
parent = self.app.lookup_status(self.account, status.in_reply_to_id)
if parent:
self.process_status(parent)
except:
pass
# Then append current status
self.statuses.append(status)
if hasattr(status, 'id'):
self._status_ids.add(str(status.id))
def _lookup_remote_status(self, instance_url, status_id):
"""Look up a status from a remote instance."""
if not hasattr(self.account, '_platform') or not self.account._platform:
return None
try:
remote_api = self.account._platform.get_or_create_remote_api(instance_url)
status = remote_api.status(id=status_id)
if status:
from platforms.mastodon.models import mastodon_status_to_universal
from urllib.parse import urlparse
uni_status = mastodon_status_to_universal(status)
if uni_status:
# Mark as remote
uni_status._instance_url = instance_url
parsed = urlparse(instance_url)
instance_domain = parsed.netloc or parsed.path.strip('/')
if hasattr(uni_status, 'account') and uni_status.account:
if '@' not in uni_status.account.acct:
uni_status.account.acct = f"{uni_status.account.acct}@{instance_domain}"
uni_status.account._instance_url = instance_url
return uni_status
except:
pass
return None
def hide_tl(self):
    """Hide this timeline from the timeline list, refusing for timeline
    types that should be closed rather than hidden."""
    always_unhideable = self.type in ("list", "search", "conversation",
                                      "instance", "remote_user",
                                      "favourites", "bookmarks")
    # User timelines can't be hidden either, except the special "Sent" one.
    if always_unhideable or (self.type == "user" and self.name != "Sent"):
        self.app.alert("You can't hide this timeline. Try closing it instead.", "Error")
        return
    self.hide = True
    # Persist the hidden flag in per-account timeline settings.
    self.app.get_timeline_settings(self.account.me.id, self.name).hide = self.hide
    self.app.save_timeline_settings()
    # If the hidden timeline was focused, move focus to the first one.
    if self.account.currentTimeline == self:
        self.account.currentTimeline = self.account.get_first_timeline()
    main.window.refreshTimelines()