TypeError: cannot pickle '_thread.lock' object #1723
oscarramirezs
started this conversation in
Ideas
Replies: 1 comment
-
Hi @oscarramirezs, are you running the program with multi-processing enabled? Thanks.
Beta Was this translation helpful? Give feedback.
0 replies
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
-
Hi
Can someone help me, please?
I'm using SmartCacheDataset:
`train_ds = SmartCacheDataset(
data=train_files, transform=train_transforms,
replace_rate=0.1, cache_num=20, cache_rate=1.0, num_init_workers=2, num_replace_workers=4)
train_loader = DataLoader(train_ds, batch_size=1, shuffle=True, num_workers=4)
val_ds = SmartCacheDataset(
data=val_files, transform=val_transforms, replace_rate=0.1, cache_num=3, cache_rate=1.0, num_init_workers=2, num_replace_workers=4 )
val_loader = DataLoader(val_ds, batch_size=1, num_workers=4)
`
100%|██████████| 20/20 [00:35<00:00, 1.78s/it]
100%|██████████| 3/3 [00:18<00:00, 6.32s/it]
and when I am training the 3D UNet model on a Mac, I get the following error:
`
max_epochs = 20
val_interval = 2
best_metric = -1
best_metric_epoch = -1
epoch_loss_values = []
metric_values = []
post_pred = AsDiscrete(argmax=True, to_onehot=True, n_classes=2)
post_label = AsDiscrete(to_onehot=True, n_classes=2)
%time
for epoch in range(max_epochs):
%time
print("-" * 10)
print(f"epoch {epoch + 1}/{max_epochs}")
model_humerus.train()
epoch_loss = 0
step = 0
for batch_data in train_loader:
step += 1
`CPU times: user 1e+03 ns, sys: 0 ns, total: 1e+03 ns
Wall time: 2.86 µs
CPU times: user 2 µs, sys: 1e+03 ns, total: 3 µs
Wall time: 3.1 µs
epoch 1/20
TypeError Traceback (most recent call last)
in
15 epoch_loss = 0
16 step = 0
---> 17 for batch_data in train_loader:
18 step += 1
19
/opt/anaconda3/lib/python3.8/site-packages/torch/utils/data/dataloader.py in iter(self)
277 return _SingleProcessDataLoaderIter(self)
278 else:
--> 279 return _MultiProcessingDataLoaderIter(self)
280
281 @property
/opt/anaconda3/lib/python3.8/site-packages/torch/utils/data/dataloader.py in init(self, loader)
717 # before it starts, and del tries to join but will get:
718 # AssertionError: can only join a started process.
--> 719 w.start()
720 self._index_queues.append(index_queue)
721 self._workers.append(w)
/opt/anaconda3/lib/python3.8/multiprocessing/process.py in start(self)
119 'daemonic processes are not allowed to have children'
120 _cleanup()
--> 121 self._popen = self._Popen(self)
122 self._sentinel = self._popen.sentinel
123 # Avoid a refcycle if the target function holds an indirect
/opt/anaconda3/lib/python3.8/multiprocessing/context.py in _Popen(process_obj)
222 @staticmethod
223 def _Popen(process_obj):
--> 224 return _default_context.get_context().Process._Popen(process_obj)
225
226 class DefaultContext(BaseContext):
/opt/anaconda3/lib/python3.8/multiprocessing/context.py in _Popen(process_obj)
282 def _Popen(process_obj):
283 from .popen_spawn_posix import Popen
--> 284 return Popen(process_obj)
285
286 class ForkServerProcess(process.BaseProcess):
/opt/anaconda3/lib/python3.8/multiprocessing/popen_spawn_posix.py in init(self, process_obj)
30 def init(self, process_obj):
31 self._fds = []
---> 32 super().init(process_obj)
33
34 def duplicate_for_child(self, fd):
/opt/anaconda3/lib/python3.8/multiprocessing/popen_fork.py in init(self, process_obj)
17 self.returncode = None
18 self.finalizer = None
---> 19 self._launch(process_obj)
20
21 def duplicate_for_child(self, fd):
/opt/anaconda3/lib/python3.8/multiprocessing/popen_spawn_posix.py in _launch(self, process_obj)
45 try:
46 reduction.dump(prep_data, fp)
---> 47 reduction.dump(process_obj, fp)
48 finally:
49 set_spawning_popen(None)
/opt/anaconda3/lib/python3.8/multiprocessing/reduction.py in dump(obj, file, protocol)
58 def dump(obj, file, protocol=None):
59 '''Replacement for pickle.dump() using ForkingPickler.'''
---> 60 ForkingPickler(file, protocol).dump(obj)
61
62 #
TypeError: cannot pickle '_thread.lock' object
`
Thank you!
Beta Was this translation helpful? Give feedback.
All reactions