Gracefully handle CUDA out of memory errors with streaming

oobabooga 2023-03-28 19:20:50 -03:00
parent b2f356a9ae
commit 304f812c63

@@ -1,4 +1,5 @@
 import gc
+import traceback
 from queue import Queue
 from threading import Thread
@@ -63,6 +64,10 @@ class Iteratorize:
                 ret = self.mfunc(callback=_callback, **self.kwargs)
             except ValueError:
                 pass
+            except:
+                traceback.print_exc()
+                pass
+            clear_torch_cache()
             self.q.put(self.sentinel)
             if self.c_callback:
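
For context, the pattern introduced here is: run the blocking generation call in a worker thread, catch any exception (including a CUDA out-of-memory error) so the thread does not die silently, print the traceback, clear the torch cache, and always push the sentinel so the consuming iterator terminates. Below is a minimal, self-contained sketch of that pattern; the names `stream` and `fake_generate`, and the body of `clear_torch_cache`, are illustrative assumptions and not the repository's actual code (the diff above only shows that `clear_torch_cache()` is called).

import gc
import traceback
from queue import Queue
from threading import Thread

import torch


def clear_torch_cache():
    # Assumed body: collect Python garbage, then release cached CUDA blocks
    # so a failed generation does not keep GPU memory pinned.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()


def stream(generate_fn, **kwargs):
    # Run generate_fn in a worker thread and yield whatever it passes to its
    # callback. Any exception (e.g. a CUDA OOM error) is printed instead of
    # silently killing the thread, the torch cache is cleared, and the
    # sentinel is always queued so this loop terminates.
    q = Queue()
    sentinel = object()

    def task():
        try:
            generate_fn(callback=q.put, **kwargs)
        except ValueError:
            pass
        except Exception:
            traceback.print_exc()
        clear_torch_cache()
        q.put(sentinel)

    Thread(target=task).start()
    while True:
        item = q.get()
        if item is sentinel:
            break
        yield item


if __name__ == "__main__":
    # Toy generator that streams a few tokens and then fails mid-run.
    def fake_generate(callback, n=3):
        for i in range(n):
            callback(f"token {i}")
        raise RuntimeError("simulated CUDA OOM")

    for token in stream(fake_generate, n=3):
        print(token)  # prints token 0..2, then the loop ends cleanly

The point of the broad except plus sentinel is that the consumer's for-loop always finishes even when generation blows up mid-stream, instead of hanging on an empty queue.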