# ---- Colab notebook cell: render PTI-projected latent to video (with segmentation) ----
%cd /content/PanoHead
!python gen_videos_proj_withseg.py --output=/content/output/easy-khair-180-gpc0.8-trans10-025000.pkl/0/PTI_render/post.mp4 --latent=/content/output/easy-khair-180-gpc0.8-trans10-025000.pkl/0/projected_w.npz --trunc 0.7 --w-frames 120 --network /content/output/easy-khair-180-gpc0.8-trans10-025000.pkl/0/fintuned_generator.pkl --shapes 1 --cfg Head ##--nrr 256
# ---- Console output captured in Colab (run fails with CUDA OOM during shape generation) ----
/content/PanoHead
Loading networks from "/content/output/easy-khair-180-gpc0.8-trans10-025000.pkl/0/fintuned_generator.pkl"...
Setting up PyTorch plugin "bias_act_plugin"... Done.
Setting up PyTorch plugin "upfirdn2d_plugin"... Done.
0% 0/120 [00:00<?, ?it/s]Generating shape for frame 0 / 120 ...
7% 10000000/134217728 [00:00<00:09, 13256492.97it/s]
0% 0/120 [00:11<?, ?it/s]
Traceback (most recent call last):
File "/content/PanoHead/gen_videos_proj_withseg.py", line 301, in
generate_images() # pylint: disable=no-value-for-parameter
File "/usr/local/lib/python3.10/dist-packages/click/core.py", line 1157, in call
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.10/dist-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.10/dist-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/content/PanoHead/gen_videos_proj_withseg.py", line 296, in generate_images
gen_interp_video(G=G, mp4=output, ws=ws, bitrate='100M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=w_frames, psi=truncation_psi, truncation_cutoff=truncation_cutoff, cfg=cfg, image_mode=image_mode, gen_shapes=shapes, device=device)
File "/content/PanoHead/gen_videos_proj_withseg.py", line 172, in gen_interp_video
sigma = G.sample_mixed(samples[:, head:head+max_batch], transformed_ray_directions_expanded[:, :samples.shape[1]-head], w.unsqueeze(0), truncation_psi=psi, noise_mode='const')['sigma']
File "", line 146, in sample_mixed
File "", line 148, in run_model
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "", line 179, in forward
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/container.py", line 217, in forward
input = module(input)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/activation.py", line 843, in forward
return F.softplus(input, self.beta, self.threshold)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.38 GiB (GPU 0; 14.75 GiB total capacity; 10.78 GiB already allocated; 1.69 GiB free; 12.04 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF