This is my first run of this. I have a laptop with an Nvidia 3060 GPU, running Ubuntu in WSL on Windows 10. I tried my first prompt from the web page but got the error below. I didn't install the Nvidia driver within Ubuntu because (a) it didn't recognise my GPU and (b) I ran into all sorts of other problems. Do I need to install the Nvidia driver inside WSL, or does it use the host's Windows driver?
sd | ERROR: Exception in ASGI application
sd | Traceback (most recent call last):
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/uvicorn/protocols/http/httptools_impl.py", line 401, in run_asgi
sd | result = await app(self.scope, self.receive, self.send)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/uvicorn/middleware/proxy_headers.py", line 78, in __call__
sd | return await self.app(scope, receive, send)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/fastapi/applications.py", line 269, in __call__
sd | await super().__call__(scope, receive, send)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/starlette/applications.py", line 124, in __call__
sd | await self.middleware_stack(scope, receive, send)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/starlette/middleware/errors.py", line 184, in __call__
sd | raise exc
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/starlette/middleware/errors.py", line 162, in __call__
sd | await self.app(scope, receive, _send)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/starlette/exceptions.py", line 93, in __call__
sd | raise exc
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/starlette/exceptions.py", line 82, in __call__
sd | await self.app(scope, receive, sender)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/fastapi/middleware/asyncexitstack.py", line 21, in __call__
sd | raise e
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/fastapi/middleware/asyncexitstack.py", line 18, in __call__
sd | await self.app(scope, receive, send)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/starlette/routing.py", line 670, in __call__
sd | await route.handle(scope, receive, send)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/starlette/routing.py", line 266, in handle
sd | await self.app(scope, receive, send)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/starlette/routing.py", line 65, in app
sd | response = await func(request)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/fastapi/routing.py", line 227, in app
sd-ui | INFO: 172.18.0.1:59682 - "POST /image HTTP/1.1" 500 Internal Server Error
sd | raw_response = await run_endpoint_function(
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/fastapi/routing.py", line 162, in run_endpoint_function
sd | return await run_in_threadpool(dependant.call, **values)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/starlette/concurrency.py", line 41, in run_in_threadpool
sd | return await anyio.to_thread.run_sync(func, *args)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/anyio/to_thread.py", line 31, in run_sync
sd | return await get_asynclib().run_sync_in_worker_thread(
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
sd | return await future
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 867, in run
sd | result = context.run(func, *args)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/cog/server/http.py", line 79, in predict
sd | output = predictor.predict(**request.input.dict())
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
sd | return func(*args, **kwargs)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torch/amp/autocast_mode.py", line 12, in decorate_autocast
sd | return func(*args, **kwargs)
sd | File "/src/predict.py", line 88, in predict
sd | output = self.pipe(
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
sd | return func(*args, **kwargs)
sd | File "/src/image_to_image.py", line 156, in __call__
sd | noise_pred = self.unet(
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
sd | return forward_call(*input, **kwargs)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/diffusers/models/unet_2d_condition.py", line 168, in forward
sd | sample = upsample_block(
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
sd | return forward_call(*input, **kwargs)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/diffusers/models/unet_blocks.py", line 1037, in forward
sd | hidden_states = attn(hidden_states, context=encoder_hidden_states)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
sd | return forward_call(*input, **kwargs)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/diffusers/models/attention.py", line 168, in forward
sd | x = block(x, context=context)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
sd | return forward_call(*input, **kwargs)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/diffusers/models/attention.py", line 196, in forward
sd | x = self.attn1(self.norm1(x)) + x
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
sd | return forward_call(*input, **kwargs)
sd | File "/root/.pyenv/versions/3.10.6/lib/python3.10/site-packages/diffusers/models/attention.py", line 254, in forward
sd | attn = sim.softmax(dim=-1)
sd | RuntimeError: CUDA error: unknown error
sd | CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.
sd | For debugging consider passing CUDA_LAUNCH_BLOCKING=1.