I recently upgraded my GPU from an RTX 2060 6GB to an RTX 3060 12GB. Everything was working better until Windows decided it needed to install updates while I had generations queued with the Agent Scheduler extension. I came back to the PC and every time I try to generate I get the following:
Traceback (most recent call last):
File "C:\Users\lucen\AppData\Local\Programs\Python\Python310\lib\threading.py", line 1016, in _bootstrap_inner
self.run()
File "F:\stabledif\stable-diffusion-webui\webui\modules\memmon.py", line 53, in run
free, total = self.cuda_mem_get_info()
File "F:\stabledif\stable-diffusion-webui\webui\modules\memmon.py", line 34, in cuda_mem_get_info
return torch.cuda.mem_get_info(index)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\cuda\memory.py", line 663, in mem_get_info
return torch.cuda.cudart().cudaMemGetInfo(device)
RuntimeError: CUDA error: an illegal memory access was encountered
Compile with TORCH_USE_CUDA_DSA to enable device-side assertions.
*** Error completing request
*** Arguments: ('task(qpxthq6ht59sh8v)', <gradio.routes.Request object at 0x000002214114D600>, 'a woman', '', ['Showcase Prompt Jan 2025 rev 1'], 1, 1, 6, 768, 512, True, 0.26, 1.5, '4x-UltraSharp', 20, 0, 0, 'Use same checkpoint', 'Use same sampler', 'Use same scheduler', '', '', [], 0, 30, 'DPM++ 2M SDE', 'Karras', False, '', 0.8, -1, False, -1, 0, 0, 0, True, False, 1, False, False, False, 1.1, 1.5, 100, 0.7, False, False, True, False, False, 0, 'Gustavosta/MagicPrompt-Stable-Diffusion', '', False, 7, 100, 'Constant', 0, 'Constant', 0, 4, True, 'MEAN', 'AD', 1, ControlNetUnit(is_ui=True, input_mode=<InputMode.SIMPLE: 'simple'>, batch_images='', output_dir='', loopback=False, enabled=False, module='none', model='None', weight=1.0, image=None, resize_mode=<ResizeMode.INNER_FIT: 'Crop and Resize'>, low_vram=False, processor_res=-1, threshold_a=-1.0, threshold_b=-1.0, guidance_start=0.0, guidance_end=1.0, pixel_perfect=False, control_mode=<ControlMode.BALANCED: 'Balanced'>, inpaint_crop_input_image=False, hr_option=<HiResFixOption.BOTH: 'Both'>, save_detected_map=True, advanced_weighting=None, effective_region_mask=None, pulid_mode=<PuLIDMode.FIDELITY: 'Fidelity'>, union_control_type=<ControlNetUnionControlType.UNKNOWN: 'Unknown'>, ipadapter_input=None, mask=None, batch_mask_dir=None, animatediff_batch=False, batch_modifiers=[], batch_image_files=[], batch_keyframe_idx=None), ControlNetUnit(is_ui=True, input_mode=<InputMode.SIMPLE: 'simple'>, batch_images='', output_dir='', loopback=False, enabled=False, module='none', model='None', weight=1.0, image=None, resize_mode=<ResizeMode.INNER_FIT: 'Crop and Resize'>, low_vram=False, processor_res=-1, threshold_a=-1.0, threshold_b=-1.0, guidance_start=0.0, guidance_end=1.0, pixel_perfect=False, control_mode=<ControlMode.BALANCED: 'Balanced'>, inpaint_crop_input_image=False, hr_option=<HiResFixOption.BOTH: 'Both'>, save_detected_map=True, advanced_weighting=None, effective_region_mask=None, pulid_mode=<PuLIDMode.FIDELITY: 'Fidelity'>, union_control_type=<ControlNetUnionControlType.UNKNOWN: 'Unknown'>, ipadapter_input=None, mask=None, batch_mask_dir=None, animatediff_batch=False, batch_modifiers=[], batch_image_files=[], batch_keyframe_idx=None), ControlNetUnit(is_ui=True, input_mode=<InputMode.SIMPLE: 'simple'>, batch_images='', output_dir='', loopback=False, enabled=False, module='none', model='None', weight=1.0, image=None, resize_mode=<ResizeMode.INNER_FIT: 'Crop and Resize'>, low_vram=False, processor_res=-1, threshold_a=-1.0, threshold_b=-1.0, guidance_start=0.0, guidance_end=1.0, pixel_perfect=False, control_mode=<ControlMode.BALANCED: 'Balanced'>, inpaint_crop_input_image=False, hr_option=<HiResFixOption.BOTH: 'Both'>, save_detected_map=True, advanced_weighting=None, effective_region_mask=None, pulid_mode=<PuLIDMode.FIDELITY: 'Fidelity'>, union_control_type=<ControlNetUnionControlType.UNKNOWN: 'Unknown'>, ipadapter_input=None, mask=None, batch_mask_dir=None, animatediff_batch=False, batch_modifiers=[], batch_image_files=[], batch_keyframe_idx=None), False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50) {}
Traceback (most recent call last):
File "F:\stabledif\stable-diffusion-webui\webui\modules\call_queue.py", line 74, in f
res = list(func(*args, **kwargs))
File "F:\stabledif\stable-diffusion-webui\webui\modules\call_queue.py", line 53, in f
res = func(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\modules\txt2img.py", line 109, in txt2img
processed = processing.process_images(p)
File "F:\stabledif\stable-diffusion-webui\webui\modules\processing.py", line 847, in process_images
res = process_images_inner(p)
File "F:\stabledif\stable-diffusion-webui\webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 59, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\modules\processing.py", line 966, in process_images_inner
p.setup_conds()
File "F:\stabledif\stable-diffusion-webui\webui\modules\processing.py", line 1520, in setup_conds
super().setup_conds()
File "F:\stabledif\stable-diffusion-webui\webui\modules\processing.py", line 502, in setup_conds
self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data)
File "F:\stabledif\stable-diffusion-webui\webui\modules\processing.py", line 488, in get_conds_with_caching
cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
File "F:\stabledif\stable-diffusion-webui\webui\modules\prompt_parser.py", line 188, in get_learned_conditioning
conds = model.get_learned_conditioning(texts)
File "F:\stabledif\stable-diffusion-webui\webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 669, in get_learned_conditioning
c = self.cond_stage_model(c)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\modules\sd_hijack_clip.py", line 313, in forward
return super().forward(texts)
File "F:\stabledif\stable-diffusion-webui\webui\modules\sd_hijack_clip.py", line 227, in forward
z = self.process_tokens(tokens, multipliers)
File "F:\stabledif\stable-diffusion-webui\webui\modules\sd_hijack_clip.py", line 269, in process_tokens
z = self.encode_with_transformers(tokens)
File "F:\stabledif\stable-diffusion-webui\webui\modules\sd_hijack_clip.py", line 352, in encode_with_transformers
outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 822, in forward
return self.text_model(
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 740, in forward
encoder_outputs = self.encoder(
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 654, in forward
layer_outputs = encoder_layer(
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 393, in forward
hidden_states = self.mlp(hidden_states)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 350, in forward
hidden_states = self.fc2(hidden_states)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\extensions-builtin\Lora\networks.py", line 584, in network_Linear_forward
return originals.Linear_forward(self, input)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\nn\modules\linear.py", line 114, in forward
return F.linear(input, self.weight, self.bias)
RuntimeError: CUDA error: CUBLAS_STATUS_EXECUTION_FAILED when calling cublasGemmEx( handle, opa, opb, m, n, k, &falpha, a, CUDA_R_16F, lda, b, CUDA_R_16F, ldb, &fbeta, c, CUDA_R_16F, ldc, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)
Traceback (most recent call last):
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\gradio\routes.py", line 488, in run_predict
output = await app.get_blocks().process_api(
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\gradio\blocks.py", line 1431, in process_api
result = await self.call_function(
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\gradio\blocks.py", line 1103, in call_function
prediction = await anyio.to_thread.run_sync(
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\anyio\to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\anyio_backends_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\anyio_backends_asyncio.py", line 807, in run
result = context.run(func, *args)
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\gradio\utils.py", line 707, in wrapper
response = f(*args, **kwargs)
File "F:\stabledif\stable-diffusion-webui\webui\modules\call_queue.py", line 91, in f
devices.torch_gc()
File "F:\stabledif\stable-diffusion-webui\webui\modules\devices.py", line 81, in torch_gc
torch.cuda.empty_cache()
File "F:\stabledif\stable-diffusion-webui\webui\venv\lib\site-packages\torch\cuda\memory.py", line 159, in empty_cache
torch._C._cuda_emptyCache()
RuntimeError: CUDA error: an illegal memory access was encountered
Compile with TORCH_USE_CUDA_DSA to enable device-side assertions.
I've tried everything I can find to resolve it short of reinstalling Windows: reverted the updates, removed extensions, reinstalled Automatic1111, deleted the venv folder, updated the Nvidia drivers, even switched from the Game Ready to the Studio drivers. Nothing works. I also tried my SwarmUI install using Comfy as the backend and it's broken as well. Tonight I tried updating CUDA, reinstalling xformers, and setting COMMANDLINE_ARGS= --autolaunch --skip-torch-cuda-test --medvram, with no luck.
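For anyone hitting something similar, one way to tell whether the problem is the webui environment or the card itself is to run a bare PyTorch check in a clean Python session, with no webui or extensions involved. This is only a minimal sketch and assumes the 3060 is device 0:

```python
# Minimal CUDA sanity check in a clean Python session (assumes the 3060 is device 0).
import torch

print(torch.__version__, torch.version.cuda)
print(torch.cuda.get_device_name(0))
print("free/total VRAM:", torch.cuda.mem_get_info(0))

# The same kind of half-precision GEMM that cuBLAS failed on in the traceback above.
a = torch.randn(4096, 4096, device="cuda", dtype=torch.float16)
b = torch.randn(4096, 4096, device="cuda", dtype=torch.float16)
c = a @ b
torch.cuda.synchronize()
print("FP16 matmul OK, mean:", c.float().abs().mean().item())
```

If this also throws illegal memory access / cuBLAS errors outside the webui, the venv, extensions, and webui settings are off the hook and it points at the driver, CUDA runtime, or the card.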
Any ideas? I'm planning to swap the video card back to the 2060 tomorrow to check whether it's the card.
Did some more testing: I couldn't load any games, but FurMark ran without errors. Swapped back to the 2060 and everything works. I'm guessing the card is bad and I'm starting an RMA.
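Worth noting that FurMark is mostly a shader/power-draw stress test, while the failures in the traceback are FP16 cuBLAS GEMMs, so a clean FurMark run doesn't necessarily clear the VRAM or the tensor-math path. A rough PyTorch soak test along these lines exercises the same path that was failing; the block count and device index here are just assumptions for a 12 GB card on device 0, not a definitive diagnostic:

```python
# Rough VRAM + FP16 compute soak test (a sketch; sizes assume a 12 GB card on device 0).
import torch

torch.cuda.set_device(0)

# Park ~8 GB of allocations so marginal memory chips actually get exercised (16 x 512 MB).
blocks = [torch.randn(256, 1024, 1024, device="cuda", dtype=torch.float16) for _ in range(16)]

a = torch.randn(8192, 8192, device="cuda", dtype=torch.float16)
for i in range(200):
    a = (a @ a).clamp_(-1, 1)          # same FP16 GEMM path that cuBLAS choked on
    blocks[i % len(blocks)].add_(0.0)  # touch the parked allocations too
    if i % 50 == 0:
        torch.cuda.synchronize()
        print("iteration", i, "ok")

torch.cuda.synchronize()
print("soak test finished without CUDA errors")
```

If a card passes FurMark but trips CUDA errors on a loop like this, that's consistent with bad VRAM or a flaky compute unit rather than a software problem; crank up the iteration count for a longer soak.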