Skip to content

[Bug]: torch runtime error #127

@ak-sangal

Description

@ak-sangal

Is there an existing issue for this?

What happened?

Error completing request
*** Arguments: ('task(4bkz1d2m6aqg8m4)', 2, 'nude', '', [], None, None, {'image': <PIL.Image.Image image mode=RGBA size=291x512 at 0x20DC07642E0>, 'mask': <PIL.Image.Image image mode=RGB size=291x512 at 0x20DC07651E0>}, None, None, None, None, 20, 'Euler a', 4, 0, 1, 1, 1, 7, 1.5, 0.75, 0, 512, 512, 1, 0, 0, 32, 0, '', '', '', [], False, [], '', <gradio.routes.Request object at 0x0000020DD6ADC670>, 3, False, '', 0.8, -1, False, -1, 0, 0, 0, '* CFG Scale should be 2 or lower.', True, True, '', '', True, 50, True, 1, 0, False, 4, 0.5, 'Linear', 'None', 'None', 'None', 'GPU.0', True, 'Euler a', True, False, 'Latent', 10, 0.5, False, 'None', 0.8, '

Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8

', 128, 8, ['left', 'right', 'up', 'down'], 1, 0.05, 128, 4, 0, ['left', 'right', 'up', 'down'], False, False, 'positive', 'comma', 0, False, False, '', '

Will upscale the image by the selected scale factor; use width and height sliders to set tile size

', 64, 0, 2, 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False) {}
Traceback (most recent call last):
File "D:\stable-diffusion-webui\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "D:\stable-diffusion-webui\modules\call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "D:\stable-diffusion-webui\modules\img2img.py", line 206, in img2img
processed = modules.scripts.scripts_img2img.run(p, *args)
File "D:\stable-diffusion-webui\modules\scripts.py", line 601, in run
processed = script.run(p, *script_args)
File "D:\stable-diffusion-webui\scripts\openvino_accelerate.py", line 1283, in run
processed = process_images_openvino(p, model_config, vae_ckpt, p.sampler_name, enable_caching, override_hires, upscaler, hires_steps, d_strength, openvino_device, mode, is_xl_ckpt, refiner_ckpt, refiner_frac)
File "D:\stable-diffusion-webui\scripts\openvino_accelerate.py", line 998, in process_images_openvino
output = shared.sd_diffusers_model(
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\diffusers\pipelines\stable_diffusion\pipeline_stable_diffusion_inpaint.py", line 1142, in call
noise_pred = self.unet(
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\eval_frame.py", line 328, in _fn
return fn(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\eval_frame.py", line 490, in catch_errors
return callback(frame, cache_entry, hooks, frame_state)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\convert_frame.py", line 641, in _convert_frame
result = inner_convert(frame, cache_size, hooks, frame_state)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\convert_frame.py", line 133, in _fn
return fn(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\convert_frame.py", line 389, in _convert_frame_assert
return _compile(
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\convert_frame.py", line 569, in _compile
guarded_code = compile_inner(code, one_graph, hooks, transform)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\utils.py", line 189, in time_wrapper
r = func(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\convert_frame.py", line 491, in compile_inner
out_code = transform_code_object(code, transform)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\bytecode_transformation.py", line 1028, in transform_code_object
transformations(instructions, code_options)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\convert_frame.py", line 458, in transform
tracer.run()
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\symbolic_convert.py", line 2074, in run
super().run()
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\symbolic_convert.py", line 724, in run
and self.step()
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\symbolic_convert.py", line 688, in step
getattr(self, inst.opname)(inst)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\symbolic_convert.py", line 392, in wrapper
return inner_fn(self, inst)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\symbolic_convert.py", line 1115, in CALL_FUNCTION
self.call_function(fn, args, {})
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\symbolic_convert.py", line 562, in call_function
self.push(fn.call_function(self, args, kwargs))
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\variables\nn_module.py", line 302, in call_function
return wrap_fx_proxy(
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\variables\builder.py", line 1187, in wrap_fx_proxy
return wrap_fx_proxy_cls(
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\variables\builder.py", line 1274, in wrap_fx_proxy_cls
example_value = get_fake_value(proxy.node, tx)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\utils.py", line 1376, in get_fake_value
raise TorchRuntimeError(str(e)).with_traceback(e.traceback) from None
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\utils.py", line 1337, in get_fake_value
return wrap_fake_exception(
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\utils.py", line 916, in wrap_fake_exception
return fn()
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\utils.py", line 1338, in
lambda: run_node(tx.output, node, args, kwargs, nnmodule)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\utils.py", line 1410, in run_node
raise RuntimeError(fn_str + str(e)).with_traceback(e.traceback) from e
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_dynamo\utils.py", line 1402, in run_node
return nnmodule(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "D:\stable-diffusion-webui\extensions-builtin\Lora\networks.py", line 444, in network_Conv2d_forward
return originals.Conv2d_forward(self, input)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\conv.py", line 460, in forward
return self._conv_forward(input, self.weight, self.bias)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\conv.py", line 456, in _conv_forward
return F.conv2d(input, weight, bias, self.stride,
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\utils_stats.py", line 20, in wrapper
return fn(*args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_subclasses\fake_tensor.py", line 1250, in torch_dispatch
return self.dispatch(func, types, args, kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_subclasses\fake_tensor.py", line 1487, in dispatch
op_impl_out = op_impl(self, func, args, **kwargs)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch_subclasses\fake_tensor.py", line 677, in conv
conv_backend = torch._C._select_conv_backend(**kwargs)
torch._dynamo.exc.TorchRuntimeError: Failed running call_module L__self___conv_in(
(FakeTensor(..., size=(2, 4, 64, 64)),), **{}):
Given groups=1, weight of size [320, 9, 3, 3], expected input[2, 4, 64, 64] to have 9 channels, but got 4 channels instead

from user code:
   File "D:\stable-diffusion-webui\venv\lib\site-packages\diffusers\models\unet_2d_condition.py", line 1026, in forward
    sample = self.conv_in(sample)

Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information

Steps to reproduce the problem

  1. Open the img2img tab, switch to Inpaint, and load an image with a mask (per the traceback, this was a 291x512 RGBA image).
  2. Select the "OpenVINO accelerate" script (scripts/openvino_accelerate.py) and press Generate.
  3. The run fails while compiling the inpainting UNet with the TorchRuntimeError shown above (conv_in expects 9 input channels but receives 4).

What should have happened?

An inpainted image should have been generated without the runtime error.

Sysinfo

Intel Core i7-1355U processor
Iris Xe iGPU

What browsers do you use to access the UI?

Mozilla Firefox, Microsoft Edge

Console logs

none

Additional information

No response

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels
    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions