Description
Is there an existing issue for this?
- I have searched the existing issues and checked the recent builds/commits
What happened?
When running the web UI, clicking Generate throws an error: TypeError: Partitioner.__init__() missing 1 required positional argument: 'options'. There is no documentation available for what the options argument should be for Partitioner, imported via from openvino.frontend.pytorch.torchdynamo.partition import Partitioner.
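As a possible local workaround (a sketch only, inferred from the TypeError above and from the openvino_accelerate.py line 218 reference in the traceback, not from any OpenVINO documentation), the Partitioner call could forward the options that the openvino_fx backend receives, or None when there are none. The helper name make_partitioner and the assumption that options corresponds to the backend options dict are illustrative only:

```python
# Hedged sketch of a patch around scripts/openvino_accelerate.py line 218.
# Assumption: with openvino==2024.4.0 the Partitioner constructor requires an
# "options" argument (presumably the openvino_fx backend options dict), and
# forwarding those options, or None, satisfies it. Inferred from the TypeError,
# not from documented behaviour.
from openvino.frontend.pytorch.torchdynamo.partition import Partitioner


def make_partitioner(options=None):
    # The script currently calls Partitioner() with no arguments; newer
    # OpenVINO releases appear to require the options to be passed through.
    return Partitioner(options)
```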
Steps to reproduce the problem
- git clone the AUTOMATIC1111/stable-diffusion-webui repository
- copy scripts/openvino_accelerate.py from openvinotoolkit/stable-diffusion-webui into the scripts folder
- run webui-user.bat
What should have happened?
Normal behaviour would be for cl to run and compile the generated code for my system.
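Note: the second exception in the logs, fatal error C1083: Cannot open include file: 'algorithm', usually means cl.exe was found on PATH but the MSVC environment (INCLUDE/LIB) was never set up, for example because webui-user.bat was not launched from an "x64 Native Tools Command Prompt" and vcvars64.bat was not run. A minimal diagnostic sketch, assuming it is executed in the same shell that launches webui-user.bat:

```python
# Quick check of the MSVC toolchain environment in the current shell.
# If INCLUDE is empty, cl can be invoked but cannot find standard headers
# such as <algorithm>, which matches the C1083 error in the logs.
import os
import shutil

print("cl.exe on PATH:", shutil.which("cl"))
print("INCLUDE set:", bool(os.environ.get("INCLUDE")))
print("LIB set:", bool(os.environ.get("LIB")))
```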
Sysinfo
{
"Platform": "Windows-10-10.0.22621-SP0",
"Python": "3.10.6",
"Version": "v1.10.1",
"Commit": "82a973c04367123ae98bd9abdf80d9eda9b910e2",
"Git status": "On branch master\nYour branch is up to date with 'origin/master'.\n\nChanges not staged for commit:\n (use \"git add <file>...\" to update what will be committed)\n (use \"git restore <file>...\" to discard changes in working directory)\n\tmodified: webui-user.bat\n\nUntracked files:\n (use \"git add <file>...\" to include in what will be committed)\n\tscripts/openvino_accelerate.py\n\nno changes added to commit (use \"git add\" and/or \"git commit -a\")",
"Script path": "C:\\Users\\sd\\stable-diffusion-webui",
"Data path": "C:\\Users\\sd\\stable-diffusion-webui",
"Extensions dir": "C:\\Users\\sd\\stable-diffusion-webui\\extensions",
"Checksum": "caa8f3dfa8b6719a257c2fc91f58621139f0a499d8ffd879abff7e28ca27c089",
"Commandline": [
"launch.py",
"--skip-torch-cuda-test",
"--disable-safe-unpickle",
"--lowvram",
"--no-half"
],
"Torch env info": {
"torch_version": "2.5.0+cpu",
"is_debug_build": "False",
"cuda_compiled_version": null,
"gcc_version": null,
"clang_version": null,
"cmake_version": "version 3.31.0-rc2",
"os": "Microsoft Windows 11 Education",
"libc_version": "N/A",
"python_version": "3.10.6 (tags/v3.10.6:9c7b4bd, Aug 1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)] (64-bit runtime)",
"python_platform": "Windows-10-10.0.22621-SP0",
"is_cuda_available": "False",
"cuda_runtime_version": null,
"cuda_module_loading": "N/A",
"nvidia_driver_version": null,
"nvidia_gpu_models": null,
"cudnn_version": null,
"pip_version": "pip3",
"pip_packages": [
"numpy==1.26.2",
"onnx==1.17.0",
"onnxscript==0.1.0.dev20241021",
"open-clip-torch==2.20.0",
"pytorch-lightning==1.9.4",
"torch==2.5.0",
"torchaudio==2.5.0",
"torchdiffeq==0.2.3",
"torchmetrics==1.5.0",
"torchsde==0.2.6",
"torchvision==0.20.0"
],
"conda_packages": null,
"hip_compiled_version": "N/A",
"hip_runtime_version": "N/A",
"miopen_runtime_version": "N/A",
"caching_allocator_config": "",
"is_xnnpack_available": "True",
"cpu_info": [
"Architecture=9",
"CurrentClockSpeed=1300",
"DeviceID=CPU0",
"Family=205",
"L2CacheSize=6656",
"L2CacheSpeed=",
"Manufacturer=GenuineIntel",
"MaxClockSpeed=1300",
"Name=13th Gen Intel(R) Core(TM) i5-1335U",
"ProcessorType=3",
"Revision="
]
},
"Exceptions": [
{
"exception": "backend='openvino_fx' raised:\nCppCompileError: C++ compile error\n\nCommand:\ncl /I C:/Users/sd/AppData/Local/Programs/Python/Python310/Include /I C:/Users/sd/AppData/Local/Programs/Python/Python310/Include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/torch/csrc/api/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/TH /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/THC /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/torch/csrc/api/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/TH /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/THC /D TORCH_INDUCTOR_CPP_WRAPPER /D C10_USING_CUSTOM_GENERATED_MACROS /DLL /MD /O2 /std:c++20 /wd4819 /wd4251 /wd4244 /wd4267 /wd4275 /wd4018 /wd4190 /wd4624 /wd4067 /wd4068 /EHsc /openmp /openmp:experimental C:/Users/sd/AppData/Local/Temp/torchinductor_sd/3r/c3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.cpp /LD /FeC:/Users/sd/AppData/Local/Temp/torchinductor_sd/3r/c3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.pyd /link /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/Scripts/libs /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib torch.lib torch_cpu.lib torch_python.lib sleef.lib c10.lib\n\nOutput:\nMicrosoft (R) C/C++ Optimizing Compiler Version 19.41.34123 for x64\r\nCopyright (C) Microsoft Corporation. All rights reserved.\r\n\r\ncl : Command line warning D9025 : overriding '/openmp' with '/openmp:experimental'\r\nc3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.cpp\r\nC:/Users/sd/AppData/Local/Temp/torchinductor_sd/vu/cvuvp4i7roujum4xemrfwnb3t4c5t3r3mihr4b7iegh6tcqvdg43.h(3): fatal error C1083: Cannot open include file: 'algorithm': No such file or directory\r\n\n\nSet TORCH_LOGS=\"+dynamo\" and TORCHDYNAMO_VERBOSE=1 for more information\n\n\nYou can suppress this exception and fall back to eager by setting:\n import torch._dynamo\n torch._dynamo.config.suppress_errors = True\n",
"traceback": [
[
"C:\\Users\\sd\\stable-diffusion-webui\\modules\\call_queue.py, line 74, f",
"res = list(func(*args, **kwargs))"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\modules\\call_queue.py, line 53, f",
"res = func(*args, **kwargs)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\modules\\call_queue.py, line 37, f",
"res = func(*args, **kwargs)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\modules\\txt2img.py, line 106, txt2img",
"processed = modules.scripts.scripts_txt2img.run(p, *p.script_args)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\modules\\scripts.py, line 780, run",
"processed = script.run(p, *script_args)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\scripts\\openvino_accelerate.py, line 1276, run",
"processed = process_images_openvino(p, model_config, vae_ckpt, p.sampler_name, enable_caching, override_hires, upscaler, hires_steps, d_strength, openvino_device, mode, is_xl_ckpt, refiner_ckpt, refiner_frac)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\scripts\\openvino_accelerate.py, line 998, process_images_openvino",
"output = shared.sd_diffusers_model("
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\utils\\_contextlib.py, line 116, decorate_context",
"return func(*args, **kwargs)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\diffusers\\pipelines\\stable_diffusion\\pipeline_stable_diffusion.py, line 1000, __call__",
"noise_pred = self.unet("
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\nn\\modules\\module.py, line 1736, _wrapped_call_impl",
"return self._call_impl(*args, **kwargs)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\nn\\modules\\module.py, line 1747, _call_impl",
"return forward_call(*args, **kwargs)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\eval_frame.py, line 465, _fn",
"return fn(*args, **kwargs)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\nn\\modules\\module.py, line 1736, _wrapped_call_impl",
"return self._call_impl(*args, **kwargs)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\nn\\modules\\module.py, line 1747, _call_impl",
"return forward_call(*args, **kwargs)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\convert_frame.py, line 1269, __call__",
"return self._torchdynamo_orig_callable("
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\convert_frame.py, line 1064, __call__",
"result = self._inner_convert("
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\convert_frame.py, line 526, __call__",
"return _compile("
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\convert_frame.py, line 924, _compile",
"guarded_code = compile_inner(code, one_graph, hooks, transform)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\convert_frame.py, line 666, compile_inner",
"return _compile_inner(code, one_graph, hooks, transform)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_utils_internal.py, line 87, wrapper_function",
"return function(*args, **kwargs)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\convert_frame.py, line 699, _compile_inner",
"out_code = transform_code_object(code, transform)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\bytecode_transformation.py, line 1322, transform_code_object",
"transformations(instructions, code_options)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\convert_frame.py, line 219, _fn",
"return fn(*args, **kwargs)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\convert_frame.py, line 634, transform",
"tracer.run()"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\symbolic_convert.py, line 2796, run",
"super().run()"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\symbolic_convert.py, line 983, run",
"while self.step():"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\symbolic_convert.py, line 895, step",
"self.dispatch_table[inst.opcode](self, inst)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\symbolic_convert.py, line 2987, RETURN_VALUE",
"self._return(inst)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\symbolic_convert.py, line 2972, _return",
"self.output.compile_subgraph("
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\output_graph.py, line 1142, compile_subgraph",
"self.compile_and_call_fx_graph(tx, pass2.graph_output_vars(), root)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\output_graph.py, line 1369, compile_and_call_fx_graph",
"compiled_fn = self.call_user_compiler(gm)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\output_graph.py, line 1416, call_user_compiler",
"return self._call_user_compiler(gm)"
],
[
"C:\\Users\\sd\\stable-diffusion-webui\\venv\\lib\\site-packages\\torch\\_dynamo\\output_graph.py, line 1465, _call_user_compiler",
"raise BackendCompilerFailed(self.compiler_fn, e) from e"
]
]
}
],
"CPU": {
"model": "Intel64 Family 6 Model 186 Stepping 3, GenuineIntel",
"count logical": 12,
"count physical": 10
},
"RAM": {
"total": "16GB",
"used": "13GB",
"free": "2GB"
},
"Extensions": [],
"Inactive extensions": [],
"Environment": {
"COMMANDLINE_ARGS": " --skip-torch-cuda-test --disable-safe-unpickle --lowvram --no-half ",
"GRADIO_ANALYTICS_ENABLED": "False"
},
"Config": {
"ldsr_steps": 100,
"ldsr_cached": false,
"SCUNET_tile": 256,
"SCUNET_tile_overlap": 8,
"SWIN_tile": 192,
"SWIN_tile_overlap": 8,
"SWIN_torch_compile": false,
"hypertile_enable_unet": false,
"hypertile_enable_unet_secondpass": false,
"hypertile_max_depth_unet": 3,
"hypertile_max_tile_unet": 256,
"hypertile_swap_size_unet": 3,
"hypertile_enable_vae": false,
"hypertile_max_depth_vae": 3,
"hypertile_max_tile_vae": 128,
"hypertile_swap_size_vae": 3,
"sd_model_checkpoint": "v1-5-pruned-emaonly.safetensors [6ce0161689]",
"sd_checkpoint_hash": "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa"
},
"Startup": {
"total": 15.842720031738281,
"records": {
"initial startup": 0.026000022888183594,
"prepare environment/checks": 0.045052528381347656,
"prepare environment/git version info": 0.13603925704956055,
"prepare environment/torch GPU test": 0.004999637603759766,
"prepare environment/clone repositores": 0.4704570770263672,
"prepare environment/run extensions installers": 0.0,
"prepare environment": 0.6980910301208496,
"launcher": 0.003002166748046875,
"import torch": 5.952870845794678,
"import gradio": 1.2811486721038818,
"setup paths": 2.0488767623901367,
"import ldm": 0.012828826904296875,
"import sgm": 0.0,
"initialize shared": 0.30194902420043945,
"other imports": 1.060591697692871,
"opts onchange": 0.0,
"setup SD model": 0.0,
"setup codeformer": 0.0047686100006103516,
"setup gfpgan": 0.03389167785644531,
"set samplers": 0.0009829998016357422,
"list extensions": 0.002016305923461914,
"restore config state file": 0.0,
"list SD models": 0.07024502754211426,
"list localizations": 0.0010001659393310547,
"load scripts/custom_code.py": 0.01031351089477539,
"load scripts/img2imgalt.py": 0.0020189285278320312,
"load scripts/loopback.py": 0.0029997825622558594,
"load scripts/openvino_accelerate.py": 1.4915974140167236,
"load scripts/outpainting_mk_2.py": 0.0019845962524414062,
"load scripts/poor_mans_outpainting.py": 0.0,
"load scripts/postprocessing_codeformer.py": 0.0009987354278564453,
"load scripts/postprocessing_gfpgan.py": 0.0010001659393310547,
"load scripts/postprocessing_upscale.py": 0.0020189285278320312,
"load scripts/prompt_matrix.py": 0.0,
"load scripts/prompts_from_file.py": 0.0009989738464355469,
"load scripts/sd_upscale.py": 0.0009829998016357422,
"load scripts/xyz_grid.py": 0.0040149688720703125,
"load scripts/ldsr_model.py": 0.42688918113708496,
"load scripts/lora_script.py": 0.250852108001709,
"load scripts/scunet_model.py": 0.052355289459228516,
"load scripts/swinir_model.py": 0.04503130912780762,
"load scripts/hotkey_config.py": 0.002061128616333008,
"load scripts/extra_options_section.py": 0.0009224414825439453,
"load scripts/hypertile_script.py": 0.08260941505432129,
"load scripts/postprocessing_autosized_crop.py": 0.0025320053100585938,
"load scripts/postprocessing_caption.py": 0.002012491226196289,
"load scripts/postprocessing_create_flipped_copies.py": 0.0019867420196533203,
"load scripts/postprocessing_focal_crop.py": 0.003997087478637695,
"load scripts/postprocessing_split_oversized.py": 0.0019998550415039062,
"load scripts/soft_inpainting.py": 0.0020170211791992188,
"load scripts/comments.py": 0.04083371162414551,
"load scripts/refiner.py": 0.0015385150909423828,
"load scripts/sampler.py": 0.0009698867797851562,
"load scripts/seed.py": 0.0010006427764892578,
"load scripts": 2.438537836074829,
"load upscalers": 0.007005453109741211,
"refresh VAE": 0.0020325183868408203,
"refresh textual inversion templates": 0.0009853839874267578,
"scripts list_optimizers": 0.0022830963134765625,
"scripts list_unets": 0.0,
"reload hypernetworks": 0.0,
"initialize extra networks": 0.024051666259765625,
"scripts before_ui_callback": 0.0012240409851074219,
"create ui": 1.4283111095428467,
"gradio launch": 0.46705007553100586,
"add APIs": 0.014517545700073242,
"app_started_callback/lora_script.py": 0.0,
"app_started_callback": 0.0
}
},
"Packages": [
"accelerate==0.21.0",
"aenum==3.1.15",
"aiofiles==23.2.1",
"aiohappyeyeballs==2.4.3",
"aiohttp==3.10.10",
"aiosignal==1.3.1",
"altair==5.4.1",
"antlr4-python3-runtime==4.9.3",
"anyio==3.7.1",
"async-timeout==4.0.3",
"attrs==24.2.0",
"blendmodes==2022",
"certifi==2024.8.30",
"charset-normalizer==3.4.0",
"clean-fid==0.1.35",
"click==8.1.7",
"clip @ https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip#sha256=b5842c25da441d6c581b53a5c60e0c2127ebafe0f746f8e15561a006c6c3be6a",
"colorama==0.4.6",
"contourpy==1.3.0",
"cycler==0.12.1",
"deprecation==2.1.0",
"diffusers==0.30.3",
"diskcache==5.6.3",
"einops==0.4.1",
"exceptiongroup==1.2.2",
"facexlib==0.3.0",
"fastapi==0.94.0",
"ffmpy==0.4.0",
"filelock==3.16.1",
"filterpy==1.4.5",
"fonttools==4.54.1",
"frozenlist==1.4.1",
"fsspec==2024.10.0",
"ftfy==6.3.0",
"gitdb==4.0.11",
"GitPython==3.1.32",
"gradio==3.41.2",
"gradio_client==0.5.0",
"h11==0.12.0",
"httpcore==0.15.0",
"httpx==0.24.1",
"huggingface-hub==0.26.1",
"idna==3.10",
"imageio==2.36.0",
"importlib_metadata==8.5.0",
"importlib_resources==6.4.5",
"inflection==0.5.1",
"Jinja2==3.1.4",
"jsonmerge==1.8.0",
"jsonschema==4.23.0",
"jsonschema-specifications==2024.10.1",
"kiwisolver==1.4.7",
"kornia==0.6.7",
"lark==1.1.2",
"lazy_loader==0.4",
"lightning-utilities==0.11.8",
"llvmlite==0.43.0",
"MarkupSafe==2.1.5",
"matplotlib==3.9.2",
"ml_dtypes==0.5.0",
"mpmath==1.3.0",
"multidict==6.1.0",
"narwhals==1.10.0",
"networkx==3.4.2",
"numba==0.60.0",
"numpy==1.26.2",
"omegaconf==2.2.3",
"onnx==1.17.0",
"onnxscript==0.1.0.dev20241021",
"open-clip-torch==2.20.0",
"opencv-python==4.10.0.84",
"openvino==2024.4.0",
"openvino-telemetry==2024.1.0",
"orjson==3.10.9",
"packaging==24.1",
"pandas==2.2.3",
"piexif==1.1.3",
"Pillow==9.5.0",
"pillow-avif-plugin==1.4.3",
"pip==24.2",
"propcache==0.2.0",
"protobuf==3.20.0",
"psutil==5.9.5",
"pydantic==1.10.18",
"pydub==0.25.1",
"pyparsing==3.2.0",
"python-dateutil==2.9.0.post0",
"python-multipart==0.0.12",
"pytorch-lightning==1.9.4",
"pytz==2024.2",
"PyWavelets==1.7.0",
"PyYAML==6.0.2",
"referencing==0.35.1",
"regex==2024.9.11",
"requests==2.32.3",
"resize-right==0.0.2",
"rpds-py==0.20.0",
"safetensors==0.4.2",
"scikit-image==0.21.0",
"scipy==1.14.1",
"semantic-version==2.10.0",
"sentencepiece==0.2.0",
"setuptools==69.5.1",
"six==1.16.0",
"smmap==5.0.1",
"sniffio==1.3.1",
"spandrel==0.3.4",
"spandrel_extra_arches==0.1.1",
"starlette==0.26.1",
"sympy==1.13.1",
"tifffile==2024.9.20",
"timm==1.0.11",
"tokenizers==0.13.3",
"tomesd==0.1.3",
"torch==2.5.0",
"torchaudio==2.5.0",
"torchdiffeq==0.2.3",
"torchmetrics==1.5.0",
"torchsde==0.2.6",
"torchvision==0.20.0",
"tqdm==4.66.5",
"trampoline==0.1.2",
"transformers==4.30.2",
"typing_extensions==4.12.2",
"tzdata==2024.2",
"urllib3==2.2.3",
"uvicorn==0.32.0",
"wcwidth==0.2.13",
"websockets==11.0.3",
"yarl==1.15.5",
"zipp==3.20.2"
]
}
What browsers do you use to access the UI?
Mozilla Firefox
Console logs
PS C:\Users\sd\stable-diffusion-webui> .\webui-user.bat
venv "C:\Users\sd\stable-diffusion-webui\venv\Scripts\Python.exe"
Python 3.10.6 (tags/v3.10.6:9c7b4bd, Aug 1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)]
Version: v1.10.1
Commit hash: 82a973c04367123ae98bd9abdf80d9eda9b910e2
Launching Web UI with arguments: --skip-torch-cuda-test --disable-safe-unpickle --lowvram --no-half
C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\timm\models\layers\__init__.py:48: FutureWarning: Importing from timm.models.layers is deprecated, please import via timm.layers
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.layers", FutureWarning)
no module 'xformers'. Processing without...
no module 'xformers'. Processing without...
No module 'xformers'. Proceeding without it.
Warning: caught exception 'Torch not compiled with CUDA enabled', memory monitor disabled
Loading weights [6ce0161689] from C:\Users\sd\stable-diffusion-webui\models\Stable-diffusion\v1-5-pruned-emaonly.safetensors
Creating model from config: C:\Users\sd\stable-diffusion-webui\configs\v1-inference.yaml
Running on local URL: http://127.0.0.1:7860
To create a public link, set `share=True` in `launch()`.
Startup time: 14.9s (prepare environment: 0.6s, import torch: 4.9s, import gradio: 1.1s, setup paths: 1.1s, initialize shared: 0.3s, other imports: 1.1s, load scripts: 3.2s, create ui: 1.8s, gradio launch: 0.5s).
Applying attention optimization: InvokeAI... done.
Model loaded in 8.3s (load weights from disk: 1.7s, create model: 0.8s, apply weights to model: 5.5s, calculate empty prompt: 0.2s).
{}
Loading weights [6ce0161689] from C:\Users\sd\stable-diffusion-webui\models\Stable-diffusion\v1-5-pruned-emaonly.safetensors
OpenVINO Script: created model from config : C:\Users\sd\stable-diffusion-webui\configs\v1-inference.yaml
Fetching 11 files: 100%|███████████████████████████████████████████████████████████████████████| 11/11 [00:00<?, ?it/s]
Loading pipeline components...: 100%|████████████████████████████████████████████████████| 6/6 [00:00<00:00, 14.79it/s]
You have disabled the safety checker for <class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline'> by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .
0%| | 0/20 [00:00<?, ?it/s]Partitioner.__init__() missing 1 required positional argument: 'options'
0%| | 0/20 [00:45<?, ?it/s]
*** Error completing request
*** Arguments: ('task(rkxbdpf2rvvwm1j)', <gradio.routes.Request object at 0x00000146C7083AF0>, 'a cat', '', [], 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', 'Use same scheduler', '', '', [], 1, 20, 'DPM++ 2M', 'Automatic', False, '', 0.8, -1, False, -1, 0, 0, 0, 'None', 'None', 'CPU', True, 'Euler a', True, False, 'Latent', 10, 0.5, False, 'None', 0.8, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
Traceback (most recent call last):
File "C:\Users\sd\stable-diffusion-webui\scripts\openvino_accelerate.py", line 218, in openvino_fx
partitioner = Partitioner()
TypeError: Partitioner.__init__() missing 1 required positional argument: 'options'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\cpp_builder.py", line 331, in _run_compile_cmd
status = subprocess.check_output(args=cmd, cwd=cwd, stderr=subprocess.STDOUT)
File "C:\Users\sd\AppData\Local\Programs\Python\Python310\lib\subprocess.py", line 420, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "C:\Users\sd\AppData\Local\Programs\Python\Python310\lib\subprocess.py", line 524, in run
raise CalledProcessError(retcode, process.args,
subprocess.CalledProcessError: Command '['cl', '/I', 'C:/Users/sd/AppData/Local/Programs/Python/Python310/Include', '/I', 'C:/Users/sd/AppData/Local/Programs/Python/Python310/Include', '/I', 'C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include', '/I', 'C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/torch/csrc/api/include', '/I', 'C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/TH', '/I', 'C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/THC', '/I', 'C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include', '/I', 'C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/torch/csrc/api/include', '/I', 'C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/TH', '/I', 'C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/THC', '/D', 'TORCH_INDUCTOR_CPP_WRAPPER', '/D', 'C10_USING_CUSTOM_GENERATED_MACROS', '/DLL', '/MD', '/O2', '/std:c++20', '/wd4819', '/wd4251', '/wd4244', '/wd4267', '/wd4275', '/wd4018', '/wd4190', '/wd4624', '/wd4067', '/wd4068', '/EHsc', '/openmp', '/openmp:experimental', 'C:/Users/sd/AppData/Local/Temp/torchinductor_sd/3r/c3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.cpp', '/LD', '/FeC:/Users/sd/AppData/Local/Temp/torchinductor_sd/3r/c3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.pyd', '/link', '/LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/Scripts/libs', '/LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib', '/LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib', '/LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib', 'torch.lib', 'torch_cpu.lib', 'torch_python.lib', 'sleef.lib', 'c10.lib']' returned non-zero exit status 2.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\output_graph.py", line 1446, in _call_user_compiler
compiled_fn = compiler_fn(gm, self.example_inputs())
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\repro\after_dynamo.py", line 129, in __call__
compiled_gm = compiler_fn(gm, example_inputs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\__init__.py", line 2280, in __call__
return self.compiler_fn(model_, inputs_, **self.kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\backends\common.py", line 114, in wrapper
return fn(model, inputs, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\scripts\openvino_accelerate.py", line 234, in openvino_fx
return compile_fx(subgraph, example_inputs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\compile_fx.py", line 1521, in compile_fx
return aot_autograd(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\backends\common.py", line 72, in __call__
cg = aot_module_simplified(gm, example_inputs, **self.kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_functorch\aot_autograd.py", line 1071, in aot_module_simplified
compiled_fn = dispatch_and_compile()
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_functorch\aot_autograd.py", line 1056, in dispatch_and_compile
compiled_fn, _ = create_aot_dispatcher_function(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_functorch\aot_autograd.py", line 522, in create_aot_dispatcher_function
return _create_aot_dispatcher_function(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_functorch\aot_autograd.py", line 759, in _create_aot_dispatcher_function
compiled_fn, fw_metadata = compiler_fn(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_functorch\_aot_autograd\jit_compile_runtime_wrappers.py", line 179, in aot_dispatch_base
compiled_fw = compiler(fw_module, updated_flat_args)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\compile_fx.py", line 1350, in fw_compiler_base
return _fw_compiler_base(model, example_inputs, is_inference)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\compile_fx.py", line 1421, in _fw_compiler_base
return inner_compile(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\compile_fx.py", line 475, in compile_fx_inner
return wrap_compiler_debug(_compile_fx_inner, compiler_name="inductor")(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\repro\after_aot.py", line 85, in debug_wrapper
inner_compiled_fn = compiler_fn(gm, example_inputs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\compile_fx.py", line 661, in _compile_fx_inner
compiled_graph = FxGraphCache.load(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\codecache.py", line 1334, in load
compiled_graph = compile_fx_fn(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\compile_fx.py", line 570, in codegen_and_compile
compiled_graph = fx_codegen_and_compile(gm, example_inputs, **fx_kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\compile_fx.py", line 878, in fx_codegen_and_compile
compiled_fn = graph.compile_to_fn()
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\graph.py", line 1913, in compile_to_fn
return self.compile_to_module().call
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\graph.py", line 1839, in compile_to_module
return self._compile_to_module()
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\graph.py", line 1867, in _compile_to_module
mod = PyCodeCache.load_by_key_path(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\codecache.py", line 2876, in load_by_key_path
mod = _reload_python_module(key, path)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\runtime\compile_tasks.py", line 45, in _reload_python_module
exec(code, mod.__dict__, mod.__dict__)
File "C:\Users\sd\AppData\Local\Temp\torchinductor_sd\l3\cl3t6zhxu5vljeh26k6hryshvukmbnomb63tksftg7nzziteixrh.py", line 29, in <module>
cpp_fused_convolution_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'float*', 'float*'], '''
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\async_compile.py", line 223, in cpp_pybinding
return CppPythonBindingsCodeCache.load_pybinding(argtypes, source_code)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\codecache.py", line 2385, in load_pybinding
return cls.load_pybinding_async(*args, **kwargs)()
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\codecache.py", line 2377, in future
result = get_result()
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\codecache.py", line 2178, in load_fn
result = worker_fn()
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\codecache.py", line 2218, in _worker_compile_cpp
cpp_builder.build()
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\cpp_builder.py", line 1508, in build
status = run_compile_cmd(build_cmd, cwd=_build_tmp_dir)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\cpp_builder.py", line 352, in run_compile_cmd
return _run_compile_cmd(cmd_line, cwd)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_inductor\cpp_builder.py", line 346, in _run_compile_cmd
raise exc.CppCompileError(cmd, output) from e
torch._inductor.exc.CppCompileError: C++ compile error
Command:
cl /I C:/Users/sd/AppData/Local/Programs/Python/Python310/Include /I C:/Users/sd/AppData/Local/Programs/Python/Python310/Include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/torch/csrc/api/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/TH /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/THC /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/torch/csrc/api/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/TH /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/THC /D TORCH_INDUCTOR_CPP_WRAPPER /D C10_USING_CUSTOM_GENERATED_MACROS /DLL /MD /O2 /std:c++20 /wd4819 /wd4251 /wd4244 /wd4267 /wd4275 /wd4018 /wd4190 /wd4624 /wd4067 /wd4068 /EHsc /openmp /openmp:experimental C:/Users/sd/AppData/Local/Temp/torchinductor_sd/3r/c3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.cpp /LD /FeC:/Users/sd/AppData/Local/Temp/torchinductor_sd/3r/c3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.pyd /link /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/Scripts/libs /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib torch.lib torch_cpu.lib torch_python.lib sleef.lib c10.lib
Output:
Microsoft (R) C/C++ Optimizing Compiler Version 19.41.34123 for x64
Copyright (C) Microsoft Corporation. All rights reserved.
cl : Command line warning D9025 : overriding '/openmp' with '/openmp:experimental'
c3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.cpp
C:/Users/sd/AppData/Local/Temp/torchinductor_sd/vu/cvuvp4i7roujum4xemrfwnb3t4c5t3r3mihr4b7iegh6tcqvdg43.h(3): fatal error C1083: Cannot open include file: 'algorithm': No such file or directory
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\sd\stable-diffusion-webui\modules\call_queue.py", line 74, in f
res = list(func(*args, **kwargs))
File "C:\Users\sd\stable-diffusion-webui\modules\call_queue.py", line 53, in f
res = func(*args, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\modules\txt2img.py", line 106, in txt2img
processed = modules.scripts.scripts_txt2img.run(p, *p.script_args)
File "C:\Users\sd\stable-diffusion-webui\modules\scripts.py", line 780, in run
processed = script.run(p, *script_args)
File "C:\Users\sd\stable-diffusion-webui\scripts\openvino_accelerate.py", line 1276, in run
processed = process_images_openvino(p, model_config, vae_ckpt, p.sampler_name, enable_caching, override_hires, upscaler, hires_steps, d_strength, openvino_device, mode, is_xl_ckpt, refiner_ckpt, refiner_frac)
File "C:\Users\sd\stable-diffusion-webui\scripts\openvino_accelerate.py", line 998, in process_images_openvino
output = shared.sd_diffusers_model(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\diffusers\pipelines\stable_diffusion\pipeline_stable_diffusion.py", line 1000, in __call__
noise_pred = self.unet(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\eval_frame.py", line 465, in _fn
return fn(*args, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\convert_frame.py", line 1269, in __call__
return self._torchdynamo_orig_callable(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\convert_frame.py", line 1064, in __call__
result = self._inner_convert(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\convert_frame.py", line 526, in __call__
return _compile(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\convert_frame.py", line 924, in _compile
guarded_code = compile_inner(code, one_graph, hooks, transform)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\convert_frame.py", line 666, in compile_inner
return _compile_inner(code, one_graph, hooks, transform)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_utils_internal.py", line 87, in wrapper_function
return function(*args, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\convert_frame.py", line 699, in _compile_inner
out_code = transform_code_object(code, transform)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\bytecode_transformation.py", line 1322, in transform_code_object
transformations(instructions, code_options)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\convert_frame.py", line 219, in _fn
return fn(*args, **kwargs)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\convert_frame.py", line 634, in transform
tracer.run()
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\symbolic_convert.py", line 2796, in run
super().run()
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\symbolic_convert.py", line 983, in run
while self.step():
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\symbolic_convert.py", line 895, in step
self.dispatch_table[inst.opcode](self, inst)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\symbolic_convert.py", line 2987, in RETURN_VALUE
self._return(inst)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\symbolic_convert.py", line 2972, in _return
self.output.compile_subgraph(
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\output_graph.py", line 1142, in compile_subgraph
self.compile_and_call_fx_graph(tx, pass2.graph_output_vars(), root)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\output_graph.py", line 1369, in compile_and_call_fx_graph
compiled_fn = self.call_user_compiler(gm)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\output_graph.py", line 1416, in call_user_compiler
return self._call_user_compiler(gm)
File "C:\Users\sd\stable-diffusion-webui\venv\lib\site-packages\torch\_dynamo\output_graph.py", line 1465, in _call_user_compiler
raise BackendCompilerFailed(self.compiler_fn, e) from e
torch._dynamo.exc.BackendCompilerFailed: backend='openvino_fx' raised:
CppCompileError: C++ compile error
Command:
cl /I C:/Users/sd/AppData/Local/Programs/Python/Python310/Include /I C:/Users/sd/AppData/Local/Programs/Python/Python310/Include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/torch/csrc/api/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/TH /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/THC /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/torch/csrc/api/include /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/TH /I C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/include/THC /D TORCH_INDUCTOR_CPP_WRAPPER /D C10_USING_CUSTOM_GENERATED_MACROS /DLL /MD /O2 /std:c++20 /wd4819 /wd4251 /wd4244 /wd4267 /wd4275 /wd4018 /wd4190 /wd4624 /wd4067 /wd4068 /EHsc /openmp /openmp:experimental C:/Users/sd/AppData/Local/Temp/torchinductor_sd/3r/c3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.cpp /LD /FeC:/Users/sd/AppData/Local/Temp/torchinductor_sd/3r/c3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.pyd /link /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/Scripts/libs /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib /LIBPATH:C:/Users/sd/stable-diffusion-webui/venv/lib/site-packages/torch/lib torch.lib torch_cpu.lib torch_python.lib sleef.lib c10.lib
Output:
Microsoft (R) C/C++ Optimizing Compiler Version 19.41.34123 for x64
Copyright (C) Microsoft Corporation. All rights reserved.
cl : Command line warning D9025 : overriding '/openmp' with '/openmp:experimental'
c3raa7y7ci2b2udpb5l5gvgemgtjfzweilztxrjqx6uadg6f23nn.cpp
C:/Users/sd/AppData/Local/Temp/torchinductor_sd/vu/cvuvp4i7roujum4xemrfwnb3t4c5t3r3mihr4b7iegh6tcqvdg43.h(3): fatal error C1083: Cannot open include file: 'algorithm': No such file or directory
Set TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information
You can suppress this exception and fall back to eager by setting:
import torch._dynamo
torch._dynamo.config.suppress_errors = True
Additional information
No response