Commit 8b40f475, authored Nov 10, 2023 by Nuullll
Parent: f0f100e6

Initial IPEX support
Showing 2 changed files with 51 additions and 2 deletions:

    modules/devices.py       +9   -2
    modules/xpu_specific.py  +42  -0
modules/devices.py

@@ -3,7 +3,7 @@ import contextlib
 from functools import lru_cache
 
 import torch
-from modules import errors, shared
+from modules import errors, shared, xpu_specific
 
 if sys.platform == "darwin":
     from modules import mac_specific
@@ -30,6 +30,9 @@ def get_optimal_device_name():
     if has_mps():
         return "mps"
 
+    if xpu_specific.has_ipex:
+        return xpu_specific.get_xpu_device_string()
+
     return "cpu"
 
 
@@ -100,11 +103,15 @@ def autocast(disable=False):
     if dtype == torch.float32 or shared.cmd_opts.precision == "full":
         return contextlib.nullcontext()
 
+    if xpu_specific.has_xpu:
+        return torch.autocast("xpu")
+
     return torch.autocast("cuda")
 
 
 def without_autocast(disable=False):
-    return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext()
+    device_type = "xpu" if xpu_specific.has_xpu else "cuda"
+    return torch.autocast(device_type, enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext()
 
 
 class NansException(Exception):
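The devices.py changes route device selection and autocast through the XPU backend when IPEX imports cleanly. A minimal sketch of the resulting behavior, assuming intel_extension_for_pytorch is installed and an XPU device is visible (the tensors and shapes here are purely illustrative):

import torch
import intel_extension_for_pytorch as ipex  # registers the "xpu" device type with torch

# get_optimal_device_name() now prefers the XPU over plain "cpu" when IPEX is
# present, so model tensors land on the Intel GPU instead of the CPU fallback.
x = torch.randn(4, 4, device="xpu")
y = torch.randn(4, 4, device="xpu")

# autocast() now opens the context on "xpu" rather than "cuda", so mixed-precision
# kernels dispatch through IPEX's autocast rules instead of targeting a CUDA
# backend that does not exist on this hardware.
with torch.autocast("xpu"):
    z = x @ y
print(z.dtype)  # a reduced-precision dtype under autocast (typically bfloat16 on XPU)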
modules/xpu_specific.py  (new file, 0 → 100644)

import contextlib
from modules import shared
from modules.sd_hijack_utils import CondFunc

has_ipex = False
try:
    import torch
    import intel_extension_for_pytorch as ipex
    has_ipex = True
except Exception:
    pass


def check_for_xpu():
    if not has_ipex:
        return False
    return hasattr(torch, 'xpu') and torch.xpu.is_available()


has_xpu = check_for_xpu()


def get_xpu_device_string():
    if shared.cmd_opts.device_id is not None:
        return f"xpu:{shared.cmd_opts.device_id}"
    return "xpu"


def return_null_context(*args, **kwargs):  # pylint: disable=unused-argument
    return contextlib.nullcontext()


if has_xpu:
    CondFunc('torch.Generator',
        lambda orig_func, device=None: torch.xpu.Generator(device),
        lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu")

    CondFunc('torch.nn.functional.layer_norm',
        lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
            orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs),
        lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
            weight is not None and input.dtype != weight.data.dtype)

    CondFunc('torch.nn.modules.GroupNorm.forward',
        lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
        lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
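The three hijacks above rely on CondFunc from modules/sd_hijack_utils.py, which is not part of this diff. Roughly, it monkey-patches a function at a dotted path so that a replacement runs only when a predicate on the call's arguments holds. A simplified sketch of that helper, offered only as a reading aid for the calls above (the real implementation resolves modules more carefully):

import importlib

def cond_func_sketch(orig_func_path, sub_func, cond_func):
    # Resolve a dotted path such as 'torch.nn.functional.layer_norm'
    # into the owning object and the attribute name to patch.
    parts = orig_func_path.split('.')
    owner = importlib.import_module(parts[0])
    for name in parts[1:-1]:
        owner = getattr(owner, name)
    orig_func = getattr(owner, parts[-1])

    def wrapper(*args, **kwargs):
        # Route through the replacement only when the predicate fires;
        # otherwise the call behaves exactly as before the patch.
        if cond_func(orig_func, *args, **kwargs):
            return sub_func(orig_func, *args, **kwargs)
        return orig_func(*args, **kwargs)

    setattr(owner, parts[-1], wrapper)

Read this way, the torch.Generator hijack redirects generator creation for any non-CPU device to torch.xpu.Generator, while leaving CPU generators untouched.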
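The layer_norm and GroupNorm hijacks both cast the input to the weight's dtype before dispatching, presumably to avoid mixed-dtype failures when half-precision activations meet float32 normalization weights on the XPU backend (the commit itself does not state the motivation). The cast can be reproduced standalone; this sketch runs on CPU and illustrates only the dtype normalization, not the XPU failure mode:

import torch
import torch.nn.functional as F

x = torch.randn(2, 8, dtype=torch.float16)   # half-precision activations
ln = torch.nn.LayerNorm(8)                   # weight/bias default to float32

# What the hijack does: align the input dtype with the weight dtype first,
# then call the original layer_norm with otherwise unchanged arguments.
y = F.layer_norm(x.to(ln.weight.data.dtype), ln.normalized_shape, ln.weight, ln.bias, ln.eps)
print(y.dtype)  # torch.float32: the output follows the weight dtype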