
Starting point for macOS development
-- Do not include in feature merging! --
marijnvg-tng committed Nov 28, 2024
1 parent 6e64bd1 commit e3a6dec
Showing 12 changed files with 46 additions and 40 deletions.
2 changes: 1 addition & 1 deletion WebUI/electron/main.ts
@@ -577,7 +577,7 @@ function wakeupApiService() {
const wordkDir = path.resolve(app.isPackaged ? path.join(process.resourcesPath, "service") : path.join(__dirname, "../../../service"));
const comfyWordkDir = path.resolve(app.isPackaged ? path.join(process.resourcesPath, "ComfyUI") : path.join(__dirname, "../../../ComfyUI"));
const baseDir = app.isPackaged ? process.resourcesPath : path.join(__dirname, "../../../");
-const pythonExe = path.resolve(path.join(baseDir, "env/python.exe"));
+const pythonExe = path.resolve(path.join(baseDir, "env/bin/python"));
const additionalEnvVariables = {
"SYCL_ENABLE_DEFAULT_CONTEXTS": "1",
"SYCL_CACHE_PERSISTENT": "1",
10 changes: 5 additions & 5 deletions WebUI/src/assets/js/store/globalSetup.ts
@@ -112,10 +112,10 @@ export const useGlobalSetup = defineStore("globalSetup", () => {
}
}
await reloadGraphics();
-if (graphicsList.value.length == 0) {
-await window.electronAPI.showMessageBoxSync({ message: useI18N().state.ERROR_UNFOUND_GRAPHICS, title: "error", icon: "error" });
-window.electronAPI.exitApp();
-}
+// if (graphicsList.value.length == 0) {
+//   await window.electronAPI.showMessageBoxSync({ message: useI18N().state.ERROR_UNFOUND_GRAPHICS, title: "error", icon: "error" });
+//   window.electronAPI.exitApp();
+// }
await loadUserSettings();
loadingState.value = "running";
}
@@ -264,7 +264,7 @@ export const useGlobalSetup = defineStore("globalSetup", () => {
modelSettings.lora = models.value.lora[0];
changeUserSetup = true;
}
-if (!graphicsList.value.find(item => item.index == modelSettings.graphics)) {
+if (!graphicsList.value.find(item => item.index == modelSettings.graphics) && graphicsList.value.length != 0) {
modelSettings.graphics = graphicsList.value[0].index;
}
if (changeUserSetup) {
12 changes: 11 additions & 1 deletion service/aipg_utils.py
@@ -201,13 +201,23 @@ def get_ESRGAN_size():


def get_support_graphics(env_type: str):
    import re
    import model_config

    device_count = torch.xpu.device_count()
    print('xpu device_count:', device_count)
    model_config.env_type = env_type
    graphics = list()
    for i in range(device_count):
        device_name = torch.xpu.get_device_name(i)
        # if device_name == "Intel(R) Arc(TM) Graphics" or re.search("Intel\(R\) Arc\(TM\)", device_name) is not None:
        print('device_name', device_name)
        if device_name == "Intel(R) Arc(TM) Graphics" or re.search("Intel\(R\) Arc\(TM\)", device_name) is not None:
            graphics.append({"index": i, "name": device_name})
    device_count = torch.cuda.device_count()
    print('cuda device_count:', device_count)
    model_config.env_type = env_type
    for i in range(device_count):
        device_name = torch.cuda.get_device_name(i)
        print('device_name', device_name)
        graphics.append({"index": i, "name": device_name})
    return graphics
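
Note: on macOS neither torch.xpu nor torch.cuda reports any devices, so get_support_graphics returns an empty list there (which is why the empty-graphics guard in globalSetup.ts is relaxed elsewhere in this commit). A backend-agnostic probe that also covers Apple's MPS backend might look like the sketch below; get_available_devices is a hypothetical helper, not part of this commit:

import torch

def get_available_devices():
    """Probe whichever torch backends exist on this platform."""
    devices = []
    # Intel XPU (Arc): present only when intel_extension_for_pytorch is installed
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        for i in range(torch.xpu.device_count()):
            devices.append({"index": i, "name": torch.xpu.get_device_name(i)})
    # NVIDIA CUDA
    if torch.cuda.is_available():
        for i in range(torch.cuda.device_count()):
            devices.append({"index": i, "name": torch.cuda.get_device_name(i)})
    # Apple Metal (macOS): MPS exposes a single logical device without a name API
    if torch.backends.mps.is_available():
        devices.append({"index": 0, "name": "Apple MPS"})
    return devices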
12 changes: 7 additions & 5 deletions service/llm_biz.py
@@ -15,9 +15,10 @@
AutoTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
+AutoModelForCausalLM,
)

-from ipex_llm.transformers import AutoModelForCausalLM
+#from ipex_llm.transformers import AutoModelForCausalLM
from typing import Callable
from transformers.generation.stopping_criteria import (
StoppingCriteria,
@@ -72,6 +73,7 @@ def stream_chat_generate(
args: dict,
error_callback: Callable[[Exception], None] = None,
):
+print(args)
try:
model.generate(**args)
sys.stdout.flush()
@@ -173,8 +175,8 @@ def chat(
# if prev genera not finish, stop it
stop_generate()

-torch.xpu.set_device(params.device)
-model_config.device = f"xpu:{params.device}"
+torch.cuda.set_device(params.device)
+model_config.device = f"cuda:{params.device}"
prompt = params.prompt
enable_rag = params.enable_rag
model_repo_id = params.model_repo_id
@@ -189,7 +191,7 @@
if _model is not None:
del _model
gc.collect()
-torch.xpu.empty_cache()
+torch.cuda.empty_cache()

model_base_path = model_config.config.get("llm")
model_name = model_repo_id.replace("/", "---")
@@ -293,7 +295,7 @@ def dispose():
del _model
_model = None
gc.collect()
-torch.xpu.empty_cache()
+torch.cuda.empty_cache()


class StopGenerateException(Exception):
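
The xpu-to-cuda substitution repeats at every empty_cache/set_device call site across llm_biz.py and paint_biz.py; a single dispatch helper would keep the call sites backend-neutral. A minimal sketch under that assumption (empty_cache_for is hypothetical, not in this commit):

import gc
import torch

def empty_cache_for(device: str) -> None:
    """Release cached allocator memory for whichever backend `device` names."""
    gc.collect()
    if device.startswith("cuda") and torch.cuda.is_available():
        torch.cuda.empty_cache()
    elif device.startswith("xpu") and hasattr(torch, "xpu"):
        torch.xpu.empty_cache()
    elif device.startswith("mps") and torch.backends.mps.is_available():
        torch.mps.empty_cache()  # available in PyTorch >= 2.0

Call sites such as dispose() could then use empty_cache_for(model_config.device) instead of hard-coding a backend.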
3 changes: 2 additions & 1 deletion service/main.py
@@ -3,8 +3,9 @@
import time
import traceback
import torch
-from transformers import pipeline, PreTrainedModel, TextIteratorStreamer
-import intel_extension_for_pytorch as ipex
+from transformers import pipeline,PreTrainedModel,TextIteratorStreamer
+# import intel_extension_for_pytorch as ipex


def stream_chat_generate(model: PreTrainedModel, args: dict):
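
The stream_chat_generate helpers pair model.generate with the TextIteratorStreamer imported here; the usual pattern, sketched below as a self-contained example ("gpt2" is a placeholder model, not what the app ships):

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder model
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello from macOS", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

# generate() blocks, so it runs on a worker thread while the caller
# consumes decoded text chunks from the streamer as they arrive
thread = Thread(target=model.generate,
                kwargs=dict(**inputs, streamer=streamer, max_new_tokens=32))
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()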
4 changes: 2 additions & 2 deletions service/model_config.py
@@ -1,4 +1,3 @@

# CONFIG_PATH = "./model_config.json"

config = {
@@ -12,11 +11,12 @@
"preview": "./models/stable_diffusion/preview",
}

device = "cuda"
comfyUIConfig = {
"unet": "../ComfyUI/models/unet",
"clip": "../ComfyUI/models/clip",
"vae": "../ComfyUI/models/vae",
}

device = "xpu"
env_type = "arc"
env_type = "arc"
17 changes: 9 additions & 8 deletions service/paint_biz.py
@@ -30,9 +30,9 @@
import schedulers_util
from compel import Compel
from threading import Event
-from xpu_hijacks import ipex_hijacks
+# from cuda_hijacks import ipex_hijacks

-ipex_hijacks()
+# ipex_hijacks()
print("workarounds applied")

# region class define
@@ -185,6 +185,7 @@ def get_basic_model(input_model_name: str) -> DiffusionPipeline | Any:
_safety_checker = None
# perf optimization
_basic_model_pipe.enable_vae_tiling()
+print('model_config', model_config)
_basic_model_pipe.to(model_config.device)

print(
@@ -239,7 +240,7 @@ def get_ext_pipe(params: TextImageParams, pipe_classes: List, init_class: any):
return _ext_model_pipe
del _ext_model_pipe
gc.collect()
-torch.xpu.empty_cache()
+torch.cuda.empty_cache()

basic_model_pipe = get_basic_model(params.model_name)
_ext_model_pipe = init_class.from_pipe(basic_model_pipe)
@@ -827,8 +828,8 @@ def generate(params: TextImageParams):

try:
stop_generate()
-torch.xpu.set_device(params.device)
-# model_config.device = f"xpu:{params.device}"
+torch.cuda.set_device(params.device)
+# model_config.device = f"cuda:{params.device}"
if _last_model_name != params.model_name:
# hange model dispose basic model
if _basic_model_pipe is not None:
@@ -853,7 +854,7 @@
text_to_image(params)
_last_mode = params.mode

-torch.xpu.empty_cache()
+torch.cuda.empty_cache()
finally:
_generating = False

@@ -891,15 +892,15 @@ def dispose_basic_model():
_last_mode = None

gc.collect()
-torch.xpu.empty_cache()
+torch.cuda.empty_cache()


def dispose_ext_model():
global _ext_model_pipe
del _ext_model_pipe
_ext_model_pipe = None
gc.collect()
-torch.xpu.empty_cache()
+torch.cuda.empty_cache()


def dispose():
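
get_basic_model loads a DiffusionPipeline, enables VAE tiling, and moves it to model_config.device; the core pattern it follows, as a sketch (the model id is a placeholder, the app loads its own local models):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # placeholder id
    torch_dtype=torch.float16,
)
pipe.enable_vae_tiling()  # the perf optimization applied in get_basic_model
pipe.to("cuda")           # model_config.device; "mps" would be the macOS analogue
image = pipe("a lighthouse at dawn").images[0]
image.save("out.png")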
2 changes: 1 addition & 1 deletion service/rag.py
@@ -18,7 +18,7 @@
from langchain_community.vectorstores.faiss import FAISS, Document

# from sentence_transformers import SentenceTransformer
-import intel_extension_for_pytorch as ipex # noqa: F401
+# import intel_extension_for_pytorch as ipex # noqa: F401
from sentence_transformers import SentenceTransformer

#### CONFIGURATIONS ------------------------------------------------------------------------------------------------------------------------
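
With the IPEX import commented out, rag.py falls back to stock sentence-transformers device handling, which auto-selects cuda, mps, or cpu. A minimal sketch of the embedding step it builds on (the model name is a common default, not necessarily what this repo configures):

from sentence_transformers import SentenceTransformer

# sentence-transformers picks cuda/mps/cpu automatically when no device is given
model = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = model.encode(
    ["What is retrieval-augmented generation?", "RAG combines search with an LLM."],
    normalize_embeddings=True,
)
print(embeddings.shape)  # (2, 384) for this model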
4 changes: 2 additions & 2 deletions service/realesrgan.py
@@ -11,9 +11,9 @@
from basicsr.archs.rrdbnet_arch import RRDBNet
from torch.nn import functional as F
import model_config
-import xpu_hijacks
+# import xpu_hijacks

-xpu_hijacks.ipex_hijacks()
+# xpu_hijacks.ipex_hijacks()

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
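
The surviving line DEVICE = "cuda" if torch.cuda.is_available() else "cpu" drops straight to CPU on macOS; an MPS-aware fallback would be a natural follow-up. A sketch, not part of this commit:

import torch

def pick_device() -> str:
    """Prefer CUDA, then Apple MPS, then CPU."""
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"

DEVICE = pick_device()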
15 changes: 3 additions & 12 deletions service/requirements-arc.txt
@@ -2,15 +2,6 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/
mkl-dpcpp==2024.2.1
-torch==2.3.1.post0+cxx11.abi
-torchvision==0.18.1.post0+cxx11.abi
-torchaudio==2.3.1.post0+cxx11.abi
-intel-extension-for-pytorch==2.3.110.post0+xpu
-
-# IPEX-LLM
---pre
-ipex_llm==2.2.0b2
-bigdl-core-xe-23==2.6.0b2
-bigdl-core-xe-addons-23==2.6.0b2
-bigdl-core-xe-batch-23==2.6.0b2
-onednn-devel==2024.1.1
+torch==2.3.1
+torchvision==0.18.1
+torchaudio==2.3.1
3 changes: 2 additions & 1 deletion service/web_api.py
@@ -29,9 +29,10 @@
UpscaleImageParams,
)
import paint_biz
+import aipg_utils

Check failure on line 32 in service/web_api.py (GitHub Actions / ruff): Ruff (F401) `aipg_utils` imported but unused
-# import rag
import llm_biz
import aipg_utils as utils
+import rag
import model_config
from model_downloader import HFPlaygroundDownloader
from psutil._common import bytes2human
2 changes: 1 addition & 1 deletion service/xpu_hijacks.py
@@ -5,7 +5,7 @@
from functools import wraps
from contextlib import nullcontext
import torch
-import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
+import intel_extension_for_pytorch as ipex  # pylint: disable=import-error, unused-import
import numpy as np


