Skip to content

Commit

Permalink
refactor: clean up llm logic in frontend code
Browse files Browse the repository at this point in the history
  • Loading branch information
mschuettlerTNG committed Feb 23, 2025
1 parent 0c7ca98 commit 4cf1e0a
Show file tree
Hide file tree
Showing 16 changed files with 278 additions and 387 deletions.
2 changes: 1 addition & 1 deletion WebUI/src/App.vue
Original file line number Diff line number Diff line change
Expand Up @@ -369,7 +369,7 @@ function switchTab(index: number) {
}
watch(textInference, (newSetting, _oldSetting) => {
if (newSetting.backend === 'LLAMA.CPP') {
if (newSetting.backend === 'llamaCPP') {
answer.value!.disableRag()
} else {
answer.value!.restoreRagState()
Expand Down
38 changes: 32 additions & 6 deletions WebUI/src/assets/js/store/backendServices.ts
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
import { defineStore } from 'pinia'

const backends = ['ai-backend', 'comfyui-backend', 'llamacpp-backend', 'openvino-backend'] as const
export type BackendServiceName = (typeof backends)[number]

export const useBackendServices = defineStore(
'backendServices',
() => {
const currentServiceInfo = ref<ApiServiceInformation[]>([])
const serviceListeners: Map<BackendServiceName, BackendServiceSetupProgressListener> = new Map([
['ai-backend', new BackendServiceSetupProgressListener('ai-backend')],
['comfyui-backend', new BackendServiceSetupProgressListener('comfyui-backend')],
['llamacpp-backend', new BackendServiceSetupProgressListener('llamacpp-backend')],
['openvino-backend', new BackendServiceSetupProgressListener('openvino-backend')],
])
const serviceListeners = new Map(
backends.map((b) => [b, new BackendServiceSetupProgressListener(b)]),
)

window.electronAPI
.getServices()
Expand Down Expand Up @@ -95,12 +95,38 @@ export const useBackendServices = defineStore(
return window.electronAPI.sendStopSignal(serviceName)
}

const lastUsedBackend = ref<BackendServiceName | null>(null)

/**
 * Remember which inference backend served the most recent request.
 * `resetLastUsedInferenceBackend` reads this later to restart the idle
 * backend once a different one takes over.
 */
function updateLastUsedBackend(currentInferenceBackend: BackendServiceName) {
  // Single source of truth for the "previous backend" bookkeeping.
  lastUsedBackend.value = currentInferenceBackend
}

/**
 * Restart (stop, then start) the backend that served the previous request
 * when the user switches to a different inference backend, so the now-idle
 * service releases its resources.
 *
 * No-op when nothing ran before or the backend did not change. Failures are
 * logged but never propagated: a broken restart must not break the switch.
 */
async function resetLastUsedInferenceBackend(currentInferenceBackend: BackendServiceName) {
  const previousBackend = lastUsedBackend.value
  // Nothing to reset on first use or when the backend stayed the same.
  if (previousBackend === null || previousBackend === currentInferenceBackend) return
  try {
    const stopStatus = await stopService(previousBackend)
    console.info(`unused service ${previousBackend} now in state ${stopStatus}`)
    const startStatus = await startService(previousBackend)
    console.info(`service ${previousBackend} now in state ${startStatus}`)
  } catch (e) {
    // Best effort only — log and continue with the newly selected backend.
    console.warn(
      `Could not reset last used inference backend ${previousBackend} due to ${e}`,
    )
  }
}

return {
info: currentServiceInfo,
serviceInfoUpdateReceived: serviceInfoUpdatePresent,
allRequiredSetUp,
allRequiredRunning,
initalStartupRequestComplete,
lastUsedBackend,
updateLastUsedBackend,
resetLastUsedInferenceBackend,
startAllSetUpServices,
setUpService,
startService,
Expand Down
56 changes: 0 additions & 56 deletions WebUI/src/assets/js/store/globalSetup.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
import { defineStore } from 'pinia'
import * as util from '../util'
import { useI18N } from './i18n'
import { useBackendServices } from './backendServices'

type GlobalSetupState = 'running' | 'verifyBackend' | 'manageInstallations' | 'loading' | 'failed'
type LastUsedBackend = BackendServiceName | 'None'

export const useGlobalSetup = defineStore('globalSetup', () => {
const state = reactive<KVObject>({
Expand All @@ -14,7 +12,6 @@ export const useGlobalSetup = defineStore('globalSetup', () => {
})

const defaultBackendBaseUrl = ref('http://127.0.0.1:9999')
const lastUsedBackend = ref<LastUsedBackend>('None')

const models = ref<ModelLists>({
llm: new Array<string>(),
Expand All @@ -31,9 +28,6 @@ export const useGlobalSetup = defineStore('globalSetup', () => {
resolution: 0,
quality: 0,
enableRag: false,
llm_model: 'microsoft/Phi-3-mini-4k-instruct',
ggufLLM_model: 'bartowski/Llama-3.2-3B-Instruct-GGUF/Llama-3.2-3B-Instruct-Q4_K_S.gguf',
openvinoLLM_model: 'OpenVINO/TinyLlama-1.1B-Chat-v1.0-int4-ov',
sd_model: 'Lykon/dreamshaper-8',
inpaint_model: 'Lykon/dreamshaper-8-inpainting',
negativePrompt: 'bad hands, nsfw',
Expand Down Expand Up @@ -74,8 +68,6 @@ export const useGlobalSetup = defineStore('globalSetup', () => {
const errorMessage = ref('')
const hdPersistentConfirmation = ref(localStorage.getItem('HdPersistentConfirmation') === 'true')

const backendServices = useBackendServices()

watchEffect(() => {
localStorage.setItem('HdPersistentConfirmation', hdPersistentConfirmation.value.toString())
})
Expand Down Expand Up @@ -222,27 +214,6 @@ export const useGlobalSetup = defineStore('globalSetup', () => {
assertSelectExist()
}

// Record the inference backend that served the most recent request so that
// resetLastUsedInferenceBackend() can later restart it when another backend
// takes over. (Removed side of the diff: this logic moved to backendServices.)
function updateLastUsedBackend(currentInferenceBackend: BackendServiceName) {
  lastUsedBackend.value = currentInferenceBackend
}

// Restart (stop, then start) the previously used inference backend when the
// user switches to a different one, so the idle service frees its resources.
// (Removed side of the diff: this logic moved to the backendServices store.)
async function resetLastUsedInferenceBackend(currentInferenceBackend: BackendServiceName) {
  const lastUsedBackendSnapshot = lastUsedBackend.value
  // 'None' is this store's sentinel for "no backend used yet".
  if (lastUsedBackendSnapshot === 'None' || lastUsedBackendSnapshot === currentInferenceBackend) {
    return
  }
  try {
    const stopStatus = await backendServices.stopService(lastUsedBackendSnapshot)
    console.info(`unused service ${lastUsedBackendSnapshot} now in state ${stopStatus}`)
    const startStatus = await backendServices.startService(lastUsedBackendSnapshot)
    console.info(`service ${lastUsedBackendSnapshot} now in state ${startStatus}`)
  } catch (e) {
    // Best effort: a failed restart must not break the backend switch itself.
    console.warn(
      `Could not reset last used inference backend ${lastUsedBackendSnapshot} due to ${e}`,
    )
  }
}

function assertSelectExist() {
let changeUserSetup = false
if (models.value.llm.length > 0 && !models.value.llm.includes(modelSettings.llm_model)) {
Expand Down Expand Up @@ -288,28 +259,6 @@ export const useGlobalSetup = defineStore('globalSetup', () => {
}
}

/**
 * Ask the default backend which of the given models are already downloaded.
 *
 * @param params model descriptors to check
 * @returns one result per requested model, each flagging `already_loaded`
 * @throws Error when the backend replies with a non-2xx status
 */
async function checkModelAlreadyLoaded(params: CheckModelAlreadyLoadedParameters[]) {
  const response = await fetch(`${defaultBackendBaseUrl.value}/api/checkModelAlreadyLoaded`, {
    method: 'POST',
    body: JSON.stringify({ data: params }),
    headers: {
      'Content-Type': 'application/json',
    },
  })
  // Fail loudly on HTTP errors instead of handing an error page to the JSON
  // parser (which would surface as an unrelated SyntaxError).
  if (!response.ok) {
    throw new Error(`checkModelAlreadyLoaded failed with status ${response.status}`)
  }
  const parsedResponse = (await response.json()) as ApiResponse & {
    data: CheckModelAlreadyLoadedResult[]
  }
  return parsedResponse.data
}

/**
 * Check (via the backend proxy) whether a Hugging Face repository exists.
 *
 * @param repo_id repository id, e.g. 'org/model' — URL-encoded before being
 *   placed in the query string, since repo ids contain '/'
 * @returns the backend's `exists` flag; false on an HTTP-level failure
 */
async function checkIfHuggingFaceUrlExists(repo_id: string) {
  const response = await fetch(
    `${defaultBackendBaseUrl.value}/api/checkHFRepoExists?repo_id=${encodeURIComponent(repo_id)}`,
  )
  // Treat transport/HTTP errors as "not found" rather than parsing an error body.
  if (!response.ok) {
    return false
  }
  const data = await response.json()
  return data.exists
}

return {
state,
modelSettings,
Expand All @@ -319,20 +268,15 @@ export const useGlobalSetup = defineStore('globalSetup', () => {
apiHost: defaultBackendBaseUrl,
graphicsList,
loadingState,
lastUsedBackend,
errorMessage,
hdPersistentConfirmation,
updateLastUsedBackend,
resetLastUsedInferenceBackend,
initSetup,
applyPathsSettings,
applyModelSettings,
refreshLLMModles,
refreshSDModles,
refreshInpaintModles,
refreshLora,
checkModelAlreadyLoaded: checkModelAlreadyLoaded,
checkIfHuggingFaceUrlExists,
applyPresetModelSettings,
restorePathsSettings,
}
Expand Down
10 changes: 5 additions & 5 deletions WebUI/src/assets/js/store/imageGeneration.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@ import { useComfyUi } from './comfyUi'
import { useStableDiffusion } from './stableDiffusion'
import { useI18N } from './i18n'
import * as Const from '../const'
import { useGlobalSetup } from './globalSetup'
import * as toast from '@/assets/js/toast.ts'
import { useModels } from './models'

export type StableDiffusionSettings = {
resolution: 'standard' | 'hd' | 'manual' // ~ modelSettings.resolution 0, 1, 3
Expand Down Expand Up @@ -386,7 +386,7 @@ export const useImageGeneration = defineStore(

const comfyUi = useComfyUi()
const stableDiffusion = useStableDiffusion()
const globalSetup = useGlobalSetup()
const models = useModels()
const i18nState = useI18N().state

const hdWarningDismissed = ref(false)
Expand Down Expand Up @@ -690,12 +690,12 @@ export const useImageGeneration = defineStore(
const checkList: CheckModelAlreadyLoadedParameters[] =
workflow.comfyUIRequirements.requiredModels.map(extractDownloadModelParamsFromString)
const checkedModels: CheckModelAlreadyLoadedResult[] =
await globalSetup.checkModelAlreadyLoaded(checkList)
await models.checkModelAlreadyLoaded(checkList)
const modelsToBeLoaded = checkedModels.filter(
(checkModelExistsResult) => !checkModelExistsResult.already_loaded,
)
for (const item of modelsToBeLoaded) {
if (!(await globalSetup.checkIfHuggingFaceUrlExists(item.repo_id))) {
if (!(await models.checkIfHuggingFaceUrlExists(item.repo_id))) {
toast.error(`declared model ${item.repo_id} does not exist. Aborting Generation.`)
return []
}
Expand Down Expand Up @@ -723,7 +723,7 @@ export const useImageGeneration = defineStore(
})
}

const result = await globalSetup.checkModelAlreadyLoaded(checkList)
const result = await models.checkModelAlreadyLoaded(checkList)
return result.filter((checkModelExistsResult) => !checkModelExistsResult.already_loaded)
}

Expand Down
Loading

0 comments on commit 4cf1e0a

Please sign in to comment.