import { ref, computed, onMounted } from "vue"
import { useClient } from "@servicestack/vue"
import { createErrorStatus } from "@servicestack/client"
import { ActiveAiModels, QueryPrompts, OpenAiChatCompletion } from "dtos"
| 5 | + |
| 6 | +export default { |
| 7 | + template:` |
| 8 | + <div v-if="system"> |
| 9 | + <button @click="show=!show" type="button" class="-ml-3 bg-white text-gray-600 hover:text-gray-900 group w-full flex items-center pr-2 py-2 text-left text-sm font-medium"> |
| 10 | + <svg v-if="show" class="text-gray-400 rotate-90 mr-0.5 flex-shrink-0 h-5 w-5 transform group-hover:text-gray-400 transition-colors ease-in-out duration-150" viewBox="0 0 20 20" aria-hidden="true"><path d="M6 6L14 10L6 14V6Z" fill="currentColor"></path></svg> |
| 11 | + <svg v-else class="text-gray-300 mr-0.5 flex-shrink-0 h-5 w-5 transform group-hover:text-gray-400 transition-colors ease-in-out duration-150" viewBox="0 0 20 20" aria-hidden="true"><path d="M6 6L14 10L6 14V6Z" fill="currentColor"></path></svg> |
| 12 | + AI Prompt Generator |
| 13 | + </button> |
| 14 | + <div v-if="show"> |
| 15 | + <form class="grid grid-cols-6 gap-4" @submit.prevent="send()" :disabled="!validPrompt"> |
| 16 | + <div class="col-span-6 sm:col-span-2"> |
| 17 | + <TextInput id="subject" v-model="subject" label="subject" placeholder="Use AI to generate image prompts for..." /> |
| 18 | + </div> |
| 19 | + <div class="col-span-6 sm:col-span-2"> |
| 20 | + <Autocomplete id="model" :options="models" v-model="model" label="model" |
| 21 | + :match="(x, value) => x.toLowerCase().includes(value.toLowerCase())" |
| 22 | + placeholder="Select Model..."> |
| 23 | + <template #item="name"> |
| 24 | + <div class="flex items-center"> |
| 25 | + <Icon class="h-6 w-6 flex-shrink-0" :src="'/icons/models/' + name" loading="lazy" /> |
| 26 | + <span class="ml-3 truncate">{{name}}</span> |
| 27 | + </div> |
| 28 | + </template> |
| 29 | + </Autocomplete> |
| 30 | + </div> |
| 31 | + <div class="col-span-6 sm:col-span-1"> |
| 32 | + <TextInput type="number" id="count" v-model="count" label="count" min="1" /> |
| 33 | + </div> |
| 34 | + <div class="col-span-6 sm:col-span-1 align-bottom"> |
| 35 | + <div> </div> |
| 36 | + <PrimaryButton :disabled="!validPrompt">Generate</PrimaryButton> |
| 37 | + </div> |
| 38 | + </form> |
| 39 | + <Loading v-if="client.loading.value">Asking {{model}}...</Loading> |
| 40 | + <ErrorSummary v-else-if="error" :status="error" /> |
| 41 | + <div v-else-if="results.length" class="mt-4"> |
| 42 | + <div v-for="result in results" @click="$emit('selected',result)" class="message mb-2 cursor-pointer rounded-lg inline-flex justify-center rounded-lg text-sm py-3 px-4 bg-gray-50 text-slate-900 ring-1 ring-slate-900/10 hover:bg-white/25 hover:ring-slate-900/15"> |
| 43 | + {{result}} |
| 44 | + </div> |
| 45 | + </div> |
| 46 | + </div> |
| 47 | + </div> |
| 48 | + `, |
    emits:['selected'],
    props: {
        promptId: String,
        systemPrompt: String,
    },
    setup(props) {
        const client = useClient()
        const request = ref(new OpenAiChatCompletion({ }))
        const system = ref(props.systemPrompt)
        const subject = ref('')
        const defaults = {
            show: false,
            model: 'gemini-flash',
            count: 3,
        }
        // Restore saved preferences from localStorage, falling back to defaults
        const prefsKey = 'img2txt.gen.prefs'
        const prefs = JSON.parse(localStorage.getItem(prefsKey) ?? JSON.stringify(defaults))
        const show = ref(prefs.show)
        const count = ref(prefs.count)
        const model = ref(prefs.model)
        const error = ref()
        const models = ref([])
        const results = ref([])
        // A prompt can only be sent once a subject, model and count are all provided
        const validPrompt = computed(() => subject.value && model.value && count.value)

        function savePrefs() {
            prefs.show = show.value
            prefs.model = model.value
            prefs.count = count.value
            localStorage.setItem(prefsKey, JSON.stringify(prefs))
        }

        onMounted(async () => {
            // Resolve the system prompt from the server when one isn't supplied via props
            if (!system.value && props.promptId) {
                const apiPrompt = await client.api(new QueryPrompts({
                    id: props.promptId
                }))
                system.value = apiPrompt?.response?.results?.[0]
            }

            // Always load the active models so the Autocomplete has options
            const api = await client.api(new ActiveAiModels())
            models.value = api.response?.results ?? []
            models.value.sort((a,b) => a.localeCompare(b))
        })

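        // send() asks the selected chat model for image prompts and expects a
        // JSON array back, e.g. (illustrative output only, actual results vary):
        //   ["a cinematic photo of a red fox at dawn",
        //    "a watercolor painting of a red fox in a snowy forest"]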
        async function send() {
            if (!validPrompt.value) return
            savePrefs()

            const content = `Provide ${count.value} great descriptive prompts to generate images of ${subject.value} in Stable Diffusion SDXL and Midjourney. Respond with only the prompts in a JSON array. Example ["prompt1","prompt2"]`

            const msgs = [
                { role:'system', content:system.value },
                { role:'user', content },
            ]

            // Reuse the request ref instead of shadowing it with a new local
            request.value = new OpenAiChatCompletion({
                tag: "admin",
                model: model.value,
                messages: msgs,
                temperature: 0.7,
                maxTokens: 2048,
            })
            error.value = null
            const api = await client.api(request.value)
            error.value = api.error
            if (api.response) {
                let json = api.response?.choices?.[0]?.message?.content?.trim() ?? ''
                console.debug(api.response)
                if (json) {
                    results.value = []
                    // Models often wrap JSON answers in a ```json ... ``` fence; strip it
                    const docPrefix = '```json'
                    if (json.startsWith(docPrefix)) {
                        json = json.substring(docPrefix.length)
                        const fenceEnd = json.lastIndexOf('```')
                        if (fenceEnd >= 0) json = json.substring(0, fenceEnd)
                    }
                    try {
                        console.log('json', json)
                        const obj = JSON.parse(json)
                        if (Array.isArray(obj)) {
                            results.value = obj
                        }
                    } catch(e) {
                        console.warn('could not parse json', e, json)
                    }
                }
                if (!results.value.length) {
                    error.value = createErrorStatus('Could not parse prompts')
                }
            }
        }

        return { client, system, request, show, subject, count, models, model, error, results, validPrompt, send }
    }
}
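
// Usage sketch: the component name, file path and parent wiring below are
// assumptions for illustration, not part of this file.
//
//   import AiPromptGenerator from "./AiPromptGenerator.mjs"   // hypothetical path
//
//   <AiPromptGenerator prompt-id="image-prompts"
//                      @selected="prompt => imagePrompt = prompt" />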