Commit 93dc9eb

Author: Attashe (committed)
Merge branch 'main' into ace_plus_plus_patch-pr
2 parents: 29ddb7f + 64f3e56

30 files changed: +931 −210 lines

30 files changed

+931
-210
lines changed

docs/contributing/dev-environment.md (1 addition, 1 deletion)

@@ -41,7 +41,7 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
 With the modifications made, the install command should look something like this:

 ```sh
-uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu124 --reinstall
+uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu126 --reinstall
 ```

 6. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI.

docs/installation/manual.md (8 additions, 1 deletion)

@@ -71,7 +71,14 @@ The following commands vary depending on the version of Invoke being installed a
 7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.

-    === "Invoke v5 or later"
+    === "Invoke v5.10.0 and later"
+
+        - If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
+        - If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
+        - If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
+        - **In all other cases, do not use an index.**
+
+    === "Invoke v5.0.0 to v5.9.1"

        - If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
        - If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
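As a quick illustration of the new v5.10.0+ decision table, here is a minimal Python sketch. It is illustrative only; the `has_nvidia_gpu` and `has_amd_gpu` flags are hypothetical inputs you would supply yourself, not something Invoke detects for you.

```python
import platform


def torch_index_url(has_nvidia_gpu: bool, has_amd_gpu: bool) -> str | None:
    """Pick the torch index URL per the v5.10.0+ table (sketch, not installer code)."""
    system = platform.system()  # "Windows", "Linux", "Darwin", ...
    if has_nvidia_gpu and system in ("Windows", "Linux"):
        return "https://download.pytorch.org/whl/cu126"
    if system == "Linux" and has_amd_gpu:
        return "https://download.pytorch.org/whl/rocm6.2.4"
    if system == "Linux":
        return "https://download.pytorch.org/whl/cpu"
    return None  # in all other cases, do not use an index


# Example: a Linux machine with no GPU resolves to the CPU wheel index.
print(torch_index_url(has_nvidia_gpu=False, has_amd_gpu=False))
```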

invokeai/app/api/dependencies.py (0 additions, 1 deletion)

@@ -113,7 +113,6 @@ def initialize(
                 safe_globals=[torch.Tensor],
                 ephemeral=True,
             ),
-            max_cache_size=0,
         )
         conditioning = ObjectSerializerForwardCache(
             ObjectSerializerDisk[ConditioningFieldData](
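For context: `ObjectSerializerForwardCache` wraps the on-disk serializer with an in-memory read cache, and the removed `max_cache_size=0` argument presumably pinned that cache to zero entries; dropping it lets the class default apply. A minimal, hypothetical sketch of the forward-cache pattern follows. This is not the actual InvokeAI class; the names, the dict-backed store, and the default size are assumptions for illustration.

```python
from collections import OrderedDict
from typing import Generic, TypeVar

T = TypeVar("T")


class ForwardCache(Generic[T]):
    """Hypothetical LRU-style read cache in front of a slower backing store."""

    def __init__(self, store: dict[str, T], max_cache_size: int = 20):
        self._store = store                      # stands in for the on-disk serializer
        self._cache: OrderedDict[str, T] = OrderedDict()
        self._max = max_cache_size               # 0 means: never keep anything in memory

    def load(self, key: str) -> T:
        if key in self._cache:                   # fast path: already in memory
            self._cache.move_to_end(key)
            return self._cache[key]
        value = self._store[key]                 # slow path: read from the backing store
        if self._max > 0:
            self._cache[key] = value
            if len(self._cache) > self._max:     # evict the least recently used entry
                self._cache.popitem(last=False)
        return value


# Example: with a cache size of 1, repeated loads of the same key stay in memory.
cache = ForwardCache({"a": 1, "b": 2}, max_cache_size=1)
print(cache.load("a"), cache.load("b"), cache.load("a"))  # 1 2 1
```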

invokeai/app/invocations/metadata_linked.py (141 additions, 1 deletion)

@@ -39,7 +39,17 @@
     VAEField,
     VAEOutput,
 )
-from invokeai.app.invocations.primitives import BooleanOutput, FloatOutput, IntegerOutput, LatentsOutput, StringOutput
+from invokeai.app.invocations.primitives import (
+    BooleanCollectionOutput,
+    BooleanOutput,
+    FloatCollectionOutput,
+    FloatOutput,
+    IntegerCollectionOutput,
+    IntegerOutput,
+    LatentsOutput,
+    StringCollectionOutput,
+    StringOutput,
+)
 from invokeai.app.invocations.scheduler import SchedulerOutput
 from invokeai.app.invocations.t2i_adapter import T2IAdapterField, T2IAdapterInvocation
 from invokeai.app.services.shared.invocation_context import InvocationContext

@@ -1162,3 +1172,133 @@ def invoke(self, context: InvocationContext) -> MDT2IAdapterListOutput:
         adapters = append_list(T2IAdapterField, i.t2i_adapter, adapters)

         return MDT2IAdapterListOutput(t2i_adapter_list=adapters)
+
+
+@invocation(
+    "metadata_to_string_collection",
+    title="Metadata To String Collection",
+    tags=["metadata"],
+    category="metadata",
+    version="1.0.0",
+    classification=Classification.Beta,
+)
+class MetadataToStringCollectionInvocation(BaseInvocation, WithMetadata):
+    """Extracts a string collection value of a label from metadata"""
+
+    label: CORE_LABELS_STRING = InputField(
+        default=CUSTOM_LABEL,
+        description=FieldDescriptions.metadata_item_label,
+        input=Input.Direct,
+    )
+    custom_label: Optional[str] = InputField(
+        default=None,
+        description=FieldDescriptions.metadata_item_label,
+        input=Input.Direct,
+    )
+    default_value: list[str] = InputField(
+        description="The default string collection to use if not found in the metadata"
+    )
+
+    _validate_custom_label = model_validator(mode="after")(validate_custom_label)
+
+    def invoke(self, context: InvocationContext) -> StringCollectionOutput:
+        data: Dict[str, Any] = {} if self.metadata is None else self.metadata.root
+        output = data.get(str(self.custom_label if self.label == CUSTOM_LABEL else self.label), self.default_value)
+
+        return StringCollectionOutput(collection=output)
+
+
+@invocation(
+    "metadata_to_integer_collection",
+    title="Metadata To Integer Collection",
+    tags=["metadata"],
+    category="metadata",
+    version="1.0.0",
+    classification=Classification.Beta,
+)
+class MetadataToIntegerCollectionInvocation(BaseInvocation, WithMetadata):
+    """Extracts an integer value Collection of a label from metadata"""
+
+    label: CORE_LABELS_INTEGER = InputField(
+        default=CUSTOM_LABEL,
+        description=FieldDescriptions.metadata_item_label,
+        input=Input.Direct,
+    )
+    custom_label: Optional[str] = InputField(
+        default=None,
+        description=FieldDescriptions.metadata_item_label,
+        input=Input.Direct,
+    )
+    default_value: list[int] = InputField(description="The default integer to use if not found in the metadata")
+
+    _validate_custom_label = model_validator(mode="after")(validate_custom_label)
+
+    def invoke(self, context: InvocationContext) -> IntegerCollectionOutput:
+        data: Dict[str, Any] = {} if self.metadata is None else self.metadata.root
+        output = data.get(str(self.custom_label if self.label == CUSTOM_LABEL else self.label), self.default_value)
+
+        return IntegerCollectionOutput(collection=output)
+
+
+@invocation(
+    "metadata_to_float_collection",
+    title="Metadata To Float Collection",
+    tags=["metadata"],
+    category="metadata",
+    version="1.0.0",
+    classification=Classification.Beta,
+)
+class MetadataToFloatCollectionInvocation(BaseInvocation, WithMetadata):
+    """Extracts a Float value Collection of a label from metadata"""
+
+    label: CORE_LABELS_FLOAT = InputField(
+        default=CUSTOM_LABEL,
+        description=FieldDescriptions.metadata_item_label,
+        input=Input.Direct,
+    )
+    custom_label: Optional[str] = InputField(
+        default=None,
+        description=FieldDescriptions.metadata_item_label,
+        input=Input.Direct,
+    )
+    default_value: list[float] = InputField(description="The default float to use if not found in the metadata")
+
+    _validate_custom_label = model_validator(mode="after")(validate_custom_label)
+
+    def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
+        data: Dict[str, Any] = {} if self.metadata is None else self.metadata.root
+        output = data.get(str(self.custom_label if self.label == CUSTOM_LABEL else self.label), self.default_value)
+
+        return FloatCollectionOutput(collection=output)
+
+
+@invocation(
+    "metadata_to_bool_collection",
+    title="Metadata To Bool Collection",
+    tags=["metadata"],
+    category="metadata",
+    version="1.0.0",
+    classification=Classification.Beta,
+)
+class MetadataToBoolCollectionInvocation(BaseInvocation, WithMetadata):
+    """Extracts a Boolean value Collection of a label from metadata"""
+
+    label: CORE_LABELS_BOOL = InputField(
+        default=CUSTOM_LABEL,
+        description=FieldDescriptions.metadata_item_label,
+        input=Input.Direct,
+    )
+    custom_label: Optional[str] = InputField(
+        default=None,
+        description=FieldDescriptions.metadata_item_label,
+        input=Input.Direct,
+    )
+    default_value: list[bool] = InputField(description="The default bool to use if not found in the metadata")
+
+    _validate_custom_label = model_validator(mode="after")(validate_custom_label)
+
+    def invoke(self, context: InvocationContext) -> BooleanCollectionOutput:
+        data: Dict[str, Any] = {} if self.metadata is None else self.metadata.root
+        output = data.get(str(self.custom_label if self.label == CUSTOM_LABEL else self.label), self.default_value)
+
+        return BooleanCollectionOutput(collection=output)
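All four new nodes share the same lookup: resolve the effective key (the selected core label, or `custom_label` when the label is `CUSTOM_LABEL`), then fall back to `default_value` if the metadata has no such entry. The following is a self-contained sketch of that pattern outside the invocation framework; the plain dict standing in for the metadata object and the `CUSTOM_LABEL` sentinel value used here are assumptions for illustration.

```python
from typing import Any

# NOTE: placeholder sentinel; the real constant lives in InvokeAI's metadata module.
CUSTOM_LABEL = "* CUSTOM LABEL *"


def extract_collection(
    metadata: dict[str, Any] | None,
    label: str,
    custom_label: str | None,
    default_value: list[Any],
) -> list[Any]:
    """Mirror the lookup shared by the MetadataTo*CollectionInvocation nodes."""
    data: dict[str, Any] = {} if metadata is None else metadata
    key = str(custom_label if label == CUSTOM_LABEL else label)
    return data.get(key, default_value)


# Example: metadata carries a list of seeds under a custom label.
meta = {"seeds": [1, 2, 3]}
print(extract_collection(meta, CUSTOM_LABEL, "seeds", default_value=[]))         # [1, 2, 3]
print(extract_collection(meta, CUSTOM_LABEL, "cfg_scale", default_value=[7.5]))  # [7.5]
```

In the committed code the same lookup is simply typed per output (string, integer, float, and boolean collections), which is why four near-identical classes are added.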

invokeai/frontend/web/public/locales/en.json (2 additions, 3 deletions)

@@ -2393,9 +2393,8 @@
   "whatsNew": {
     "whatsNewInInvoke": "What's New in Invoke",
     "items": [
-      "Workflows: Support for custom string drop-downs in Workflow Builder.",
-      "FLUX: Support for FLUX Fill in Workflows and Canvas.",
-      "LLaVA OneVision VLLM: Beta support in Workflows."
+      "CogView4: Support for CogView4 models in Canvas and Workflows.",
+      "Updated Dependencies: Invoke now runs on the latest version of its dependencies, including Python 3.12 and Pytorch 2.6.0."
     ],
     "readReleaseNotes": "Read Release Notes",
     "watchRecentReleaseVideos": "Watch Recent Release Videos",

invokeai/frontend/web/public/locales/it.json (5 additions, 5 deletions)

@@ -636,7 +636,8 @@
     "urlUnauthorizedErrorMessage2": "Scopri come qui.",
     "urlForbidden": "Non hai accesso a questo modello",
     "urlForbiddenErrorMessage": "Potrebbe essere necessario richiedere l'autorizzazione al sito che distribuisce il modello.",
-    "urlUnauthorizedErrorMessage": "Potrebbe essere necessario configurare un gettone API per accedere a questo modello."
+    "urlUnauthorizedErrorMessage": "Potrebbe essere necessario configurare un gettone API per accedere a questo modello.",
+    "fileSize": "Dimensione del file"
   },
   "parameters": {
     "images": "Immagini",

@@ -717,7 +718,8 @@
     "collectionNumberGTExclusiveMax": "{{value}} >= {{exclusiveMaximum}} (excl max)",
     "collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (excl min)",
     "collectionEmpty": "raccolta vuota",
-    "batchNodeCollectionSizeMismatchNoGroupId": "Dimensione della raccolta di gruppo nel Lotto non corrisponde"
+    "batchNodeCollectionSizeMismatchNoGroupId": "Dimensione della raccolta di gruppo nel Lotto non corrisponde",
+    "modelIncompatibleBboxWidth": "La larghezza del riquadro di delimitazione è {{width}} ma {{model}} richiede multipli di {{multiple}}"
   },
   "useCpuNoise": "Usa la CPU per generare rumore",
   "iterations": "Iterazioni",

@@ -1807,7 +1809,6 @@
     "unpublishableInputs": "Questi input non pubblicabili verranno omessi",
     "publishWarnings": "Avvertenze",
     "errorWorkflowHasUnsavedChanges": "Il flusso di lavoro presenta modifiche non salvate",
-    "errorWorkflowHasBatchOrGeneratorNodes": "Il flusso di lavoro ha nodi lotto e/o generatori",
     "errorWorkflowHasInvalidGraph": "Grafico del flusso di lavoro non valido (passare il mouse sul pulsante Invoke per i dettagli)",
     "errorWorkflowHasNoOutputNode": "Nessun nodo di uscita selezionato",
     "warningWorkflowHasUnpublishableInputFields": "Il flusso di lavoro presenta alcuni ingressi non pubblicabili: questi verranno omessi dal flusso di lavoro pubblicato",

@@ -2399,8 +2400,7 @@
     "watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
     "items": [
       "Flussi di lavoro: supporto per menu a discesa di stringhe personalizzate nel Generatore di Flussi di lavoro.",
-      "FLUX: supporto per FLUX Fill in Flussi di lavoro e Tela.",
-      "LLaVA OneVision VLLM: supporto beta nei flussi di lavoro."
+      "FLUX: supporto per FLUX Fill in Flussi di lavoro e Tela."
     ]
   },
   "system": {
