@@ -367,10 +367,10 @@ private enum Vision {
         }
 
         public func callAsFunction(
-            _ x: MLXArray, gridThw: [THW], rotaryPositionEmbedding: MLXArray
+            _ x: MLXArray, frames: [THW], rotaryPositionEmbedding: MLXArray
         ) -> MLXArray {
             let sequenceLength = x.dim(0)
-            let B = gridThw[0].t
+            let B = frames[0].t
             let L = sequenceLength / B
 
             let qkv = qkv(x)
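
Aside (not part of the diff): with the renamed parameter, B is the temporal patch count of the first grid entry and L is the per-frame sequence length. A hypothetical worked example, assuming THW keeps (t, h, w) and the positional initializer seen later in this diff:

    let frames = [THW(2, 16, 16)]       // 2 temporal frames of 16x16 patches
    let sequenceLength = 2 * 16 * 16    // 512 = x.dim(0) for this input
    let B = frames[0].t                 // 2
    let L = sequenceLength / B          // 256 patches attended per frame
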
@@ -435,13 +435,13 @@ private enum Vision {
         }
 
         func callAsFunction(
-            _ hiddenStates: MLXArray, gridThw: [THW], rotaryPositionEmbedding: MLXArray
+            _ hiddenStates: MLXArray, frames: [THW], rotaryPositionEmbedding: MLXArray
         ) -> MLXArray {
             var hiddenStates =
                 hiddenStates
                 + attention(
                     norm1(hiddenStates),
-                    gridThw: gridThw,
+                    frames: frames,
                     rotaryPositionEmbedding: rotaryPositionEmbedding
                 )
             hiddenStates = hiddenStates + mlp(norm2(hiddenStates))
@@ -479,10 +479,10 @@ private enum Vision {
                 spatialMergeSize: 2)
         }
 
-        func rotaryPositionEmbedding(_ gridThw: [THW]) -> MLXArray {
+        func rotaryPositionEmbedding(_ frames: [THW]) -> MLXArray {
            var positionIds = [MLXArray]()
 
-            for row in gridThw {
+            for row in frames {
                 let (t, h, w) = row.values
 
                 var hposIds = expandedDimensions(MLXArray(0 ..< h), axis: 1)
@@ -516,22 +516,22 @@ private enum Vision {
             }
 
             let indices = concatenated(positionIds, axis: 0)
-            let maxGridSize = gridThw.lazy.map { max($0.h, $0.w) }.max() ?? 0
-            let rotaryPositionEmbedFull = rotaryPositionEmbedding(sequenceLength: maxGridSize)[
+            let maxFrameSize = frames.lazy.map { max($0.h, $0.w) }.max() ?? 0
+            let rotaryPositionEmbedFull = rotaryPositionEmbedding(sequenceLength: maxFrameSize)[
                 indices]
 
             return rotaryPositionEmbedFull.reshaped(indices.dim(0), -1)
         }
 
-        public func callAsFunction(_ hiddenStates: MLXArray, gridThw: [THW]) -> MLXArray {
+        public func callAsFunction(_ hiddenStates: MLXArray, frames: [THW]) -> MLXArray {
             var hiddenStates = patchEmbed(hiddenStates)
-            let rotaryPositionEmbedding = rotaryPositionEmbedding(gridThw)
+            let rotaryPositionEmbedding = rotaryPositionEmbedding(frames)
 
-            let batchSize = gridThw.count
+            let batchSize = frames.count
 
             for block in blocks {
                 hiddenStates = block(
-                    hiddenStates, gridThw: gridThw,
+                    hiddenStates, frames: frames,
                     rotaryPositionEmbedding: rotaryPositionEmbedding)
             }
 
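
Aside (not part of the diff): rotaryPositionEmbedding(_:) pairs a height and width index for every patch of every frame, then gathers rows from a table sized by maxFrameSize. A minimal sketch of that pairing for one 2x3 frame, assuming MLX Swift's broadcast/stacked helpers behave as the comments describe:

    import MLX

    // hposIds repeats each row index across columns; wposIds repeats the
    // column indices down rows (mirroring the expandedDimensions call above).
    let (h, w) = (2, 3)
    let hposIds = broadcast(expandedDimensions(MLXArray(0 ..< h), axis: 1), to: [h, w])
    let wposIds = broadcast(expandedDimensions(MLXArray(0 ..< w), axis: 0), to: [h, w])
    // Stacking the flattened ids gives a (h * w, 2) array of (h, w) pairs:
    // [[0,0],[0,1],[0,2],[1,0],[1,1],[1,2]]; maxFrameSize here is max(2, 3) = 3.
    let ids = stacked([hposIds.flattened(), wposIds.flattened()], axis: -1)
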
@@ -585,6 +585,10 @@ private enum Vision {
 /// This is meant to be used with ``Qwen2VL`` and is typically created by ``VLMModelFactory``.
 public class Qwen2VLProcessor: UserInputProcessor {
 
+    enum Qwen2VLProcessorError: Error {
+        case framesIsNil
+    }
+
     private let config: Qwen2VLProcessorConfiguration
     private let tokenizer: any Tokenizer
 
@@ -686,72 +690,74 @@ public class Qwen2VLProcessor: UserInputProcessor {
         return (flattenedPatches, .init(gridT, gridH, gridW))
     }
 
-    public func prepare(prompt: UserInput.Prompt, imageTHW: [THW]?) -> String {
-        // the tokenizer does have a chat template and it expects messages
-        // like this:
-        //
-        // [{'role': 'user', 'content': [{'type': 'text', 'text': 'What are these?'},
-        // {'type': 'image'}, {'type': 'image'}, {'type': 'image'}]}]
-        //
-        // The output of the prompt template is fed into
-        // image_processing_qwen2_vl.preprocess where it is further augmented
-        // by replacing tokens according to imageTHW.
-        //
-        // Neither the structured content nor the postprocessing of the template
-        // are supported in current Tokenizer/Jinja (swift) so handle that here.
-
-        var messages = prompt.asMessages()
-        if messages[0]["role"] != "system" {
+    private func prepareMessages(_ messages: [Message]) -> [Message] {
+        var messages = messages
+        // Add system message if not present
+        if let role = messages[0]["role"] as? String, role != "system" {
             messages.insert(["role": "system", "content": "You are a helpful assistant."], at: 0)
         }
-
-        let lastIndex = messages.count - 1
-        var lastMessage = messages[lastIndex]["content"] ?? ""
-
-        // image_processing_qwen2_vl.preprocess -- inject image_pad tokens for each image
-        let mergeLength = config.mergeSize * config.mergeSize
-        for thw in imageTHW ?? [] {
-            lastMessage += "<|vision_start|>"
-            lastMessage += Array(repeating: "<|image_pad|>", count: thw.product / mergeLength)
-                .joined()
-            lastMessage += "<|vision_end|>"
-        }
-
-        messages[lastIndex]["content"] = lastMessage
-
-        return
-            messages
-            .map {
-                "<|im_start|>\($0["role"] ?? "user")\n\($0["content"] ?? "")<|im_end|>"
-            }
-            .joined(separator: "\n")
-            + "\n<|im_start|>assistant\n"
+        return messages
     }
 
+    // public func prepare(prompt: UserInput.Prompt, frames: [THW]?) throws -> String {
+    //     let messages = prepareMessages(prompt.asMessages())
+    //     let tokens = try tokenizer.applyChatTemplate(messages: messages)
+    //     return tokenizer.decode(tokens: tokens)
+    // }
+
     public func prepare(input: UserInput) throws -> LMInput {
+        // Text-only input
         if input.images.isEmpty {
-            // just a straight text prompt
-            let prompt = prepare(prompt: input.prompt, imageTHW: nil)
-            let promptTokens = try tokenizer.encode(text: prompt)
+            let messages = input.prompt.asMessages()
+            let promptTokens = try tokenizer.applyChatTemplate(messages: messages)
             return LMInput(tokens: MLXArray(promptTokens))
         }
-
-        // image_processing_qwen2_vl.preprocess
-        let images = try input.images.map {
+        // Input with images
+        let pixelsAndFrames = try input.images.map {
             try preprocess(images: [$0.asCIImage()], processing: input.processing)
         }
-        let pixels = concatenated(images.map { $0.0 })
-        let image = LMInput.ProcessedImage(pixels: pixels, imageGridThw: images.map { $0.1 })
-
-        // processing_qwen2_vl.Qwen2VLProcessor
-        let prompt = prepare(prompt: input.prompt, imageTHW: image.imageGridThw)
-        let promptTokens = try tokenizer.encode(text: prompt)
+        let pixelsConcatenated = concatenated(pixelsAndFrames.map { $0.0 })
+        let image = LMInput.ProcessedImage(
+            pixels: pixelsConcatenated, frames: pixelsAndFrames.map { $0.1 })
+        let messages = prepareMessages(input.prompt.asMessages())
+        var promptTokens = try tokenizer.applyChatTemplate(messages: messages)
+        // Replace single image pad token with correct number for each image
+        let mergeLength = config.mergeSize * config.mergeSize
+        let imagePlaceholderTokens = try tokenizer.encode(
+            text: "<|vision_start|><|image_pad|><|vision_end|>")
+        guard let frames = image.frames else {
+            throw Qwen2VLProcessorError.framesIsNil
+        }
+        let placeholderRanges = promptTokens.ranges(of: imagePlaceholderTokens)
+        guard placeholderRanges.count == frames.count else {
+            throw VLMError.processing("Number of image placeholders does not match number of frames")
+        }
+        let replacementSequences = try frames.map { thw in
+            let paddingCount = thw.product / mergeLength
+            return try tokenizer.encode(
+                text:
+                    "<|vision_start|>\(Array(repeating: "<|image_pad|>", count: paddingCount).joined())<|vision_end|>"
+            )
+        }
+        // Build the final array
+        var result: [Int] = []
+        var currentIndex = promptTokens.startIndex
+        for (range, replacement) in zip(placeholderRanges, replacementSequences) {
+            // Add tokens before the placeholder
+            result.append(contentsOf: promptTokens[currentIndex ..< range.lowerBound])
+            // Add replacement sequence
+            result.append(contentsOf: replacement)
+            currentIndex = range.upperBound
+        }
+        // Add any remaining tokens after the last replacement
+        if currentIndex < promptTokens.endIndex {
+            result.append(contentsOf: promptTokens[currentIndex...])
+        }
+        promptTokens = result
 
         let promptArray = MLXArray(promptTokens).expandedDimensions(axis: 0)
         let mask = ones(like: promptArray).asType(.int8)
-
         return LMInput(text: .init(tokens: promptArray, mask: mask), image: image)
     }
-
 }
 
 // MARK: - Model
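
Aside (not part of the diff): each image contributes thw.product / mergeLength pad tokens, e.g. a (1, 16, 16) grid with mergeSize 2 yields 256 / 4 = 64 <|image_pad|> tokens. A self-contained sketch of the same splice on plain Int stand-ins for token ids, relying only on the standard library's ranges(of:) used above:

    let prompt = [1, 7, 8, 9, 2]           // 7, 8, 9 stand in for the ids of
    let placeholder = [7, 8, 9]            // vision_start, image_pad, vision_end
    let replacement = [7, 8, 8, 8, 8, 9]   // paddingCount = thw.product / mergeLength
    var result: [Int] = []
    var currentIndex = prompt.startIndex
    for range in prompt.ranges(of: placeholder) {
        result.append(contentsOf: prompt[currentIndex ..< range.lowerBound])
        result.append(contentsOf: replacement)
        currentIndex = range.upperBound
    }
    result.append(contentsOf: prompt[currentIndex...])
    // result == [1, 7, 8, 8, 8, 8, 9, 2]
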
@@ -779,18 +785,18 @@ public class Qwen2VL: Module, VLMModel, KVCacheDimensionProvider {
         self._languageModel.wrappedValue = Language.LanguageModel(config.textConfiguration)
     }
 
-    private func inputEmbeddings(inputIds: MLXArray, pixelValues: MLXArray?, gridThw: [THW]?)
+    private func inputEmbeddings(inputIds: MLXArray, pixelValues: MLXArray?, frames: [THW]?)
         -> MLXArray
     {
-        guard let pixelValues, let gridThw else {
+        guard let pixelValues, let frames else {
             return languageModel.model.embedTokens(inputIds[.newAxis, .ellipsis])
         }
 
         // Get the input embeddings from the language model
         let inputEmbeds = languageModel.model.embedTokens(inputIds)
 
         // Get the output hidden states from the vision model
-        var hiddenStates = self.visionModel(pixelValues, gridThw: gridThw)
+        var hiddenStates = self.visionModel(pixelValues, frames: frames)
 
         if hiddenStates.ndim == 2 {
             hiddenStates = hiddenStates[.newAxis, 0..., 0...]
@@ -820,13 +826,13 @@ public class Qwen2VL: Module, VLMModel, KVCacheDimensionProvider {
     public func prepare(_ input: LMInput, cache: [any KVCache], windowSize: Int?) throws
         -> PrepareResult
     {
-        let gridThw = input.image?.imageGridThw
+        let frames = input.image?.frames
 
         let dtype = visionModel.patchEmbed.proj.weight.dtype
         let pixels = input.image?.pixels.asType(dtype)
 
         let inputEmbeddings = self.inputEmbeddings(
-            inputIds: input.text.tokens, pixelValues: pixels, gridThw: gridThw)
+            inputIds: input.text.tokens, pixelValues: pixels, frames: frames)
 
         let result = languageModel(nil, cache: cache, inputEmbedding: inputEmbeddings)