@@ -686,69 +686,65 @@ public class Qwen2VLProcessor: UserInputProcessor {
         return (flattenedPatches, .init(gridT, gridH, gridW))
     }
 
-    public func prepare(prompt: UserInput.Prompt, imageTHW: [THW]?) -> String {
-        // the tokenizer does have a chat template and it expects messages
-        // like this:
-        //
-        // [{'role': 'user', 'content': [{'type': 'text', 'text': 'What are these?'},
-        // {'type': 'image'}, {'type': 'image'}, {'type': 'image'}]}]
-        //
-        // The output of the prompt template is fed into
-        // image_processing_qwen2_vl.preprocess where it is further augmented
-        // by replacing tokens according to imageTHW.
-        //
-        // Neither the structured content nor the postprocessing of the template
-        // are supported in current Tokenizer/Jinja (swift) so handle that here.
-
-        var messages = prompt.asMessages()
-        if messages[0]["role"] != "system" {
+    private func prepareMessages(_ messages: [Message]) -> [Message] {
+        var messages = messages
+        print(messages)
+        // Add system message if not present
+        if let role = messages[0]["role"] as? String, role != "system" {
             messages.insert(["role": "system", "content": "You are a helpful assistant."], at: 0)
         }
 
-        let lastIndex = messages.count - 1
-        var lastMessage = messages[lastIndex]["content"] ?? ""
-
-        // image_processing_qwen2_vl.preprocess -- inject image_pad tokens for each image
-        let mergeLength = config.mergeSize * config.mergeSize
-        for thw in imageTHW ?? [] {
-            lastMessage += "<|vision_start|>"
-            lastMessage += Array(repeating: "<|image_pad|>", count: thw.product / mergeLength)
-                .joined()
-            lastMessage += "<|vision_end|>"
-        }
-
-        messages[lastIndex]["content"] = lastMessage
-
-        return
-            messages
-            .map {
-                "<|im_start|>\($0["role"] ?? "user")\n\($0["content"] ?? "")<|im_end|>"
-            }
-            .joined(separator: "\n")
-            + "\n<|im_start|>assistant\n"
+        return messages
     }
 
+    // public func prepare(prompt: UserInput.Prompt, imageTHW: [THW]?) throws -> String {
+    //     let messages = prepareMessages(prompt.asMessages())
+    //     let tokens = try tokenizer.applyChatTemplate(messages: messages)
+    //     return tokenizer.decode(tokens: tokens)
+    // }
+
     public func prepare(input: UserInput) throws -> LMInput {
+        // Text-only input
         if input.images.isEmpty {
-            // just a straight text prompt
-            let prompt = prepare(prompt: input.prompt, imageTHW: nil)
-            let promptTokens = try tokenizer.encode(text: prompt)
+            let messages = input.prompt.asMessages()
+            let promptTokens = try tokenizer.applyChatTemplate(messages: messages)
             return LMInput(tokens: MLXArray(promptTokens))
         }
 
-        // image_processing_qwen2_vl.preprocess
+        // Input with images
         let images = try input.images.map {
             try preprocess(images: [$0.asCIImage()], processing: input.processing)
         }
         let pixels = concatenated(images.map { $0.0 })
         let image = LMInput.ProcessedImage(pixels: pixels, imageGridThw: images.map { $0.1 })
 
-        // processing_qwen2_vl.Qwen2VLProcessor
-        let prompt = prepare(prompt: input.prompt, imageTHW: image.imageGridThw)
-        let promptTokens = try tokenizer.encode(text: prompt)
+        // Get tokens from messages
+        let messages = prepareMessages(input.prompt.asMessages())
+        var promptTokens = try tokenizer.applyChatTemplate(messages: messages)
+
+        // Replace single image pad token with correct number for each image
+        let imagePadToken = try tokenizer.encode(text: "<|image_pad|>").first!
+        let mergeLength = config.mergeSize * config.mergeSize
+
+        // TODO: This assumes that there is only one image. A better solution is needed for the case when multiple images are included.
+        if let imageGridThw = image.imageGridThw {
+            for thw in imageGridThw {
+                if let padIndex = promptTokens.firstIndex(of: imagePadToken) {
+                    let paddingCount = thw.product / mergeLength
+                    promptTokens.replaceSubrange(
+                        padIndex...(padIndex),
+                        with: Array(repeating: imagePadToken, count: paddingCount)
+                    )
+                }
+            }
+        }
+
+        // TODO: For debugging. Remove later.
+        let promptTokensDecoded = try tokenizer.decode(tokens: promptTokens)
+        print(promptTokensDecoded)
+
         let promptArray = MLXArray(promptTokens).expandedDimensions(axis: 0)
         let mask = ones(like: promptArray).asType(.int8)
-
         return LMInput(text: .init(tokens: promptArray, mask: mask), image: image)
     }
 
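For reference, a minimal standalone sketch of the pad-token expansion step above, with the tokenizer stubbed out: the THW struct, token IDs, grid size, and merge size here are illustrative assumptions, not values from the real model. It only demonstrates the count arithmetic (thw.product / mergeLength) and the replaceSubrange expansion of the single <|image_pad|> placeholder emitted by the chat template.

// Hypothetical stand-ins for the real types and vocabulary.
struct THW {
    let t: Int, h: Int, w: Int
    var product: Int { t * h * w }
}

let imagePadToken = 151655          // assumed ID for <|image_pad|>
let mergeSize = 2                   // assumed config.mergeSize
let grids = [THW(t: 1, h: 36, w: 36)]

// Prompt tokens as produced by the chat template: one <|image_pad|> per image.
var promptTokens = [101, imagePadToken, 102, 103]

let mergeLength = mergeSize * mergeSize
for thw in grids {
    if let padIndex = promptTokens.firstIndex(of: imagePadToken) {
        // 1 * 36 * 36 / 4 = 324 pad tokens for this image
        let paddingCount = thw.product / mergeLength
        promptTokens.replaceSubrange(
            padIndex...padIndex,
            with: Array(repeating: imagePadToken, count: paddingCount)
        )
    }
}

print(promptTokens.count)  // 3 + 324 = 327

As in the diff, firstIndex(of:) keeps matching the already-expanded region once one image has been processed, which is why the single-image assumption noted in the TODO applies.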