@@ -948,6 +948,63 @@ TEST_F(QnnHTPBackendTests, Float32ModelWithFP16PrecisionTest) {
                   0.008f);
 }
 
+// Test that QNN EP only handles nodes with static shapes and rejects nodes with dynamic shape I/O.
+TEST_F(QnnHTPBackendTests, EPRejectsDynamicShapesF32) {
+  // Local function that builds a model in which the last two nodes use dynamic shapes.
+  auto model_build_fn = [](ModelTestBuilder& builder) {
+    NodeArg* input1 = builder.MakeInput<float>(std::vector<int64_t>{1, 2, 8, 8},
+                                               GetFloatDataInRange(0.0f, 1.0f, 128));
+    NodeArg* input2 = builder.MakeInput<int64_t>(std::vector<int64_t>{3}, std::vector<int64_t>{1, 2, 49});
+
+    // Add a Conv with known shapes. QNN EP should support it.
+    NodeArg* weight = builder.MakeInitializer<float>(std::vector<int64_t>{2, 2, 2, 2},
+                                                     GetFloatDataInRange(-0.3f, 0.3f, 16));
+    NodeArg* bias = builder.MakeInitializer<float>(std::vector<int64_t>{2}, {0.0f, 1.0f});
+
+    auto* conv_output = builder.MakeIntermediate();
+    builder.AddNode("Conv", {input1, weight, bias}, {conv_output});
+
+    // Add a Reshape to a dynamic shape. QNN EP should reject this node.
+    auto* reshape_output = builder.MakeIntermediate();
+    builder.AddNode("Reshape", {conv_output, input2}, {reshape_output});
+
+    // Add a Softmax. QNN EP should reject this node because its input has a dynamic shape.
+    NodeArg* output = builder.MakeOutput();
+    builder.AddNode("Softmax", {reshape_output}, {output});
+  };
+
+  // Local function that checks that the nodes with dynamic shape I/O were assigned to CPU EP.
+  std::function<void(const Graph&)> ep_graph_checker = [](const Graph& graph) {
+    for (const Node& node : graph.Nodes()) {
+      const std::string& ep_name = node.GetExecutionProviderType();
+      const std::string& op_type = node.OpType();
+      if (op_type == "Reshape" || op_type == "Softmax") {
+        EXPECT_EQ(ep_name, kCpuExecutionProvider);
+      } else {
+        EXPECT_EQ(ep_name, kQnnExecutionProvider);
+      }
+    }
+  };
+
+  ProviderOptions provider_options;
+#if defined(_WIN32)
+  provider_options["backend_path"] = "QnnHtp.dll";
+#else
+  provider_options["backend_path"] = "libQnnHtp.so";
+#endif
+  provider_options["enable_htp_fp16_precision"] = "1";  // QNN EP will use fp16 precision.
+                                                        // CPU EP will use fp32, so we can relax accuracy requirements.
+
+  RunQnnModelTest(model_build_fn,
+                  provider_options,
+                  /*opset*/ 19,
+                  ExpectedEPNodeAssignment::Some,
+                  /*abs_err*/ 1e-4f,
+                  logging::Severity::kERROR,
+                  /*verify_output*/ true,
+                  &ep_graph_checker);
+}
+
 #endif  // defined(__aarch64__) || defined(_M_ARM64) || defined(__linux__)
 #endif  // !defined(ORT_MINIMAL_BUILD)