@@ -698,7 +698,7 @@ from inference.load_model import load_model,to_cuda,to_float,format_input

# configure input
model_path = "hicfoundation_model/hicfoundation_pretrain.pth.tar" # specify the path to the pre-trained model in your directory
- input_row_size = 4000 # specify the input matrix row size; should be a multiple of 16 (the patch_size of HiCFoundation)
+ input_row_size = 1000 # specify the input matrix row size; should be a multiple of 16 (the patch_size of HiCFoundation)
input_col_size = 128 # specify the input matrix column size; should be a multiple of 16 (the patch_size of HiCFoundation)
total_count = 100000000 # the total read count of your Hi-C matrix
embed_depth = 0 # specify the embedding to use for your purpose; default: 0 (encoder output embeddings)
@@ -723,11 +723,16 @@ total_count=to_float(total_count)
model = to_float(model)

# inference of HiCFoundation
+ input_mat = input_mat.unsqueeze(0) # add batch dimension
+
+ total_count = total_count.unsqueeze(0) if total_count is not None else None # add batch dimension
+
output = model(input_mat, total_count)
output = output[embed_depth] # fetch the embedding of interest
- # output shape: (1, input_row_size/16, input_col_size/16, embedding_dim)
+ output = output.squeeze(0) # remove batch dimension
+ # output shape: (input_row_size/16, input_col_size/16, embedding_dim)
# you can extract the embedding of any patch of interest from this tensor
- mat_embedding = output[0].reshape(-1, output.shape[-1]).mean(dim=0) # (embedding_dim); the encoder embedding dim is 1024, the decoder's is 512
+ mat_embedding = output.reshape(-1, output.shape[-1]).mean(dim=0) # (embedding_dim); the encoder embedding dim is 1024, the decoder's is 512

```
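As a quick sanity check on the batch-dimension handling this change introduces, here is a minimal, self-contained sketch. The model call is mocked with a random tensor of the documented output shape; the grid sizes, patch index, and 1024-dim encoder width used here are illustrative assumptions, not values taken from the repository.

```
import torch

patch_size = 16                 # HiCFoundation patch size, per the comments above
row_size, col_size = 4000, 128  # illustrative only; both must be multiples of 16
embedding_dim = 1024            # encoder embedding width (the decoder's is 512)

# stand-in for output[embed_depth] after the batch dimension is added:
# (batch, row_size/16, col_size/16, embedding_dim)
output = torch.randn(1, row_size // patch_size, col_size // patch_size, embedding_dim)

output = output.squeeze(0)      # remove the batch dimension, as in the diff above
patch_embedding = output[3, 5]  # embedding of the single patch at grid position (3, 5)
mat_embedding = output.reshape(-1, output.shape[-1]).mean(dim=0)  # mean-pool all patches

print(patch_embedding.shape)    # torch.Size([1024])
print(mat_embedding.shape)      # torch.Size([1024])
```

Mean-pooling over the patch grid reproduces the `mat_embedding` computed in the diff, while indexing a single grid cell yields a per-patch embedding.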