# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MiniCPM model configuration"""

from ...configuration_utils import PretrainedConfig
from mindnlp.utils import logging


logger = logging.get_logger(__name__)

MINICPM3_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class MiniCPM3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MiniCPM3Model`]. It is used to instantiate a
    MiniCPM3 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MiniCPM-7B.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the MiniCPM3 model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`MiniCPM3Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
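        qk_nope_head_dim (`int`, *optional*, defaults to 64):
            Dimension of the part of each query/key head that does not carry rotary position information.
        qk_rope_head_dim (`int`, *optional*, defaults to 32):
            Dimension of the part of each query/key head that rotary position embeddings are applied to.
        q_lora_rank (`int`, *optional*, defaults to 768):
            Rank of the low-rank projection used to compress the query states.
        kv_lora_rank (`int`, *optional*, defaults to 256):
            Rank of the low-rank projection used to jointly compress the key and value states.
        v_head_dim (`int`, *optional*):
            Dimension of each value head. If not specified, defaults to `qk_nope_head_dim`.
        head_dim (`int`, *optional*):
            Currently ignored; the effective query/key head dimension is always computed as
            `qk_nope_head_dim + qk_rope_head_dim`.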
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie the input and output word embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on
            how these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
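        scale_emb (`float`, *optional*, defaults to 1):
            Scaling factor applied to the token embeddings.
        dim_model_base (`int`, *optional*, defaults to 1):
            Base model width used to scale the output logits by `hidden_size / dim_model_base`.
        scale_depth (`float`, *optional*, defaults to 1):
            Depth-dependent scaling factor applied to the residual branch outputs.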

    ```python
    >>> from mindnlp.transformers import MiniCPM3Model, MiniCPM3Config

    >>> # Initializing a MiniCPM3 style configuration
    >>> configuration = MiniCPM3Config()

    >>> # Initializing a model from the MiniCPM3 style configuration
    >>> model = MiniCPM3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "minicpm3"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        qk_nope_head_dim=64,
        qk_rope_head_dim=32,
        q_lora_rank=768,
        kv_lora_rank=256,
        v_head_dim=None,
        head_dim=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        scale_emb=1,
        dim_model_base=1,
        scale_depth=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        self.q_lora_rank = q_lora_rank
        self.kv_lora_rank = kv_lora_rank

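        # value heads default to the same width as the no-RoPE part of the query/key heads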
        if v_head_dim is None:
            v_head_dim = qk_nope_head_dim
        self.v_head_dim = v_head_dim

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.scale_emb = scale_emb
        self.dim_model_base = dim_model_base
        self.scale_depth = scale_depth
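        # effective query/key head dimension combines the no-RoPE and RoPE parts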
        self.head_dim = self.qk_nope_head_dim + self.qk_rope_head_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


__all__ = ["MiniCPM3Config"]