Commit 84cf91c

fix pylint error
1 parent: b1d710c
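A commit like this can be re-verified locally by running pylint over the touched package. A minimal sketch, assuming pylint is installed in the current environment; the path and the absence of a project-specific .pylintrc are assumptions, not part of the commit:

    # Hypothetical re-check, not part of this commit: run pylint over the
    # package this commit touches and exit non-zero if messages remain.
    import subprocess
    import sys

    result = subprocess.run(
        [sys.executable, "-m", "pylint", "mindnlp/transformers/models/minicpm3"],
        check=False,
    )
    sys.exit(result.returncode)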

5 files changed, 17 insertions(+), 19 deletions(-)

mindnlp/transformers/models/minicpm3/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -23,4 +23,4 @@
 __all__ = []
 __all__.extend(modeling_minicpm3.__all__)
 __all__.extend(configuration_minicpm3.__all__)
-__all__.extend(tokenization_minicpm3.__all__)
+__all__.extend(tokenization_minicpm3.__all__)
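The removed and re-added lines are textually identical, which in a rendered diff usually indicates a whitespace-only change; the likeliest cause here is a missing final newline (pylint C0304, missing-final-newline). A minimal sketch of that kind of fix, assuming a UTF-8 source file; the helper below is illustrative, not part of the commit:

    # Hypothetical helper: append the final newline that pylint's
    # C0304 (missing-final-newline) check expects.
    from pathlib import Path

    def ensure_final_newline(path: str) -> None:
        p = Path(path)
        text = p.read_text(encoding="utf-8")
        if text and not text.endswith("\n"):
            p.write_text(text + "\n", encoding="utf-8")

    ensure_final_newline("mindnlp/transformers/models/minicpm3/__init__.py")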

mindnlp/transformers/models/minicpm3/configuration_minicpm3.py

Lines changed: 4 additions & 4 deletions
@@ -19,8 +19,8 @@
 # limitations under the License.
 """ MiniCPM model configuration"""

-from ...configuration_utils import PretrainedConfig
 from mindnlp.utils import logging
+from ...configuration_utils import PretrainedConfig


 logger = logging.get_logger(__name__)
@@ -150,11 +150,11 @@ def __init__(
         self.qk_rope_head_dim = qk_rope_head_dim
         self.q_lora_rank = q_lora_rank
         self.kv_lora_rank = kv_lora_rank
-
+
         if v_head_dim is None:
             v_head_dim = qk_nope_head_dim
         self.v_head_dim = v_head_dim
-
+
         # for backward compatibility
         if num_key_value_heads is None:
             num_key_value_heads = num_attention_heads
@@ -182,4 +182,4 @@ def __init__(
             **kwargs,
         )

-__all__ = ["MiniCPM3Config"]
+__all__ = ["MiniCPM3Config"]

mindnlp/transformers/models/minicpm3/modeling_minicpm3.py

Lines changed: 10 additions & 13 deletions
@@ -21,12 +21,12 @@
 import math
 import warnings
 from typing import List, Optional, Tuple, Union, Dict
-import numpy as np

 import mindspore
-from mindspore import Tensor
 from mindspore.common.initializer import initializer, Normal

+from mindnlp.utils import logging
+
 from mindnlp.core import nn, ops
 from mindnlp.core.nn import functional as F
 from mindnlp.core.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@@ -41,9 +41,8 @@
 from ...modeling_utils import PreTrainedModel
 from ...ms_utils import ALL_LAYERNORM_LAYERS

-from mindnlp.utils import logging
 from .configuration_minicpm3 import MiniCPM3Config
-import re
+


 logger = logging.get_logger(__name__)
@@ -205,7 +204,7 @@ def _set_cos_sin_cache(self, seq_len, dtype):
             ext_factors = mindspore.Tensor(self.long_factor, dtype=mindspore.float32)
         else:
             ext_factors = mindspore.Tensor(self.short_factor, dtype=mindspore.float32)
-
+
         freqs = ops.mul(
             ops.outer(t, 1.0 / ext_factors),
             self.inv_freq.to(dtype)
@@ -568,7 +567,7 @@ def forward(
             use_cache=use_cache,
             **kwargs,
         )
-
+
         hidden_states = residual + hidden_states * (self.scale_depth / math.sqrt(self.num_hidden_layers))

         # Fully Connected
@@ -766,7 +765,7 @@ def forward(
                 all_self_attns += (layer_outputs[1],)

         hidden_states = self.norm(hidden_states)
-
+
         # add hidden states from the last decoder layer
         if output_hidden_states:
             all_hidden_states += (hidden_states,)
@@ -884,7 +883,6 @@ def forward(
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
-           shift_labels = shift_labels
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
@@ -963,7 +961,7 @@ def _reorder_cache(past_key_values, beam_idx):
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),
            )
        return reordered_past
-
+
    def chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = "user",
             max_length: int = 4096, num_beams=1, do_sample=True, top_p=0.8, temperature=0.3, logits_processor=None,
             **kwargs):
@@ -975,7 +973,7 @@ def chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = "u
        else:
            gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
                          "temperature": temperature, "logits_processor": logits_processor, **kwargs}
-
+
        history.append({"role": role, "content": query})
        history_str = tokenizer.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
        inputs = tokenizer(history_str, return_tensors='ms')
@@ -1057,7 +1055,6 @@ def forward(

        loss = None
        if labels is not None:
-           labels = labels
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
@@ -1089,10 +1086,10 @@ def forward(
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
-
+
 __all__ = [
     "MiniCPM3Model",
     "MiniCPM3ForCausalLM",
     "MiniCPM3ForSequenceClassification",
     "MiniCPM3PreTrainedModel",
-]
+]
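Besides more import reordering and trailing-whitespace removal, this file drops unused imports (numpy, Tensor, re; pylint W0611, unused-import) and two self-assignments, shift_labels = shift_labels and labels = labels (pylint W0127, self-assigning-variable). A minimal snippet that reproduces both messages, purely for illustration and not taken from the repo:

    # Illustrative only: this triggers the two pylint messages fixed above.
    import re  # W0611: unused-import (never referenced below)

    def passthrough(labels):
        labels = labels  # W0127: self-assigning-variable (a no-op)
        return labels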

mindnlp/transformers/models/minicpm3/tokenization_minicpm3.py

Lines changed: 1 addition & 1 deletion
@@ -427,4 +427,4 @@ def resolve_ast_by_type(value):
        raise Exception(f"Unsupported AST type: {type(value)}")
    return output

-__all__ = ["MiniCPM3Tokenizer"]
+__all__ = ["MiniCPM3Tokenizer"]

requirements/requirements.txt

Lines changed: 1 addition & 0 deletions
@@ -32,4 +32,5 @@ bs4
 tiktoken
 faiss_cpu
 phonemizer
+datamodel_code_generator
 git+https://github.com/lvyufeng/einops
