 import json
 import os
 from datetime import datetime
+from dataclasses import dataclass
 from typing import Any, Dict, List, Literal, Optional
 
 from fastdeploy import envs
@@ -467,7 +468,63 @@ def print(self):
         llm_logger.info("Parallel Configuration Information :")
         for k, v in self.__dict__.items():
             llm_logger.info("{:<20}:{:<6}{}".format(k, "", v))
-        llm_logger.info("==================")
+        llm_logger.info(
+            "=============================================================")
+
+
+@dataclass
+class CommitConfig:
+    """
+    Configuration for tracking version information from version.txt
+
+    Attributes:
+        fastdeploy_commit: Full FastDeploy git commit hash
+        paddle_version: PaddlePaddle version string
+        paddle_commit: PaddlePaddle git commit hash
+        cuda_version: CUDA version string
+        compiler_version: CXX compiler version string
+    """
+    fastdeploy_commit: str = ""
+    paddle_version: str = ""
+    paddle_commit: str = ""
+    cuda_version: str = ""
+    compiler_version: str = ""
+
+    def __post_init__(self):
+        """Automatically load version info when initialized"""
+        self._load_from_version_file()
+
+    def _load_from_version_file(self, file_path: str = "fastdeploy/version.txt"):
+        """Internal method to load version info from file"""
+        try:
+            with open(file_path, 'r') as f:
+                for line in f:
+                    line = line.strip()
+                    if line.startswith("fastdeploy GIT COMMIT ID:"):
+                        self.fastdeploy_commit = line.split(":")[1].strip()
+                    elif line.startswith("Paddle version:"):
+                        self.paddle_version = line.split(":")[1].strip()
+                    elif line.startswith("Paddle GIT COMMIT ID:"):
+                        self.paddle_commit = line.split(":")[1].strip()
+                    elif line.startswith("CUDA version:"):
+                        self.cuda_version = line.split(":")[1].strip()
+                    elif line.startswith("CXX compiler version:"):
+                        self.compiler_version = line.split(":")[1].strip()
+        except FileNotFoundError:
+            llm_logger.info(f"Warning: Version file not found at {file_path}")
+        except Exception as e:
+            llm_logger.info(f"Warning: Could not read version file - {str(e)}")
+
+    def print(self):
+        """
+        print all config
+
+        """
+        llm_logger.info("FastDeploy Commit Information :")
+        for k, v in self.__dict__.items():
+            llm_logger.info("{:<20}:{:<6}{}".format(k, "", v))
+        llm_logger.info(
+            "=============================================================")
 
 
 class Config:
@@ -502,6 +559,7 @@ def __init__(
         cache_config: CacheConfig,
         scheduler_config: SchedulerConfig,
         parallel_config: ParallelConfig,
+        commit_config: CommitConfig = CommitConfig(),
         model_name_or_path: str = None,
         tokenizer: str = None,
         tensor_parallel_size: int = 8,
@@ -559,6 +617,7 @@ def __init__(
         self.cache_config = cache_config
         self.scheduler_config = scheduler_config
         self.parallel_config = parallel_config
+        self.commit_config = commit_config
         self.model_name_or_path = model_name_or_path
         self.tokenizer = tokenizer
         self.max_num_batched_tokens = max_num_batched_tokens
@@ -756,7 +815,11 @@ def print(self, file=None):
             if k == "generation_config" and v is not None:
                 for gck, gcv in v.to_dict().items():
                     llm_logger.info("{:<20}:{:<6}{}".format(gck, "", gcv))
-            elif k == "cache_config" or k == "model_config" or k == "scheduler_config" or k == "parallel_config":
+            elif (k == "cache_config" or
+                  k == "model_config" or
+                  k == "scheduler_config" or
+                  k == "parallel_config" or
+                  k == "commit_config"):
                 v.print()
             else:
                 llm_logger.info("{:<20}:{:<6}{}".format(k, "", v))
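
For reviewers who want to exercise the new CommitConfig locally, here is a minimal sketch of the version.txt format the parser expects and how the class picks it up. The import path and the sample commit/version values are assumptions for illustration only; point the import at whichever module this diff actually modifies.

```python
import os
import tempfile

# Assumed import path -- adjust to the module containing CommitConfig in this diff.
from fastdeploy.engine.config import CommitConfig

# Illustrative version.txt using the "key: value" lines _load_from_version_file() matches.
sample = (
    "fastdeploy GIT COMMIT ID: 0123abc\n"
    "Paddle version: 3.0.0\n"
    "Paddle GIT COMMIT ID: 4567def\n"
    "CUDA version: 12.3\n"
    "CXX compiler version: 11.4.0\n"
)

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "version.txt")
    with open(path, "w") as f:
        f.write(sample)

    # __post_init__ first tries the default fastdeploy/version.txt and only logs a
    # warning if it is missing, so constructing the object never raises.
    commit_config = CommitConfig()
    commit_config._load_from_version_file(path)  # re-load from the sample file
    commit_config.print()  # logs each field followed by the "=====" separator line
```

Because missing or unreadable files are caught and only logged, Config.print() keeps working on builds that ship without a version.txt.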