feat(tracing): align resource attributes with OpenTelemetry semantic conventions #1164

Merged: 1 commit, May 28, 2025
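
The change named in the title, aligning resource attributes with the OpenTelemetry semantic conventions, is not itself visible in the hunks below; what is shown is mostly the removal of `#[instrument(skip_all)]` from small helpers, its addition on a few heavier entry points, and one new boolean argument in dragonfly-client-init. As a rough sketch of what semantic-convention resource attributes look like in Rust (assuming an opentelemetry_sdk version that still exposes `Resource::new`, and using placeholder values rather than the client's real ones):

use opentelemetry::KeyValue;
use opentelemetry_sdk::Resource;

/// Builds a Resource whose attribute keys follow the OpenTelemetry resource
/// semantic conventions ("service.name", "service.version", "host.name").
/// The opentelemetry-semantic-conventions crate exposes these keys as
/// constants; plain strings are used here to keep the sketch self-contained.
fn tracing_resource(service_name: &str, service_version: &str, host_name: &str) -> Resource {
    Resource::new(vec![
        KeyValue::new("service.name", service_name.to_string()),
        KeyValue::new("service.version", service_version.to_string()),
        KeyValue::new("host.name", host_name.to_string()),
    ])
}
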
15 changes: 11 additions & 4 deletions Cargo.lock

Some generated files are not rendered by default.

4 changes: 1 addition & 3 deletions dragonfly-client-backend/src/hdfs.rs
@@ -31,6 +31,7 @@ pub const HDFS_SCHEME: &str = "hdfs";
const DEFAULT_NAMENODE_PORT: u16 = 9870;

/// Hdfs is a struct that implements the Backend trait.
+#[derive(Default)]
pub struct Hdfs {
/// scheme is the scheme of the HDFS.
scheme: String,
@@ -39,15 +40,13 @@ pub struct Hdfs {
/// Hdfs implements the Backend trait.
impl Hdfs {
/// new returns a new HDFS backend.
-#[instrument(skip_all)]
pub fn new() -> Self {
Self {
scheme: HDFS_SCHEME.to_string(),
}
}

/// operator initializes the operator with the parsed URL and HDFS config.
-#[instrument(skip_all)]
pub fn operator(
&self,
url: Url,
@@ -84,7 +83,6 @@ impl Hdfs {
#[tonic::async_trait]
impl super::Backend for Hdfs {
/// scheme returns the scheme of the HDFS backend.
-#[instrument(skip_all)]
fn scheme(&self) -> String {
self.scheme.clone()
}
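
One note on the hdfs.rs hunk above: the added #[derive(Default)] gives Hdfs a Default value whose scheme is the empty string, which is not the same as Hdfs::new(), which sets scheme to HDFS_SCHEME. Presumably the derive exists to satisfy a lint such as clippy::new_without_default or to allow Hdfs::default() elsewhere; that is an assumption, not something stated in the diff. A standalone illustration of the difference, with the value simplified:

#[derive(Default)]
struct Hdfs {
    scheme: String,
}

impl Hdfs {
    fn new() -> Self {
        Self {
            scheme: "hdfs".to_string(),
        }
    }
}

fn main() {
    // The derived Default leaves scheme empty; new() fills in the HDFS scheme.
    assert_eq!(Hdfs::default().scheme, "");
    assert_eq!(Hdfs::new().scheme, "hdfs");
}
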
3 changes: 0 additions & 3 deletions dragonfly-client-backend/src/http.rs
@@ -43,7 +43,6 @@ pub struct HTTP {
/// HTTP implements the http interface.
impl HTTP {
/// new returns a new HTTP.
-#[instrument(skip_all)]
pub fn new(scheme: &str) -> Result<HTTP> {
// Default TLS client config with no validation.
let client_config_builder = rustls::ClientConfig::builder()
@@ -75,7 +74,6 @@ impl HTTP {
}

/// client returns a new reqwest client.
-#[instrument(skip_all)]
fn client(
&self,
client_cert: Option<Vec<CertificateDer<'static>>>,
@@ -117,7 +115,6 @@ impl HTTP {
#[tonic::async_trait]
impl super::Backend for HTTP {
/// scheme returns the scheme of the HTTP backend.
-#[instrument(skip_all)]
fn scheme(&self) -> String {
self.scheme.clone()
}
7 changes: 1 addition & 6 deletions dragonfly-client-backend/src/lib.rs
@@ -27,7 +27,7 @@ use std::str::FromStr;
use std::{collections::HashMap, pin::Pin, time::Duration};
use std::{fmt::Debug, fs};
use tokio::io::{AsyncRead, AsyncReadExt};
-use tracing::{error, info, instrument, warn};
+use tracing::{error, info, warn};
use url::Url;

pub mod hdfs;
@@ -227,7 +227,6 @@ pub struct BackendFactory {
/// https://github.com/dragonflyoss/client/tree/main/dragonfly-client-backend/examples/plugin/.
impl BackendFactory {
/// new returns a new BackendFactory.
-#[instrument(skip_all)]
pub fn new(plugin_dir: Option<&Path>) -> Result<Self> {
let mut backend_factory = Self::default();
backend_factory.load_builtin_backends()?;
@@ -243,13 +242,11 @@
}

/// supported_download_directory returns whether the scheme supports directory download.
-#[instrument(skip_all)]
pub fn supported_download_directory(scheme: &str) -> bool {
object_storage::Scheme::from_str(scheme).is_ok() || scheme == hdfs::HDFS_SCHEME
}

/// build returns the backend by the scheme of the url.
-#[instrument(skip_all)]
pub fn build(&self, url: &str) -> Result<&(dyn Backend + Send + Sync)> {
let url = Url::parse(url).or_err(ErrorType::ParseError)?;
let scheme = url.scheme();
@@ -260,7 +257,6 @@
}

/// load_builtin_backends loads the builtin backends.
-#[instrument(skip_all)]
fn load_builtin_backends(&mut self) -> Result<()> {
self.backends.insert(
"http".to_string(),
@@ -330,7 +326,6 @@ impl BackendFactory {
}

/// load_plugin_backends loads the plugin backends.
-#[instrument(skip_all)]
fn load_plugin_backends(&mut self, plugin_dir: &Path) -> Result<()> {
let backend_plugin_dir = plugin_dir.join(NAME);
if !backend_plugin_dir.exists() {
8 changes: 0 additions & 8 deletions dragonfly-client-backend/src/object_storage.rs
@@ -177,7 +177,6 @@ pub struct ObjectStorage {
/// ObjectStorage implements the ObjectStorage trait.
impl ObjectStorage {
/// Returns ObjectStorage that implements the Backend trait.
-#[instrument(skip_all)]
pub fn new(scheme: Scheme) -> ClientResult<ObjectStorage> {
// Initialize the reqwest client.
let client = reqwest::Client::builder()
@@ -196,7 +195,6 @@
}

/// operator initializes the operator with the parsed URL and object storage.
-#[instrument(skip_all)]
pub fn operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
@@ -223,7 +221,6 @@
}

/// s3_operator initializes the S3 operator with the parsed URL and object storage.
-#[instrument(skip_all)]
pub fn s3_operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
@@ -276,7 +273,6 @@ impl ObjectStorage {
}

/// gcs_operator initializes the GCS operator with the parsed URL and object storage.
-#[instrument(skip_all)]
pub fn gcs_operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
@@ -311,7 +307,6 @@ impl ObjectStorage {
}

/// abs_operator initializes the ABS operator with the parsed URL and object storage.
-#[instrument(skip_all)]
pub fn abs_operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
@@ -354,7 +349,6 @@ impl ObjectStorage {
}

/// oss_operator initializes the OSS operator with the parsed URL and object storage.
-#[instrument(skip_all)]
pub fn oss_operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
@@ -398,7 +392,6 @@ impl ObjectStorage {
}

/// obs_operator initializes the OBS operator with the parsed URL and object storage.
-#[instrument(skip_all)]
pub fn obs_operator(
&self,
parsed_url: &super::object_storage::ParsedURL,
@@ -487,7 +480,6 @@ impl ObjectStorage {
#[tonic::async_trait]
impl crate::Backend for ObjectStorage {
/// scheme returns the scheme of the object storage.
-#[instrument(skip_all)]
fn scheme(&self) -> String {
self.scheme.to_string()
}
1 change: 1 addition & 0 deletions dragonfly-client-init/src/bin/main.rs
@@ -91,6 +91,7 @@ async fn main() -> Result<(), anyhow::Error> {
args.log_max_files,
None,
None,
+false,
args.log_to_stdout,
);

13 changes: 2 additions & 11 deletions dragonfly-client-storage/src/content.rs
@@ -67,7 +67,6 @@ pub struct WritePersistentCacheTaskResponse {
/// Content implements the content storage.
impl Content {
/// new returns a new content.
-#[instrument(skip_all)]
pub async fn new(config: Arc<Config>, dir: &Path) -> Result<Content> {
let dir = dir.join(DEFAULT_CONTENT_DIR);

@@ -85,21 +84,18 @@
}

/// available_space returns the available space of the disk.
-#[instrument(skip_all)]
pub fn available_space(&self) -> Result<u64> {
let stat = fs2::statvfs(&self.dir)?;
Ok(stat.available_space())
}

/// total_space returns the total space of the disk.
-#[instrument(skip_all)]
pub fn total_space(&self) -> Result<u64> {
let stat = fs2::statvfs(&self.dir)?;
Ok(stat.total_space())
}

/// has_enough_space checks if the storage has enough space to store the content.
-#[instrument(skip_all)]
pub fn has_enough_space(&self, content_length: u64) -> Result<bool> {
let available_space = self.available_space()?;
if available_space < content_length {
@@ -115,7 +111,6 @@
}

/// is_same_dev_inode checks if the source and target are the same device and inode.
-#[instrument(skip_all)]
async fn is_same_dev_inode<P: AsRef<Path>, Q: AsRef<Path>>(
&self,
source: P,
@@ -141,7 +136,6 @@
}

/// is_same_dev_inode_as_task checks if the task and target are the same device and inode.
-#[instrument(skip_all)]
pub async fn is_same_dev_inode_as_task(&self, task_id: &str, to: &Path) -> Result<bool> {
let task_path = self.get_task_path(task_id);
self.is_same_dev_inode(&task_path, to).await
@@ -152,6 +146,7 @@
/// Behavior of `create_task`:
/// 1. If the task already exists, return the task path.
/// 2. If the task does not exist, create the task directory and file.
+#[instrument(skip_all)]
pub async fn create_task(&self, task_id: &str, length: u64) -> Result<PathBuf> {
let task_path = self.get_task_path(task_id);
if task_path.exists() {
@@ -244,7 +239,6 @@
}

/// delete_task deletes the task content.
-#[instrument(skip_all)]
pub async fn delete_task(&self, task_id: &str) -> Result<()> {
info!("delete task content: {}", task_id);
let task_path = self.get_task_path(task_id);
@@ -378,7 +372,6 @@
}

/// get_task_path returns the task path by task id.
-#[instrument(skip_all)]
fn get_task_path(&self, task_id: &str) -> PathBuf {
// The task needs split by the first 3 characters of task id(sha256) to
// avoid too many files in one directory.
@@ -388,7 +381,6 @@

/// is_same_dev_inode_as_persistent_cache_task checks if the persistent cache task and target
/// are the same device and inode.
-#[instrument(skip_all)]
pub async fn is_same_dev_inode_as_persistent_cache_task(
&self,
task_id: &str,
@@ -403,6 +395,7 @@
/// Behavior of `create_persistent_cache_task`:
/// 1. If the persistent cache task already exists, return the persistent cache task path.
/// 2. If the persistent cache task does not exist, create the persistent cache task directory and file.
+#[instrument(skip_all)]
pub async fn create_persistent_cache_task(
&self,
task_id: &str,
@@ -593,7 +586,6 @@
}

/// delete_task deletes the persistent cache task content.
-#[instrument(skip_all)]
pub async fn delete_persistent_cache_task(&self, task_id: &str) -> Result<()> {
info!("delete persistent cache task content: {}", task_id);
let persistent_cache_task_path = self.get_persistent_cache_task_path(task_id);
@@ -606,7 +598,6 @@
}

/// get_persistent_cache_task_path returns the persistent cache task path by task id.
-#[instrument(skip_all)]
fn get_persistent_cache_task_path(&self, task_id: &str) -> PathBuf {
// The persistent cache task needs split by the first 3 characters of task id(sha256) to
// avoid too many files in one directory.
11 changes: 1 addition & 10 deletions dragonfly-client-storage/src/lib.rs
@@ -56,7 +56,6 @@ pub struct Storage {
/// Storage implements the storage.
impl Storage {
/// new returns a new storage.
-#[instrument(skip_all)]
pub async fn new(config: Arc<Config>, dir: &Path, log_dir: PathBuf) -> Result<Self> {
let metadata = metadata::Metadata::new(config.clone(), dir, &log_dir)?;
let content = content::Content::new(config.clone(), dir).await?;
@@ -71,19 +70,16 @@
}

/// total_space returns the total space of the disk.
-#[instrument(skip_all)]
pub fn total_space(&self) -> Result<u64> {
self.content.total_space()
}

/// available_space returns the available space of the disk.
-#[instrument(skip_all)]
pub fn available_space(&self) -> Result<u64> {
self.content.available_space()
}

/// has_enough_space checks if the storage has enough space to store the content.
-#[instrument(skip_all)]
pub fn has_enough_space(&self, content_length: u64) -> Result<bool> {
self.content.has_enough_space(content_length)
}
@@ -102,14 +98,12 @@

/// is_same_dev_inode_as_task checks if the task content is on the same device inode as the
/// destination.
-#[instrument(skip_all)]
pub async fn is_same_dev_inode_as_task(&self, id: &str, to: &Path) -> Result<bool> {
self.content.is_same_dev_inode_as_task(id, to).await
}

/// prepare_download_task_started prepares the metadata of the task when the task downloads
/// started.
-#[instrument(skip_all)]
pub async fn prepare_download_task_started(&self, id: &str) -> Result<metadata::Task> {
self.metadata.download_task_started(id, None, None, None)
}
@@ -227,7 +221,6 @@

/// is_same_dev_inode_as_persistent_cache_task checks if the persistent cache task content is on the same device inode as the
/// destination.
-#[instrument(skip_all)]
pub async fn is_same_dev_inode_as_persistent_cache_task(
&self,
id: &str,
@@ -633,7 +626,6 @@
}

/// get_piece returns the piece metadata.
-#[instrument(skip_all)]
pub fn get_piece(&self, piece_id: &str) -> Result<Option<metadata::Piece>> {
self.metadata.get_piece(piece_id)
}
@@ -645,13 +637,13 @@
}

/// get_pieces returns the piece metadatas.
#[instrument(skip_all)]
pub fn get_pieces(&self, task_id: &str) -> Result<Vec<metadata::Piece>> {
self.metadata.get_pieces(task_id)
}

/// piece_id returns the piece id.
#[inline]
#[instrument(skip_all)]
pub fn piece_id(&self, task_id: &str, number: u32) -> String {
self.metadata.piece_id(task_id, number)
}
@@ -789,7 +781,6 @@

/// persistent_cache_piece_id returns the persistent cache piece id.
#[inline]
-#[instrument(skip_all)]
pub fn persistent_cache_piece_id(&self, task_id: &str, number: u32) -> String {
self.metadata.piece_id(task_id, number)
}
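
Taken together, the hunks above drop #[instrument(skip_all)] from constructors and one-line getters (new, scheme, available_space, get_task_path, and so on), while content.rs adds it on the heavier create_task and create_persistent_cache_task entry points. For readers unfamiliar with the attribute, a minimal sketch of the difference, using the tracing crate with illustrative functions rather than the client's real ones:

use tracing::{info, instrument};

// Kept (and in content.rs, added) on heavier operations: each call opens a
// span named after the function, and skip_all keeps the arguments out of the
// span fields, so large or non-Debug values are never recorded.
#[instrument(skip_all)]
async fn create_task(task_id: &str, length: u64) {
    info!(task_id, length, "creating task");
    // ... create the task directory and preallocate the file ...
}

// Dropped from trivial getters: a span per call here mostly adds overhead and
// trace noise without recording anything useful.
fn scheme() -> String {
    "http".to_string()
}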