
style: project not formatted with either gofmt or golangci-lint #11

Merged: 1 commit, Dec 26, 2024
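
Almost all of the churn below is whitespace: gofmt aligns the values of consecutive struct fields and keyed composite-literal entries, removes stray spaces just inside braces, and drops duplicate blank lines; golangci-lint flags the same drift through its gofmt linter. A minimal sketch of the effect, on a hypothetical file that is not part of this repo:

// format_demo.go: hypothetical illustration only, not taken from this PR.
// The code below is written the way gofmt leaves it.
package main

import (
	"fmt"
	"time"
)

type config struct {
	BaseURL string
	APIKey  string // gofmt pads the shorter field name so the types line up
}

func main() {
	// Before gofmt this literal might read `APIKey: "secret"` with a single
	// space; gofmt pads the value so it aligns with the line above.
	cfg := config{
		BaseURL: "api.example.com",
		APIKey:  "secret",
	}
	// gofmt also rewrites `{ "date": ... }` as `{"date": ...}`.
	vars := map[string]interface{}{"date": time.Now().Format("01/02")}
	fmt.Println(cfg.BaseURL, vars["date"])
}
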
19 changes: 9 additions & 10 deletions sample-app/main.go
@@ -15,7 +15,7 @@ func workflow_example() {

 	traceloop, err := tlp.NewClient(ctx, tlp.Config{
 		BaseURL: "api-staging.traceloop.com",
-		APIKey: os.Getenv("TRACELOOP_API_KEY"),
+		APIKey:  os.Getenv("TRACELOOP_API_KEY"),
 	})
 	defer func() { traceloop.Shutdown(ctx) }()

@@ -24,7 +24,7 @@ func workflow_example() {
 		return
 	}

-	request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt", map[string]interface{}{ "date": time.Now().Format("01/02") })
+	request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt", map[string]interface{}{"date": time.Now().Format("01/02")})
 	if err != nil {
 		fmt.Printf("GetOpenAIChatCompletionRequest error: %v\n", err)
 		return
@@ -40,11 +40,11 @@ func workflow_example() {
 	}

 	llmSpan, err := traceloop.LogPrompt(
-		ctx,
+		ctx,
 		tlp.Prompt{
-			Vendor: "openai",
-			Mode: "chat",
-			Model: request.Model,
+			Vendor:   "openai",
+			Mode:     "chat",
+			Model:    request.Model,
 			Messages: promptMsgs,
 		},
 		tlp.WorkflowAttributes{
@@ -79,11 +79,10 @@ func workflow_example() {
 		Model:    resp.Model,
 		Messages: completionMsgs,
 	}, tlp.Usage{
-		TotalTokens: resp.Usage.TotalTokens,
-		CompletionTokens: resp.Usage.CompletionTokens,
-		PromptTokens: resp.Usage.PromptTokens,
+		TotalTokens:      resp.Usage.TotalTokens,
+		CompletionTokens: resp.Usage.CompletionTokens,
+		PromptTokens:     resp.Usage.PromptTokens,
 	})

-
 	fmt.Println(resp.Choices[0].Message.Content)
 }
24 changes: 12 additions & 12 deletions sample-app/workflow_example.go
@@ -15,7 +15,7 @@ func main() {

 	traceloop, err := tlp.NewClient(ctx, tlp.Config{
 		BaseURL: "api-staging.traceloop.com",
-		APIKey: os.Getenv("TRACELOOP_API_KEY"),
+		APIKey:  os.Getenv("TRACELOOP_API_KEY"),
 	})
 	defer func() { traceloop.Shutdown(ctx) }()

@@ -32,7 +32,7 @@ func main() {
 	factGenTask := wf.NewTask("current_date_fact_generation")
 	defer factGenTask.End()

-	request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt", map[string]interface{}{ "date": time.Now().Format("01/02") })
+	request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt", map[string]interface{}{"date": time.Now().Format("01/02")})
 	if err != nil {
 		fmt.Printf("GetOpenAIChatCompletionRequest error: %v\n", err)
 		return
@@ -49,9 +49,9 @@ func main() {

 	llmSpan, err := factGenTask.LogPrompt(
 		tlp.Prompt{
-			Vendor: "openai",
-			Mode: "chat",
-			Model: request.Model,
+			Vendor:   "openai",
+			Mode:     "chat",
+			Model:    request.Model,
 			Messages: promptMsgs,
 		},
 	)
@@ -83,9 +83,9 @@ func main() {
 		Model:    resp.Model,
 		Messages: completionMsgs,
 	}, tlp.Usage{
-		TotalTokens: resp.Usage.TotalTokens,
-		CompletionTokens: resp.Usage.CompletionTokens,
-		PromptTokens: resp.Usage.PromptTokens,
+		TotalTokens:      resp.Usage.TotalTokens,
+		CompletionTokens: resp.Usage.CompletionTokens,
+		PromptTokens:     resp.Usage.PromptTokens,
 	})

 	someOtherTask := wf.NewTask("some_other_task")
@@ -94,7 +94,7 @@ func main() {
 	otherPrompt, _ := someOtherTask.LogPrompt(tlp.Prompt{
 		Vendor: "openai",
 		Mode:   "chat",
-		Model: request.Model,
+		Model:  request.Model,
 		Messages: []tlp.Message{
 			{
 				Index: 0,
@@ -108,9 +108,9 @@ func main() {
 		Model:    resp.Model,
 		Messages: completionMsgs,
 	}, tlp.Usage{
-		TotalTokens: resp.Usage.TotalTokens,
-		CompletionTokens: resp.Usage.CompletionTokens,
-		PromptTokens: resp.Usage.PromptTokens,
+		TotalTokens:      resp.Usage.TotalTokens,
+		CompletionTokens: resp.Usage.CompletionTokens,
+		PromptTokens:     resp.Usage.PromptTokens,
 	})

 	fmt.Println(resp.Choices[0].Message.Content)
56 changes: 28 additions & 28 deletions semconv-ai/attributes.go
@@ -4,33 +4,33 @@ import "go.opentelemetry.io/otel/attribute"

 const (
 	// LLM
-	LLMVendor = attribute.Key("llm.vendor")
-	LLMRequestType = attribute.Key("llm.request.type")
-	LLMRequestModel = attribute.Key("llm.request.model")
-	LLMResponseModel = attribute.Key("llm.response.model")
-	LLMRequestMaxTokens = attribute.Key("llm.request.max_tokens")
-	LLMUsageTotalTokens = attribute.Key("llm.usage.total_tokens")
-	LLMUsageCompletionTokens = attribute.Key("llm.usage.completion_tokens")
-	LLMUsagePromptTokens = attribute.Key("llm.usage.prompt_tokens")
-	LLMTemperature = attribute.Key("llm.temperature")
-	LLMUser = attribute.Key("llm.user")
-	LLMHeaders = attribute.Key("llm.headers")
-	LLMTopP = attribute.Key("llm.top_p")
-	LLMTopK = attribute.Key("llm.top_k")
-	LLMFrequencyPenalty = attribute.Key("llm.frequency_penalty")
-	LLMPresencePenalty = attribute.Key("llm.presence_penalty")
-	LLMPrompts = attribute.Key("llm.prompts")
-	LLMCompletions = attribute.Key("llm.completions")
-	LLMChatStopSequence = attribute.Key("llm.chat.stop_sequences")
-	LLMRequestFunctions = attribute.Key("llm.request.functions")
+	LLMVendor                = attribute.Key("llm.vendor")
+	LLMRequestType           = attribute.Key("llm.request.type")
+	LLMRequestModel          = attribute.Key("llm.request.model")
+	LLMResponseModel         = attribute.Key("llm.response.model")
+	LLMRequestMaxTokens      = attribute.Key("llm.request.max_tokens")
+	LLMUsageTotalTokens      = attribute.Key("llm.usage.total_tokens")
+	LLMUsageCompletionTokens = attribute.Key("llm.usage.completion_tokens")
+	LLMUsagePromptTokens     = attribute.Key("llm.usage.prompt_tokens")
+	LLMTemperature           = attribute.Key("llm.temperature")
+	LLMUser                  = attribute.Key("llm.user")
+	LLMHeaders               = attribute.Key("llm.headers")
+	LLMTopP                  = attribute.Key("llm.top_p")
+	LLMTopK                  = attribute.Key("llm.top_k")
+	LLMFrequencyPenalty      = attribute.Key("llm.frequency_penalty")
+	LLMPresencePenalty       = attribute.Key("llm.presence_penalty")
+	LLMPrompts               = attribute.Key("llm.prompts")
+	LLMCompletions           = attribute.Key("llm.completions")
+	LLMChatStopSequence      = attribute.Key("llm.chat.stop_sequences")
+	LLMRequestFunctions      = attribute.Key("llm.request.functions")

-	// Vector DB
-	VectorDBVendor = attribute.Key("vector_db.vendor")
-	VectorDBQueryTopK = attribute.Key("vector_db.query.top_k")
+	// Vector DB
+	VectorDBVendor    = attribute.Key("vector_db.vendor")
+	VectorDBQueryTopK = attribute.Key("vector_db.query.top_k")

-	// LLM Workflows
-	TraceloopSpanKind = attribute.Key("traceloop.span.kind")
-	TraceloopWorkflowName = attribute.Key("traceloop.workflow.name")
-	TraceloopEntityName = attribute.Key("traceloop.entity.name")
-	TraceloopAssociationProperties = attribute.Key("traceloop.association.properties")
-)
+	// LLM Workflows
+	TraceloopSpanKind              = attribute.Key("traceloop.span.kind")
+	TraceloopWorkflowName          = attribute.Key("traceloop.workflow.name")
+	TraceloopEntityName            = attribute.Key("traceloop.entity.name")
+	TraceloopAssociationProperties = attribute.Key("traceloop.association.properties")
+)
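
For orientation: these attribute.Key constants are attached to spans with the standard OpenTelemetry SetAttributes call, exactly as traceloop-sdk/sdk.go does further down. A minimal self-contained sketch (the key names come from this file; the tracer name and attribute values are made up):

// attr_demo.go: hypothetical usage sketch, not part of this PR.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

// Redeclared locally so the snippet compiles on its own; in the SDK these
// come from the semconv-ai package shown above.
var (
	llmVendor       = attribute.Key("llm.vendor")
	llmRequestModel = attribute.Key("llm.request.model")
)

func main() {
	// Without a registered TracerProvider this returns a no-op tracer,
	// which is enough to show how the keys are used.
	_, span := otel.Tracer("attr-demo").Start(context.Background(), "openai.chat")
	defer span.End()

	span.SetAttributes(
		llmVendor.String("openai"),
		llmRequestModel.String("gpt-3.5-turbo"), // made-up model value
	)
}
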
14 changes: 7 additions & 7 deletions traceloop-sdk/config.go
@@ -3,14 +3,14 @@ package traceloop

 import "time"

 type BackoffConfig struct {
-	MaxRetries uint64
+	MaxRetries uint64
 }

 type Config struct {
-	BaseURL string
-	APIKey string
-	TracerName string
-	ServiceName string
-	PollingInterval time.Duration
-	BackoffConfig BackoffConfig
+	BaseURL         string
+	APIKey          string
+	TracerName      string
+	ServiceName     string
+	PollingInterval time.Duration
+	BackoffConfig   BackoffConfig
 }
62 changes: 31 additions & 31 deletions traceloop-sdk/model/prompt_registry.go
@@ -3,48 +3,48 @@ package model

 import "time"

 type ModelConfig struct {
-	Mode string `json:"mode"`
-	Model string `json:"model"`
-	Temperature float32 `json:"temperature"`
-	TopP float32 `json:"top_p"`
-	Stop []string `json:"stop"`
-	FrequencyPenalty float32 `json:"frequency_penalty"`
-	PresencePenalty float32 `json:"presence_penalty"`
+	Mode             string   `json:"mode"`
+	Model            string   `json:"model"`
+	Temperature      float32  `json:"temperature"`
+	TopP             float32  `json:"top_p"`
+	Stop             []string `json:"stop"`
+	FrequencyPenalty float32  `json:"frequency_penalty"`
+	PresencePenalty  float32  `json:"presence_penalty"`
 }

 type Message struct {
-	Index int `json:"index"`
-	Role string `json:"role"`
-	Template string `json:"template"`
-	Variables []string `json:"variables"`
+	Index     int      `json:"index"`
+	Role      string   `json:"role"`
+	Template  string   `json:"template"`
+	Variables []string `json:"variables"`
 }

 type PromptVersion struct {
-	Id string `json:"id"`
-	Hash string `json:"hash"`
-	Version uint `json:"version"`
-	Name string `json:"name"`
-	CreatedAt time.Time `json:"created_at"`
-	Provider string `json:"provider"`
-	TemplatingEngine string `json:"templating_engine"`
-	Messages []Message `json:"messages"`
-	LlmConfig ModelConfig `json:"llm_config"`
+	Id               string      `json:"id"`
+	Hash             string      `json:"hash"`
+	Version          uint        `json:"version"`
+	Name             string      `json:"name"`
+	CreatedAt        time.Time   `json:"created_at"`
+	Provider         string      `json:"provider"`
+	TemplatingEngine string      `json:"templating_engine"`
+	Messages         []Message   `json:"messages"`
+	LlmConfig        ModelConfig `json:"llm_config"`
 }

 type Target struct {
-	Id string `json:"id"`
-	PromptId string `json:"prompt_id"`
-	Version string `json:"version"`
-	UpdatedAt time.Time `json:"updated_at"`
+	Id        string    `json:"id"`
+	PromptId  string    `json:"prompt_id"`
+	Version   string    `json:"version"`
+	UpdatedAt time.Time `json:"updated_at"`
 }

 type Prompt struct {
-	Id string `json:"id"`
-	Versions []PromptVersion `json:"versions"`
-	Target Target `json:"target"`
-	Key string `json:"key"`
-	CreatedAt time.Time `json:"created_at"`
-	UpdatedAt time.Time `json:"updated_at"`
+	Id        string          `json:"id"`
+	Versions  []PromptVersion `json:"versions"`
+	Target    Target          `json:"target"`
+	Key       string          `json:"key"`
+	CreatedAt time.Time       `json:"created_at"`
+	UpdatedAt time.Time       `json:"updated_at"`
 }

-type PromptRegistry map[string]*Prompt
+type PromptRegistry map[string]*Prompt
20 changes: 10 additions & 10 deletions traceloop-sdk/prompt_registry.go
@@ -11,8 +11,8 @@ import (
 )

 type PromptsResponse struct {
-	Prompts []model.Prompt `json:"prompts"`
-	Environment string `json:"environment"`
+	Prompts     []model.Prompt `json:"prompts"`
+	Environment string         `json:"environment"`
 }

 func (instance *Traceloop) populatePromptRegistry() {
@@ -38,7 +38,7 @@ func (instance *Traceloop) populatePromptRegistry() {

 func (instance *Traceloop) pollPrompts() {
 	prompts := make(chan []model.Prompt)
-	errs := make(chan error)
+	errs := make(chan error)

 	instance.populatePromptRegistry()

@@ -103,12 +103,12 @@ func (instance *Traceloop) GetOpenAIChatCompletionRequest(key string, variables
 	}

 	return &openai.ChatCompletionRequest{
-		Model: promptVersion.LlmConfig.Model,
-		Temperature: promptVersion.LlmConfig.Temperature,
-		TopP: promptVersion.LlmConfig.TopP,
-		Stop: promptVersion.LlmConfig.Stop,
+		Model:            promptVersion.LlmConfig.Model,
+		Temperature:      promptVersion.LlmConfig.Temperature,
+		TopP:             promptVersion.LlmConfig.TopP,
+		Stop:             promptVersion.LlmConfig.Stop,
 		FrequencyPenalty: promptVersion.LlmConfig.FrequencyPenalty,
-		PresencePenalty: promptVersion.LlmConfig.PresencePenalty,
-		Messages: messages,
+		PresencePenalty:  promptVersion.LlmConfig.PresencePenalty,
+		Messages:         messages,
 	}, nil
-}
+}
24 changes: 12 additions & 12 deletions traceloop-sdk/sdk.go
@@ -19,10 +19,10 @@ import (
 const PromptsPath = "/v1/traceloop/prompts"

 type Traceloop struct {
-	config Config
-	promptRegistry model.PromptRegistry
-	tracerProvider *trace.TracerProvider
-	http.Client
+	config         Config
+	promptRegistry model.PromptRegistry
+	tracerProvider *trace.TracerProvider
+	http.Client
 }

 type LLMSpan struct {
@@ -47,7 +47,7 @@ func NewClient(ctx context.Context, config Config) (*Traceloop, error) {
 func (instance *Traceloop) initialize(ctx context.Context) error {
 	if instance.config.BaseURL == "" {
 		baseUrl := os.Getenv("TRACELOOP_BASE_URL")
-		if baseUrl == "" {
+		if baseUrl == "" {
 			instance.config.BaseURL = "api.traceloop.com"
 		} else {
 			instance.config.BaseURL = baseUrl
@@ -78,8 +78,8 @@ func setMessagesAttribute(span apitrace.Span, prefix string, messages []Message)
 	for _, message := range messages {
 		attrsPrefix := fmt.Sprintf("%s.%d", prefix, message.Index)
 		span.SetAttributes(
-			attribute.String(attrsPrefix + ".content", message.Content),
-			attribute.String(attrsPrefix + ".role", message.Role),
+			attribute.String(attrsPrefix+".content", message.Content),
+			attribute.String(attrsPrefix+".role", message.Role),
 		)
 	}
 }
@@ -89,7 +89,7 @@ func (instance *Traceloop) tracerName() string {
 		return instance.config.TracerName
 	} else {
 		return "traceloop.tracer"
-	}
+	}
 }

 func (instance *Traceloop) getTracer() apitrace.Tracer {
@@ -99,7 +99,7 @@ func (instance *Traceloop) getTracer() apitrace.Tracer {
 func (instance *Traceloop) LogPrompt(ctx context.Context, prompt Prompt, workflowAttrs WorkflowAttributes) (LLMSpan, error) {
 	spanName := fmt.Sprintf("%s.%s", prompt.Vendor, prompt.Mode)
 	_, span := instance.getTracer().Start(ctx, spanName)
-
+
 	span.SetAttributes(
 		semconvai.LLMVendor.String(prompt.Vendor),
 		semconvai.LLMRequestModel.String(prompt.Model),
@@ -109,7 +109,7 @@ func (instance *Traceloop) LogPrompt(ctx context.Context, prompt Prompt, workflo

 	setMessagesAttribute(span, "llm.prompts", prompt.Messages)

-	return LLMSpan{
+	return LLMSpan{
 		span: span,
 	}, nil
 }
@@ -130,7 +130,7 @@ func (llmSpan *LLMSpan) LogCompletion(ctx context.Context, completion Completion
 }

 func (instance *Traceloop) Shutdown(ctx context.Context) {
-	if instance.tracerProvider != nil{
-		instance.tracerProvider.Shutdown(ctx)
+	if instance.tracerProvider != nil {
+		instance.tracerProvider.Shutdown(ctx)
 	}
 }
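
Since the point of the PR is that the tree had drifted from gofmt, a quick way to verify the result is `gofmt -l .` (or golangci-lint with its gofmt linter enabled). The same check can also be scripted with Go's standard library; a small hypothetical helper, not part of this PR:

// checkfmt.go: hypothetical helper, not part of this PR. It reports any
// .go file under the current directory whose bytes differ from gofmt output.
package main

import (
	"bytes"
	"fmt"
	"go/format"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() || !strings.HasSuffix(path, ".go") {
			return err
		}
		src, readErr := os.ReadFile(path)
		if readErr != nil {
			return readErr
		}
		formatted, fmtErr := format.Source(src)
		if fmtErr != nil {
			return fmtErr
		}
		if !bytes.Equal(src, formatted) {
			fmt.Println("needs gofmt:", path)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}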