
Reorganize workflow examples and improve code structure #16

Open · wants to merge 1 commit into main
41 changes: 35 additions & 6 deletions sample-app/main.go
@@ -10,7 +10,7 @@ import (
 	tlp "github.com/traceloop/go-openllmetry/traceloop-sdk"
 )
 
-func workflow_example() {
+func main() {
 	ctx := context.Background()
 
 	traceloop, err := tlp.NewClient(ctx, tlp.Config{
@@ -24,6 +24,14 @@ func workflow_example() {
 		return
 	}
 
+	wf := traceloop.NewWorkflow(ctx, tlp.WorkflowAttributes{
+		Name: "history_generation",
+	})
+	defer wf.End()
+
+	factGenTask := wf.NewTask("current_date_fact_generation")
+	defer factGenTask.End()
+
 	request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt", map[string]interface{}{"date": time.Now().Format("01/02")})
 	if err != nil {
 		fmt.Printf("GetOpenAIChatCompletionRequest error: %v\n", err)
@@ -39,17 +47,13 @@ func workflow_example() {
 		})
 	}
 
-	llmSpan, err := traceloop.LogPrompt(
-		ctx,
+	llmSpan, err := factGenTask.LogPrompt(
[Review comment by a project Member] That won't work. Have you tested this?

 		tlp.Prompt{
 			Vendor:   "openai",
 			Mode:     "chat",
 			Model:    request.Model,
 			Messages: promptMsgs,
 		},
-		tlp.WorkflowAttributes{
-			Name: "example-workflow",
-		},
 	)
 	if err != nil {
 		fmt.Printf("LogPrompt error: %v\n", err)
@@ -84,5 +88,30 @@ func workflow_example() {
 		PromptTokens:     resp.Usage.PromptTokens,
 	})
 
+	someOtherTask := wf.NewTask("some_other_task")
+	defer someOtherTask.End()
+
+	otherPrompt, _ := someOtherTask.LogPrompt(tlp.Prompt{
+		Vendor: "openai",
+		Mode:   "chat",
+		Model:  request.Model,
+		Messages: []tlp.Message{
+			{
+				Index:   0,
+				Content: "some other prompt",
+				Role:    "user",
+			},
+		},
+	})
+
+	otherPrompt.LogCompletion(ctx, tlp.Completion{
+		Model:    resp.Model,
+		Messages: completionMsgs,
+	}, tlp.Usage{
+		TotalTokens:      resp.Usage.TotalTokens,
+		CompletionTokens: resp.Usage.CompletionTokens,
+		PromptTokens:     resp.Usage.PromptTokens,
+	})
+
 	fmt.Println(resp.Choices[0].Message.Content)
 }
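
For context on the review comment above: the PR swaps the client-level LogPrompt call for a task-level one, and the two shapes differ in more than the receiver. The sketch below uses only identifiers that appear in the diff (prompt stands in for the tlp.Prompt literal); whether the task-level signature exists as written is exactly what the reviewer is questioning.

    // Old shape (removed): the client call takes an explicit context and
    // ad-hoc workflow attributes.
    llmSpan, err := traceloop.LogPrompt(ctx, prompt, tlp.WorkflowAttributes{
        Name: "example-workflow",
    })

    // New shape (added by this PR): no ctx and no attributes are passed, so
    // the task returned by wf.NewTask would have to carry the workflow
    // context itself. That is an assumption of this PR, not a contract
    // confirmed anywhere in this diff.
    llmSpan, err := factGenTask.LogPrompt(prompt)

If the task does not hold its own context, the call would still need ctx threaded through, which may be what "That won't work" refers to.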
41 changes: 6 additions & 35 deletions sample-app/workflow_example.go
@@ -10,7 +10,7 @@ import (
 	tlp "github.com/traceloop/go-openllmetry/traceloop-sdk"
 )
 
-func main() {
+func workflow_example() {
 	ctx := context.Background()
 
 	traceloop, err := tlp.NewClient(ctx, tlp.Config{
@@ -24,14 +24,6 @@ func main() {
 		return
 	}
 
-	wf := traceloop.NewWorkflow(ctx, tlp.WorkflowAttributes{
-		Name: "history_generation",
-	})
-	defer wf.End()
-
-	factGenTask := wf.NewTask("current_date_fact_generation")
-	defer factGenTask.End()
-
 	request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt", map[string]interface{}{"date": time.Now().Format("01/02")})
 	if err != nil {
 		fmt.Printf("GetOpenAIChatCompletionRequest error: %v\n", err)
@@ -47,13 +39,17 @@ func main() {
 		})
 	}
 
-	llmSpan, err := factGenTask.LogPrompt(
+	llmSpan, err := traceloop.LogPrompt(
+		ctx,
 		tlp.Prompt{
 			Vendor:   "openai",
 			Mode:     "chat",
 			Model:    request.Model,
 			Messages: promptMsgs,
 		},
+		tlp.WorkflowAttributes{
+			Name: "example-workflow",
+		},
 	)
 	if err != nil {
 		fmt.Printf("LogPrompt error: %v\n", err)
@@ -88,30 +84,5 @@ func main() {
 		PromptTokens:     resp.Usage.PromptTokens,
 	})
 
-	someOtherTask := wf.NewTask("some_other_task")
-	defer someOtherTask.End()
-
-	otherPrompt, _ := someOtherTask.LogPrompt(tlp.Prompt{
-		Vendor: "openai",
-		Mode:   "chat",
-		Model:  request.Model,
-		Messages: []tlp.Message{
-			{
-				Index:   0,
-				Content: "some other prompt",
-				Role:    "user",
-			},
-		},
-	})
-
-	otherPrompt.LogCompletion(ctx, tlp.Completion{
-		Model:    resp.Model,
-		Messages: completionMsgs,
-	}, tlp.Usage{
-		TotalTokens:      resp.Usage.TotalTokens,
-		CompletionTokens: resp.Usage.CompletionTokens,
-		PromptTokens:     resp.Usage.PromptTokens,
-	})
-
 	fmt.Println(resp.Choices[0].Message.Content)
 }
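
Net effect of the PR: the two sample files swap bodies, so main.go now runs the workflow/task-instrumented example while workflow_example.go keeps the plain client-level one, and each file name matches the function it defines. Pieced together from the visible hunks, the flow that lands in main.go reads roughly as follows; this is a reconstruction for orientation only, with the collapsed regions between hunks elided rather than guessed at.

    func main() {
        ctx := context.Background()

        traceloop, err := tlp.NewClient(ctx, tlp.Config{ /* elided */ })
        if err != nil { /* elided */ return }

        // One workflow span for the whole run...
        wf := traceloop.NewWorkflow(ctx, tlp.WorkflowAttributes{Name: "history_generation"})
        defer wf.End()

        // ...and one task span per step under it.
        factGenTask := wf.NewTask("current_date_fact_generation")
        defer factGenTask.End()

        request, err := traceloop.GetOpenAIChatCompletionRequest("example-prompt",
            map[string]interface{}{"date": time.Now().Format("01/02")})
        // ... build promptMsgs, factGenTask.LogPrompt, call the model,
        // log the completion (these steps sit in collapsed hunks) ...

        // Second task under the same workflow, added by the last hunk.
        someOtherTask := wf.NewTask("some_other_task")
        defer someOtherTask.End()
        // ... someOtherTask.LogPrompt / otherPrompt.LogCompletion as shown ...

        fmt.Println(resp.Choices[0].Message.Content)
    }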