
[PLT-1357] Vb/fixtures for feature schemas plt 1342 #1766

Merged · 3 commits · Aug 12, 2024
24 changes: 12 additions & 12 deletions .github/workflows/python-package-develop.yml
@@ -38,18 +38,18 @@ jobs:
- python-version: 3.8
api-key: STAGING_LABELBOX_API_KEY_2
da-test-key: DA_GCP_LABELBOX_API_KEY
# - python-version: 3.9
# api-key: STAGING_LABELBOX_API_KEY_3
# da-test-key: DA_GCP_LABELBOX_API_KEY
# - python-version: "3.10"
# api-key: STAGING_LABELBOX_API_KEY_4
# da-test-key: DA_GCP_LABELBOX_API_KEY
# - python-version: 3.11
# api-key: STAGING_LABELBOX_API_KEY
# da-test-key: DA_GCP_LABELBOX_API_KEY
# - python-version: 3.12
# api-key: STAGING_LABELBOX_API_KEY_5
# da-test-key: DA_GCP_LABELBOX_API_KEY
- python-version: 3.9
api-key: STAGING_LABELBOX_API_KEY_3
da-test-key: DA_GCP_LABELBOX_API_KEY
- python-version: "3.10"
api-key: STAGING_LABELBOX_API_KEY_4
da-test-key: DA_GCP_LABELBOX_API_KEY
- python-version: 3.11
api-key: STAGING_LABELBOX_API_KEY
da-test-key: DA_GCP_LABELBOX_API_KEY
- python-version: 3.12
api-key: STAGING_LABELBOX_API_KEY_5
da-test-key: DA_GCP_LABELBOX_API_KEY
uses: ./.github/workflows/python-package-shared.yml
with:
python-version: ${{ matrix.python-version }}
192 changes: 117 additions & 75 deletions libs/labelbox/tests/integration/conftest.py
@@ -342,32 +342,39 @@ def _upload_invalid_data_rows_for_dataset(dataset: Dataset):

return _upload_invalid_data_rows_for_dataset


@pytest.fixture
def prompt_response_generation_project_with_new_dataset(client: Client, rand_gen, request):
def prompt_response_generation_project_with_new_dataset(client: Client,
rand_gen, request):
"""fixture is parametrized and needs project_type in request"""
media_type = request.param
prompt_response_project = client.create_prompt_response_generation_project(name=f"{media_type.value}-{rand_gen(str)}",
dataset_name=f"{media_type.value}-{rand_gen(str)}",
data_row_count=1,
media_type=media_type)

prompt_response_project = client.create_prompt_response_generation_project(
name=f"{media_type.value}-{rand_gen(str)}",
dataset_name=f"{media_type.value}-{rand_gen(str)}",
data_row_count=1,
media_type=media_type)

yield prompt_response_project

prompt_response_project.delete()
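
A note on the request parameter used by these fixtures: request.param only exists when a test parametrizes the fixture indirectly, which is what the "fixture is parametrized and needs project_type in request" docstrings refer to. A minimal, self-contained sketch of that pattern (fixture and test names here are illustrative, not from this PR):

    import pytest

    @pytest.fixture
    def media_type(request):
        # request.param is supplied by indirect parametrization in the test.
        return f"{request.param}-project"

    @pytest.mark.parametrize("media_type", ["audio", "text"], indirect=True)
    def test_media_type(media_type):
        # The fixture runs once per parameter value.
        assert media_type.endswith("-project")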


@pytest.fixture
def prompt_response_generation_project_with_dataset_id(client: Client, dataset, rand_gen, request):
def prompt_response_generation_project_with_dataset_id(client: Client, dataset,
rand_gen, request):
"""fixture is parametrized and needs project_type in request"""
media_type = request.param
prompt_response_project = client.create_prompt_response_generation_project(name=f"{media_type.value}-{rand_gen(str)}",
dataset_id=dataset.uid,
data_row_count=1,
media_type=media_type)

prompt_response_project = client.create_prompt_response_generation_project(
name=f"{media_type.value}-{rand_gen(str)}",
dataset_id=dataset.uid,
data_row_count=1,
media_type=media_type)

yield prompt_response_project

prompt_response_project.delete()


@pytest.fixture
def response_creation_project(client: Client, rand_gen):
project_name = f"response-creation-project-{rand_gen(str)}"
@@ -377,87 +384,105 @@ def response_creation_project(client: Client, rand_gen):

project.delete()

@pytest.fixture

@pytest.fixture
def prompt_response_features(rand_gen):

prompt_text = PromptResponseClassification(class_type=PromptResponseClassification.Type.PROMPT,
name=f"{rand_gen(str)}-prompt text")

response_radio = PromptResponseClassification(class_type=PromptResponseClassification.Type.RESPONSE_RADIO,
name=f"{rand_gen(str)}-response radio classification",
options=[
ResponseOption(value=f"{rand_gen(str)}-first radio option answer"),
ResponseOption(value=f"{rand_gen(str)}-second radio option answer"),
])

response_checklist = PromptResponseClassification(class_type=PromptResponseClassification.Type.RESPONSE_CHECKLIST,
name=f"{rand_gen(str)}-response checklist classification",
options=[
ResponseOption(value=f"{rand_gen(str)}-first checklist option answer"),
ResponseOption(value=f"{rand_gen(str)}-second checklist option answer"),
])

response_text_with_char = PromptResponseClassification(class_type=PromptResponseClassification.Type.RESPONSE_TEXT,
name=f"{rand_gen(str)}-response text with character min and max",
character_min = 1,
character_max = 10)

response_text = PromptResponseClassification(class_type=PromptResponseClassification.Type.RESPONSE_TEXT,
name=f"{rand_gen(str)}-response text")

nested_response_radio = PromptResponseClassification(class_type=PromptResponseClassification.Type.RESPONSE_RADIO,
name=f"{rand_gen(str)}-nested response radio classification",
options=[
ResponseOption(f"{rand_gen(str)}-first_radio_answer",
options=[
PromptResponseClassification(
class_type=PromptResponseClassification.Type.RESPONSE_RADIO,
name=f"{rand_gen(str)}-sub_radio_question",
options=[ResponseOption(f"{rand_gen(str)}-first_sub_radio_answer")])
])
])

prompt_text = PromptResponseClassification(
class_type=PromptResponseClassification.Type.PROMPT,
name=f"{rand_gen(str)}-prompt text")

response_radio = PromptResponseClassification(
class_type=PromptResponseClassification.Type.RESPONSE_RADIO,
name=f"{rand_gen(str)}-response radio classification",
options=[
ResponseOption(value=f"{rand_gen(str)}-first radio option answer"),
ResponseOption(value=f"{rand_gen(str)}-second radio option answer"),
])

response_checklist = PromptResponseClassification(
class_type=PromptResponseClassification.Type.RESPONSE_CHECKLIST,
name=f"{rand_gen(str)}-response checklist classification",
options=[
ResponseOption(
value=f"{rand_gen(str)}-first checklist option answer"),
ResponseOption(
value=f"{rand_gen(str)}-second checklist option answer"),
])

response_text_with_char = PromptResponseClassification(
class_type=PromptResponseClassification.Type.RESPONSE_TEXT,
name=f"{rand_gen(str)}-response text with character min and max",
character_min=1,
character_max=10)

response_text = PromptResponseClassification(
class_type=PromptResponseClassification.Type.RESPONSE_TEXT,
name=f"{rand_gen(str)}-response text")

nested_response_radio = PromptResponseClassification(
class_type=PromptResponseClassification.Type.RESPONSE_RADIO,
name=f"{rand_gen(str)}-nested response radio classification",
options=[
ResponseOption(
f"{rand_gen(str)}-first_radio_answer",
options=[
PromptResponseClassification(
class_type=PromptResponseClassification.Type.
RESPONSE_RADIO,
name=f"{rand_gen(str)}-sub_radio_question",
options=[
ResponseOption(
f"{rand_gen(str)}-first_sub_radio_answer")
])
])
])
yield {
"prompts": [prompt_text],
"responses": [response_text, response_radio, response_checklist, response_text_with_char, nested_response_radio]
"responses": [
response_text, response_radio, response_checklist,
response_text_with_char, nested_response_radio
]
}


@pytest.fixture
def prompt_response_ontology(client: Client, rand_gen, prompt_response_features, request):
def prompt_response_ontology(client: Client, rand_gen, prompt_response_features,
request):
"""fixture is parametrized and needs project_type in request"""

project_type = request.param
if project_type == MediaType.LLMPromptCreation:
ontology_builder = OntologyBuilder(
tools=[],
classifications=prompt_response_features["prompts"])
tools=[], classifications=prompt_response_features["prompts"])
elif project_type == MediaType.LLMPromptResponseCreation:
ontology_builder = OntologyBuilder(
tools=[],
classifications=prompt_response_features["prompts"] + prompt_response_features["responses"])
classifications=prompt_response_features["prompts"] +
prompt_response_features["responses"])
else:
ontology_builder = OntologyBuilder(
tools=[],
classifications=prompt_response_features["responses"]
)

tools=[], classifications=prompt_response_features["responses"])

ontology_name = f"prompt-response-{rand_gen(str)}"

if project_type in MediaType:
ontology = client.create_ontology(
ontology_name,
ontology_builder.asdict(),
media_type=project_type)
ontology = client.create_ontology(ontology_name,
ontology_builder.asdict(),
media_type=project_type)
else:
ontology = client.create_ontology(
ontology_name,
ontology_builder.asdict(),
media_type=MediaType.Text,
ontology_kind=OntologyKind.ResponseCreation
)
ontology_kind=OntologyKind.ResponseCreation)
yield ontology

featureSchemaIds = [feature["featureSchemaId"] for feature in ontology.normalized["classifications"]]


featureSchemaIds = [
feature["featureSchemaId"]
for feature in ontology.normalized["classifications"]
]

try:
client.delete_unused_ontology(ontology.uid)
for featureSchemaId in featureSchemaIds:
@@ -466,6 +491,24 @@ def prompt_response_ontology(client: Client, rand_gen, prompt_response_features,
print(f"Failed to delete ontology {ontology.uid}: {str(e)}")


Contributor (author) commented: These are the actual changes; everything above this point is just automated lint reformatting.

@pytest.fixture

def point():
return Tool(
tool=Tool.Type.POINT,
name="name",
color="#ff0000",
)


@pytest.fixture
def feature_schema(client, point):
created_feature_schema = client.upsert_feature_schema(point.asdict())

yield created_feature_schema
client.delete_unused_feature_schema(
created_feature_schema.normalized['featureSchemaId'])


@pytest.fixture
def chat_evaluation_ontology(client, rand_gen):
ontology_name = f"test-chat-evaluation-ontology-{rand_gen(str)}"
@@ -682,13 +725,12 @@ def offline_conversational_data_row(initial_dataset):

return data_row


@pytest.fixture
def response_data_row(initial_dataset):
text_asset = {
"row_data": "response sample text"
}
text_asset = {"row_data": "response sample text"}
data_row = initial_dataset.create_data_row(text_asset)

return data_row


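The new point and feature_schema fixtures above centralize feature-schema setup and teardown: everything before the yield runs as setup, and the delete_unused_feature_schema call after it runs as cleanup once the consuming test finishes, even if the test fails. A minimal sketch of how a test consumes the fixture (the test name here is illustrative; the real updated tests are in test_feature_schema.py below):

    def test_feature_schema_roundtrip(feature_schema):
        # pytest injects the object yielded by the fixture; the
        # fixture's teardown runs after this test completes.
        assert feature_schema.uid is not None
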
18 changes: 4 additions & 14 deletions libs/labelbox/tests/integration/test_feature_schema.py
@@ -86,22 +86,12 @@ def test_throws_an_error_when_updating_not_existing_feature_schema(client):
client.update_feature_schema_title("doesntexist", "new title")


def test_creates_a_new_feature_schema(client):
created_feature_schema = client.upsert_feature_schema(point.asdict())
def test_creates_a_new_feature_schema(feature_schema):
assert feature_schema.uid is not None

assert created_feature_schema.uid is not None

client.delete_unused_feature_schema(
created_feature_schema.normalized['featureSchemaId'])


def test_updates_a_feature_schema(client):
tool = Tool(
tool=Tool.Type.POINT,
name="name",
color="#ff0000",
)
created_feature_schema = client.upsert_feature_schema(tool.asdict())
def test_updates_a_feature_schema(client, feature_schema):
created_feature_schema = feature_schema
tool_to_update = Tool(
tool=Tool.Type.POINT,
name="new name",
2 changes: 1 addition & 1 deletion libs/labelbox/tests/integration/test_labeling_service.py
@@ -56,7 +56,7 @@ def test_request_labeling_service_moe_project(
) == LabelingServiceStatus.Requested


def test_request_labeling_service_incomplete_requirements(project, ontology):
def test_request_labeling_service_incomplete_requirements(ontology, project):
labeling_service = project.get_labeling_service(
) # project fixture is an Image type project
with pytest.raises(ResourceNotFoundError,
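The only change in this test is the order of the fixture parameters. For fixtures of the same scope, pytest generally instantiates them in the order they appear in the test signature (and finalizes them in reverse), so listing ontology first sets it up before project. A minimal sketch of that ordering behavior (fixture names here are illustrative):

    import pytest

    calls = []

    @pytest.fixture
    def ontology_stub():
        calls.append("ontology")

    @pytest.fixture
    def project_stub():
        calls.append("project")

    def test_setup_order(ontology_stub, project_stub):
        # Same-scope fixtures are set up left to right.
        assert calls == ["ontology", "project"]
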
36 changes: 27 additions & 9 deletions libs/labelbox/tests/integration/test_ontology.py
@@ -177,16 +177,31 @@ def _get_attr_stringify_json(obj, attr):
return value


def test_feature_schema_create_read(client, rand_gen):
name = f"test-root-schema-{rand_gen(str)}"
feature_schema_cat_normalized = {
@pytest.fixture
def name_for_read(rand_gen):
yield f"test-root-schema-{rand_gen(str)}"


@pytest.fixture
def feature_schema_cat_normalized(name_for_read):
yield {
'tool': 'polygon',
'name': name,
'name': name_for_read,
'color': 'black',
'classifications': [],
}
created_feature_schema = client.create_feature_schema(
feature_schema_cat_normalized)


@pytest.fixture
def feature_schema_for_read(client, feature_schema_cat_normalized):
feature_schema = client.create_feature_schema(feature_schema_cat_normalized)
yield feature_schema
client.delete_unused_feature_schema(feature_schema.uid)


def test_feature_schema_create_read(client, feature_schema_for_read,
name_for_read):
created_feature_schema = feature_schema_for_read
queried_feature_schema = client.get_feature_schema(
created_feature_schema.uid)
for attr in Entity.FeatureSchema.fields():
@@ -195,9 +210,9 @@ def test_feature_schema_create_read(client, rand_gen):
queried_feature_schema, attr)

time.sleep(3) # Slight delay for searching
queried_feature_schemas = list(client.get_feature_schemas(name))
queried_feature_schemas = list(client.get_feature_schemas(name_for_read))
assert [feature_schema.name for feature_schema in queried_feature_schemas
] == [name]
] == [name_for_read]
queried_feature_schema = queried_feature_schemas[0]

for attr in Entity.FeatureSchema.fields():
@@ -206,7 +221,10 @@ def test_ontology_create_read(client, rand_gen):
queried_feature_schema, attr)


def test_ontology_create_read(client, rand_gen):
def test_ontology_create_read(
client,
rand_gen,
):
ontology_name = f"test-ontology-{rand_gen(str)}"
tool_name = f"test-ontology-tool-{rand_gen(str)}"
feature_schema_cat_normalized = {
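The refactor above splits the old inline setup into three chained fixtures: name_for_read feeds feature_schema_cat_normalized, which feeds feature_schema_for_read. Because pytest caches each fixture value once per test, the test sees the same random name in the created schema and in the later get_feature_schemas(name_for_read) lookup. A minimal standalone sketch of that per-test caching (names here are illustrative):

    import random

    import pytest

    @pytest.fixture
    def schema_name():
        return f"schema-{random.randint(0, 10**6)}"

    @pytest.fixture
    def schema_payload(schema_name):
        return {"name": schema_name}

    def test_names_agree(schema_name, schema_payload):
        # Both parameters resolve to the same cached fixture value.
        assert schema_payload["name"] == schema_name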