diff --git a/flow360/__init__.py b/flow360/__init__.py index 5353ae195..8d37d73b9 100644 --- a/flow360/__init__.py +++ b/flow360/__init__.py @@ -137,6 +137,7 @@ from flow360.component.surface_mesh_v2 import SurfaceMeshV2 as SurfaceMesh from flow360.component.volume_mesh import VolumeMeshV2 as VolumeMesh from flow360.environment import Env +from flow360.plugins import report from flow360.version import __solver_version__, __version__ __all__ = [ @@ -253,4 +254,13 @@ "SurfaceSliceOutput", "SlaterPorousBleed", "migration", +<<<<<<< HEAD +======= + "Water", + "PointArray2D", + "StreamlineOutput", + "Transformation", + "WallRotation", + "report", +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) ] diff --git a/flow360/plugins/report/__init__.py b/flow360/plugins/report/__init__.py new file mode 100644 index 000000000..feaceb736 --- /dev/null +++ b/flow360/plugins/report/__init__.py @@ -0,0 +1,67 @@ +"""Utilities for creating reports""" + +from flow360.plugins.report.report import ReportTemplate +from flow360.plugins.report.report_items import ( + Chart2D, + Chart3D, + FixedRangeLimit, + ManualLimit, + NonlinearResiduals, + PatternCaption, + Settings, + SubsetLimit, + Summary, + Table, +) +from flow360.plugins.report.utils import ( + Average, + DataItem, + Delta, + Expression, + GetAttribute, + Grouper, + Variable, +) +from flow360.plugins.report.uvf_shutter import ( + BottomCamera, + Camera, + FrontCamera, + FrontLeftBottomCamera, + FrontLeftTopCamera, + LeftCamera, + RearCamera, + RearLeftTopCamera, + RearRightBottomCamera, + TopCamera, +) + +__all__ = [ + "Average", + "BottomCamera", + "Camera", + "Chart2D", + "Chart3D", + "DataItem", + "Delta", + "Expression", + "FixedRangeLimit", + "FrontCamera", + "FrontLeftBottomCamera", + "FrontLeftTopCamera", + "GetAttribute", + "Grouper", + "LeftCamera", + "ManualLimit", + "NonlinearResiduals", + "PatternCaption", + "RearCamera", + "RearLeftTopCamera", + "RearRightBottomCamera", + "ReportTemplate", + "Settings", + "SubsetLimit", + "Summary", + "Table", + "TopCamera", + "Variable", +] diff --git a/flow360/plugins/report/report.py b/flow360/plugins/report/report.py index afe77b785..77673ab4d 100644 --- a/flow360/plugins/report/report.py +++ b/flow360/plugins/report/report.py @@ -135,23 +135,24 @@ class ReportTemplate(Flow360BaseModel): """ A model representing a report containing various components such as summaries, inputs, tables, and charts in both 2D and 3D. - - Parameters - ---------- - title: str, optional - Title of report, shown on the first page - items : List[Union[Summary, Inputs, Table, Chart2D, Chart3D]] - A list of report items, each of which can be a summary, input data, table, 2D chart, or 3D chart. - The `type` field acts as a discriminator for differentiating item types. - include_case_by_case : bool, default=True - Flag indicating whether to include a case-by-case analysis in the report. """ +<<<<<<< HEAD title: Optional[str] = None items: List[Union[Summary, Inputs, Table, Chart2D, Chart3D]] = pd.Field( discriminator="type_name" ) include_case_by_case: bool = False +======= + title: Optional[str] = pd.Field(None, description="Title of report, shown on the first page.") + items: List[ReportItemTypes] = pd.Field( + description="A list of report items, each of which can be a summary, input data, table, 2D chart, or 3D chart." 
+ ) + include_case_by_case: bool = pd.Field( + False, + description="Flag indicating whether to include a case-by-case analysis in the report.", + ) +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) settings: Optional[Settings] = Settings() @pd.model_validator(mode="after") diff --git a/flow360/plugins/report/report_items.py b/flow360/plugins/report/report_items.py index 9005fe929..6ad9d489d 100644 --- a/flow360/plugins/report/report_items.py +++ b/flow360/plugins/report/report_items.py @@ -96,20 +96,26 @@ class Settings(Flow360BaseModel): """ Settings for controlling output properties. - - Attributes - ---------- - dpi : PositiveInt, optional - The resolution in dots per inch (DPI) for generated images in report (A4 assumed). - If not specified, defaults to 300. """ +<<<<<<< HEAD dpi: Optional[pd.PositiveInt] = 300 +======= + # pylint: disable=fixme + # TODO: Create a setting class for each type of report items. + dpi: Optional[pd.PositiveInt] = Field( + 300, + description="The resolution in dots per inch (DPI) for generated images in report (A4 assumed).", + ) + dump_table_csv: Optional[pd.StrictBool] = Field( + False, description="If ``True``, :class:``Table`` data will be dumped into a csv file." + ) +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) class ReportItem(Flow360BaseModel): """ - Base class for for all report items + Base class for for all report items. """ _requirements: List[str] = None @@ -136,18 +142,9 @@ def get_requirements(self): class Summary(ReportItem): """ Represents a summary item in a report. - - Parameters - ---------- - text : str, optional - The main content or text of the summary. - type_name : Literal["Summary"], default="Summary" - Indicates that this item is of type "Summary"; this field is immutable. - _requirements : List[str], default=[] - List of specific requirements associated with the summary item. """ - text: Optional[str] = None + text: Optional[str] = Field(None, description="The main content or text of the summary.") type_name: Literal["Summary"] = Field("Summary", frozen=True) _requirements: List[str] = [] @@ -167,7 +164,7 @@ def get_doc_item(self, context: ReportContext, settings: Settings = None) -> Non class Inputs(ReportItem): """ - Inputs is a wrapper for a specific Table setup that details key inputs from the simulation + Inputs is a wrapper for a specific Table setup that details key inputs from the simulation. """ type_name: Literal["Inputs"] = Field("Inputs", frozen=True) @@ -192,9 +189,11 @@ def get_doc_item(self, context: ReportContext, settings: Settings = None) -> Non def human_readable_formatter(value): - """Custom formatter that uses k/M suffixes with a human-readable style. + """ + Custom formatter that uses k/M suffixes with a human-readable style. For large numbers, it attempts to show a concise representation without scientific notation: + - For millions, it will show something like 225M (no decimals if >100), 22.5M (one decimal if between 10 and 100), or 2.3M (two decimals if <10). - For thousands, it follows a similar pattern for k. @@ -239,31 +238,23 @@ def strip_trailing_zeros(s): class Table(ReportItem): """ Represents a table within a report, with configurable data and headers. - - Parameters - ---------- - data : list[Union[str, Delta]] - A list of table data entries, which can be either strings or `Delta` objects. - section_title : Union[str, None] - The title of the table section. 
- headers : Union[list[str], None], optional - List of column headers for the table, default is None. - type_name : Literal["Table"], default="Table" - Specifies the type of report item as "Table"; this field is immutable. - select_indices : Optional[List[NonNegativeInt]], optional - Specific indices to select for the chart. - formatter : Optional - formatter can be: - single str (e.g. ".4g") - list of str of the same length as `data` """ - data: List[Union[str, Delta, DataItem]] - section_title: Union[str, None] - headers: Union[list[str], None] = None + data: List[Union[str, Delta, DataItem]] = Field( + description="A list of table data entries, which can be either strings or `Delta` objects." + ) + section_title: Union[str, None] = Field(description="The title of the table section.") + headers: Union[list[str], None] = Field( + None, description="List of column headers for the table, default is None." + ) type_name: Literal["Table"] = Field("Table", frozen=True) - select_indices: Optional[List[NonNegativeInt]] = None - formatter: Optional[Union[str, List[Union[str, None]]]] = None + select_indices: Optional[List[NonNegativeInt]] = Field( + None, description="Specific indices to select for the chart." + ) + formatter: Optional[Union[str, List[Union[str, None]]]] = Field( + None, + description='Formatter can be a single str (e.g. ".4g") or a list of str of the same length as ``data``', + ) @model_validator(mode="before") @classmethod @@ -404,10 +395,58 @@ def get_doc_item(self, context: ReportContext, settings: Settings = None) -> Non table.add_row(formatted) table.add_hline() +<<<<<<< HEAD +======= + if settings is not None and settings.dump_table_csv: + df = self.to_dataframe(context=context) + df.to_csv(f"{self.section_title}.csv", index=False) + + +class PatternCaption(Flow360BaseModel): + """ + Class for setting up chart caption. + """ + + pattern: str = Field( + default="[case.name]", + description="The caption pattern containing placeholders like [case.name] and [case.id]." + + " These placeholders will be replaced with the actual case name and ID when resolving the caption." + + ' For example, "The case is [case.name] with ID [case.id]". Defaults to ``"[case.name]"``.', + ) + type_name: Literal["PatternCaption"] = Field("PatternCaption", frozen=True) + + # pylint: disable=no-member + def resolve(self, case: "Case") -> str: + """ + Resolves the pattern to the actual caption string using the provided case object. + + Parameters + ---------- + case : Case + The case object containing `name` and `id` attributes. + + Returns + ------- + str + The resolved caption string with placeholders replaced by actual values. + + Examples + -------- + >>> caption = PatternCaption(pattern="The case is [case.name] with ID [case.id]") + >>> case = Case(name="Example", id=123) + >>> caption.resolve(case) + 'The case is Example with ID 123' + """ + caption = self.pattern.replace("[case.name]", case.name) + caption = caption.replace("[case.id]", str(case.id)) + return caption + +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) class Chart(ReportItem): """ Represents a chart in a report, with options for layout and display properties. 
+<<<<<<< HEAD Parameters ---------- @@ -435,6 +474,33 @@ class Chart(ReportItem): separate_plots: Optional[bool] = None force_new_page: bool = False caption: Optional[str] = "" +======= + """ + + section_title: Optional[str] = Field(None, description="The title of the chart section.") + fig_name: Optional[FileNameStr] = Field( + None, + description="Name of the figure file or identifier for the chart (). Only '^[a-zA-Z0-9._-]+$' allowed.", + ) + fig_size: float = Field( + 0.7, description="Relative size of the figure as a fraction of text width." + ) + items_in_row: Union[int, None] = Field( + None, description="Number of items to display in a row within the chart section." + ) + select_indices: Optional[List[NonNegativeInt]] = Field( + None, description="Specific indices to select for the chart." + ) + separate_plots: Optional[bool] = Field( + None, description="If True, display as multiple plots; otherwise single plot." + ) + force_new_page: bool = Field( + False, description="If True, starts the chart on a new page in the report." + ) + caption: Optional[Union[str, List[str], PatternCaption]] = Field( + "", description="Caption to be shown for figures." + ) +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) @model_validator(mode="after") def _check_chart_args(self) -> None: @@ -596,8 +662,10 @@ def _add_row_figure( doc.append(NoEscape(r"\end{figure}")) +# pylint: disable=no-member class PlotModel(BaseModel): """ +<<<<<<< HEAD PlotModel that holds data and ability to return matplotlib fig """ @@ -611,6 +679,51 @@ class PlotModel(BaseModel): backgroung_png: Optional[str] = None xlim: Optional[Tuple[float, float]] = None ylim: Optional[Tuple[float, float]] = None +======= + PlotModel that stores series data and configuration required to render + a matplotlib ``Figure``. + """ + + x_data: Union[List[float], List[List[float]]] = Field( + description="Values for the primary x-axis. Accepts a single list (one series)" + + " or a list of lists (multiple series)." + ) + y_data: Union[List[float], List[List[float]]] = Field( + description="Values for the primary y-axis, matching the shape of ``x_data``." + ) + x_label: str = Field(description="Text label for the primary x-axis.") + y_label: str = Field(description="Text label for the primary y-axis.") + secondary_x_data: Optional[Union[List[float], List[List[float]]]] = Field( + None, description="Alternate x-axis values used when plotting against a secondary axis." + ) + secondary_x_label: Optional[str] = Field( + None, + description="Label for the secondary x-axis (shown only if ``secondary_x_data`` is set).", + ) + legend: Optional[List[str]] = Field( + None, + description="Series names to appear in the plot legend. The length should equal the number of plotted series.", + ) + is_log: bool = Field( + False, description="If ``True``, the y-axis is drawn on a logarithmic scale." + ) + style: str = Field( + "-", + description='Matplotlib style or format string (e.g. ``"-"`` or ``"o--"``) applied to all data series.', + ) + backgroung_png: Optional[str] = Field( + None, description="Path to a PNG file placed behind the plot as a background image." + ) + xlim: Optional[Tuple[float, float]] = Field( + None, description="Axis limits for the x-axis as ``(xmin, xmax)``." + ) + ylim: Optional[Tuple[float, float]] = Field( + None, description="Axis limits for the y-axis as ``(ymin, ymax)``." + ) + grid: Optional[bool] = Field( + True, description="Show grid lines if ``True``, hide them if ``False``." 
+ ) +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) @field_validator("x_data", "y_data", mode="before") @classmethod @@ -650,6 +763,7 @@ def x_data_as_np(self): """ return [np.array(x_series) for x_series in self.x_data] + # pylint: disable=not-an-iterable @property def y_data_as_np(self): """ @@ -742,6 +856,7 @@ def get_plot(self): class Chart2D(Chart): """ +<<<<<<< HEAD Represents a 2D chart within a report, plotting x and y data. Parameters @@ -787,6 +902,113 @@ def get_requirements(self): Returns requirements for this item """ return get_requirements_from_data_path([self.x, self.y]) +======= + Class for setting up xlim and ylim in Chart2D by providing + a lower and upper value of the limits. + """ + + lower: float = Field(description="Absolute value of the lower limit of an axis.") + upper: float = Field(description="Absolute value of the upper limit of an axis.") + type_name: Literal["ManualLimit"] = Field("ManualLimit", frozen=True) + + +class SubsetLimit(Flow360BaseModel): + """ + Class for setting up ylim in Chart2D by providing + a subset of values and an offset, which will be applied + to the range of y values. + """ + + subset: Tuple[pd.NonNegativeFloat, pd.NonNegativeFloat] = Field( + description="Tuple of fractions between 0 and 1 describing the lower" + + " and upper range of the subset of values that will be used to calculate the ylim." + ) + offset: float = Field( + description='"Padding" that will be added to the top and bottom of the charts y_range.' + + " It scales with with calculated range of y values. For example, if range of y value is 10," + + ' an offset=0.3 will "expand" the range by 0.3*10 on both sides,' + + " resulting in a final range of y values equal to 16." + ) + type_name: Literal["SubsetLimit"] = Field("SubsetLimit", frozen=True) + + @pd.model_validator(mode="after") + def check_subset_values(self): + """ + Ensure that correct subset values are provided. + """ + lower, upper = self.subset + if not lower < 1 or not upper <= 1: + raise ValueError("Subset values need to be between 0 and 1 (inclusive).") + if not lower <= upper: + raise ValueError("Lower fraction of the subset cannot be higher than upper fraction.") + return self + + +class FixedRangeLimit(Flow360BaseModel): + """ + Class for setting up ylim in Chart2D by providing + a fixed range of y values and strategy for centering. + """ + + fixed_range: float = Field( + description="Range of absolute y values that will be visible on the chart." + + " For example, fixed_range=3 means that y_max - y_min = 3." + ) + center_strategy: Literal["last", "last_percent"] = Field( + "last", + description="Describes which values will be considered for calculating ylim." + + ' "last" means that the last value will be the center. "last_percent"' + + " means that the middle point between max and min y values" + + " in the specified center_fraction will be the center.", + ) + center_fraction: Optional[pd.PositiveFloat] = Field( + None, + description='Used alongside center_strategy="last_percent",' + + " describes values that will be taken into account for calculating ylim." 
+ + " For example, center_fraction=0.3 means that the last 30% of data will be used.", + ) + type_name: Literal["FixedRangeLimit"] = Field("FixedRangeLimit", frozen=True) + + @pd.model_validator(mode="after") + def check_center_fraction(self): + """Ensure that correct center fraction value is provided.""" + if self.center_strategy == "last_percent" and self.center_fraction >= 1: + raise ValueError("Center fraction value needs to be between 0 and 1 (exclusive).") + return self + + +class BaseChart2D(Chart, metaclass=ABCMeta): + """ + Base class for Chart2D like objects - does not contain data. + """ + + operations: Optional[Union[List[OperationTypes], OperationTypes]] = Field( + None, description="List of operations to perform on the data." + ) + focus_x: Optional[ + Annotated[ + Tuple[float, float], + Field( + deprecated="focus_x is deprecated, your input was converted to a corresponding SubsetLimit. " + + "Please use ylim=SubsetLimit instead in the future.", + ), + ] + ] = None + xlim: Optional[Union[ManualLimit, Tuple[float, float]]] = Field( + None, description="Defines the range of x values that will be displayed on the chart." + ) + ylim: Optional[Union[ManualLimit, SubsetLimit, FixedRangeLimit, Tuple[float, float]]] = Field( + None, + description="Defines the range of y values that will be displayed on the chart." + + " This helps with highlighting a desired portion of the chart.", + ) + y_log: Optional[bool] = Field( + False, description="Sets the y axis to logarithmic scale. Defaults to ``False``." + ) + show_grid: Optional[bool] = Field( + True, description="Turns the gridlines on. Defaults to ``True``." + ) +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) def is_log_plot(self): """ @@ -973,6 +1195,312 @@ def get_data(self, cases: List[Case], context: ReportContext) -> PlotModel: ylim=ylim, ) +<<<<<<< HEAD +======= + def _get_figures(self, cases, context: ReportContext): + file_names = [] + case_by_case, data_storage = context.case_by_case, context.data_storage + cbc_str = "_cbc_" if case_by_case else "_" + if self.separate_plots: + for case in cases: + file_name = os.path.join(data_storage, self.fig_name + cbc_str + case.id + ".pdf") + data = self.get_data([case], context) + fig = data.get_plot() + fig.savefig(file_name, format="pdf", bbox_inches="tight") + file_names.append(file_name) + plt.close() + + else: + file_name = os.path.join(data_storage, self.fig_name + cbc_str + "all_cases" + ".pdf") + data = self.get_data(cases, context) + fig = data.get_plot() + fig.savefig(file_name, format="pdf", bbox_inches="tight") + file_names.append(file_name) + plt.close() + + return file_names, data.x_label, data.y_label + + # pylint: disable=too-many-return-statements + def _handle_2d_caption( + self, case: Case = None, x_lab: str = None, y_lab: str = None, case_number: int = None + ): + """ + Handle captions for Chart2D. + """ + + if self.caption == "": + if self.items_in_row is not None: + return f"{bold(y_lab)} against {bold(x_lab)}." + if self.separate_plots is True: + return f"{bold(y_lab)} against {bold(x_lab)} for {bold(case.name)}." + if self.select_indices is not None: + return f"{bold(y_lab)} against {bold(x_lab)} for {bold('selected cases')}." + return f"{bold(y_lab)} against {bold(x_lab)} for {bold('all cases')}." 
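+        # Non-empty captions: when plots are separate, a list caption is indexed by case and a
+        # PatternCaption is resolved against each case; otherwise the caption is used as given.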
+ if self.separate_plots is True: + if isinstance(self.caption, List): + return escape_latex(self.caption[case_number]) + if isinstance(self.caption, PatternCaption): + return escape_latex(self.caption.resolve(case)) + return self.caption + + # pylint: disable=too-many-arguments,too-many-locals + def get_doc_item(self, context: ReportContext, settings: Settings = None) -> None: + """ + Returns doc item for chart. + """ + self._handle_new_page(context.doc) + self._handle_grid_input(context.cases) + self._handle_title(context.doc, context.section_func) + cases = self._filter_input_cases(context.cases, context.case_by_case) + self._check_caption_validity(cases) + + file_names, x_lab, y_lab = self._get_figures(cases, context) + + if self.items_in_row is not None: + caption = NoEscape(self._handle_2d_caption(x_lab=x_lab, y_lab=y_lab)) + self._add_row_figure(context.doc, file_names, caption, [case.name for case in cases]) + else: + if self.separate_plots is True: + for case_number, (case, file_name) in enumerate(zip(cases, file_names)): + caption = NoEscape( + self._handle_2d_caption( + case=case, x_lab=x_lab, y_lab=y_lab, case_number=case_number + ) + ) + self._add_figure(context.doc, file_name, caption) + else: + caption = NoEscape(self._handle_2d_caption(x_lab=x_lab, y_lab=y_lab)) + self._add_figure(context.doc, file_names[-1], caption) + + context.doc.append(NoEscape(r"\FloatBarrier")) + context.doc.append(NoEscape(r"\clearpage")) + + +class Chart2D(BaseChart2D): + """ + Represents a 2D chart within a report, plotting x and y data. + + Example + ------- + + - Create a chart of CL for an alpha sweep case, different turbulence models + + >>> Chart2D( + ... x="params/operating_condition/beta", + ... y=DataItem(data="total_forces/CL", operations=[Average(fraction=0.1)]), + ... section_title="CL vs alpha", + ... fig_name="cl_vs_alpha", + ... group_by=Grouper(group_by="params/models/Fluid/turbulence_model_solver/type_name"), + ... ) + + ==== + """ + + x: Union[DataItem, Delta, str] = Field( + description="The data source for the x-axis, which can be a string path, 'DataItem', a 'Delta' object." + ) + y: Union[DataItem, Delta, str, List[DataItem], List[Delta], List[str]] = Field( + description="The data source for the y-axis, which can be a string path," + + " 'DataItem', a 'Delta' object or their list." + ) + group_by: Optional[Union[str, Grouper]] = Field( + Grouper(group_by=None), + description="A grouper object or a string leading to the data by which the grouping should be done.", + ) + include: Optional[ + Annotated[ + List[str], + Field( + deprecated="Include and exclude are deprecated as Chart2D options, use DataItem instead." + ), + ] + ] = Field( + None, + description="List of boundaries to include in data. Applicable to:" + + " x_slicing_force_distribution, y_slicing_force_distribution, surface_forces.", + ) + exclude: Optional[ + Annotated[ + List[str], + Field( + deprecated="Include and exclude are deprecated as Chart2D options, use DataItem instead." + ), + ] + ] = Field( + None, + description="List of boundaries to exclude from data. Applicable to:" + + " x_slicing_force_distribution, y_slicing_force_distribution, surface_forces.", + ) + background: Union[Literal["geometry"], None] = Field( + None, + description='Background type for the chart; set to "geometry" or None. 
Defaults to ``None``.', + ) + _requirements: List[str] = [_requirements_mapping["total_forces"]] + type_name: Literal["Chart2D"] = Field("Chart2D", frozen=True) + + @pd.model_validator(mode="after") + def _handle_deprecated_include_exclude(self): + include = self.include + exclude = self.exclude + if (include is not None) or (exclude is not None): + self.include = None + self.exclude = None + self.x = self._overload_include_exclude(include, exclude, self.x) + if isinstance(self.y, List): + new_value = [] + for data_variable in self.y: + new_value.append( + self._overload_include_exclude(include, exclude, data_variable) + ) + self.y = new_value + else: + self.y = self._overload_include_exclude(include, exclude, self.y) + return self + + @pd.model_validator(mode="after") + def _create_grouper(self): + if isinstance(self.group_by, str): + self.group_by = Grouper(group_by=self.group_by) + return self + + @classmethod + def _overload_include_exclude(cls, include, exclude, data_variable): + if isinstance(data_variable, Delta): + raise Flow360ValidationError( + "Delta can not be used with exclude/include options. " + + "Specify the Delta data using DataItem." + ) + if not isinstance(data_variable, DataItem): + data_variable = DataItem(data=data_variable, include=include, exclude=exclude) + else: + data_variable.include = include + data_variable.exclude = exclude + return data_variable + + def get_requirements(self): + """ + Returns requirements for this item. + """ + if isinstance(self.y, list): + return get_requirements_from_data_path([self.x, *self.y]) + return get_requirements_from_data_path([self.x, self.y]) + + # pylint: disable=no-member + def _handle_data_with_units(self, x_data, y_data, x_label, y_label): + for idx, (x_series, y_series) in enumerate(zip(x_data, y_data)): + united_array_x = unyt.unyt_array(x_series) + united_array_y = unyt.unyt_array(y_series) + if united_array_x.units != unyt.dimensionless: + x_data[idx] = united_array_x + if united_array_y.units != unyt.dimensionless: + y_data[idx] = united_array_y + + if self._check_dimensions_consistency(x_data) is True: + x_unit = x_data[0].units + x_data = [data.value.tolist() for data in x_data] + x_label += f" [{x_unit}]" + + if self._check_dimensions_consistency(y_data) is True: + y_unit = y_data[0].units + y_data = [data.value.tolist() for data in y_data] + if not isinstance(self.y, list): + y_label += f" [{y_unit}]" + + return x_data, y_data, x_label, y_label + + def _handle_legend(self, cases, x_data, y_data): + if not self._is_series_data(cases[0], self.x): + return self.group_by.arrange_legend() + + if self._is_multiline_data(x_data, y_data): + x_data = [float(data) for data in x_data] + y_data = [float(data) for data in y_data] + legend = None + elif isinstance(self.y, list) and (len(self.y) > 1): + legend = [] + for case in cases: + for y in self.y: + if len(cases) > 1: + legend.append(f"{case.name} - {path_variable_name(str(y))}") + else: + legend.append(f"{path_variable_name(str(y))}") + else: + legend = [case.name for case in cases] + + return legend + + def _is_series_data(self, example_case, variable): + data_point = data_from_path(example_case, variable, None) + if isinstance(data_point, Iterable): + if isinstance(data_point, unyt_quantity) and data_point.shape == (): + return False + return True + return False + + def _validate_variable_format(self, example_case, x_variable, y_variables): + series = self._is_series_data(example_case, x_variable) + + for y in y_variables: + if series != 
self._is_series_data(example_case, y): + raise AttributeError( + "Variables incompatible - cannot plot point and series data on the same plot." + ) + + def _load_series(self, cases, x_label, y_variables): + x_data = [] + y_data = [] + for case in cases: + filter_physical_steps = isinstance(case.params.time_stepping, Unsteady) and ( + x_label in ["time", "physical_step"] + ) + for y in y_variables: + x_data.append( + data_from_path( + case, self.x, cases, filter_physical_steps_only=filter_physical_steps + ) + ) + y_data.append( + data_from_path(case, y, cases, filter_physical_steps_only=filter_physical_steps) + ) + + return x_data, y_data + + def _load_points(self, cases, y_variables): + x_data, y_data = self.group_by.initialize_arrays(cases, y_variables) + for case in cases: + for y in y_variables: + x_data_point = data_from_path(case, self.x, cases) + y_data_point = data_from_path(case, y, cases) + x_data, y_data = self.group_by.arrange_data( + case, x_data, y_data, x_data_point, y_data_point, y + ) + + return x_data, y_data + + def _load_data(self, cases): + x_label = path_variable_name(str(self.x)) + + if not isinstance(self.y, list): + y_label = path_variable_name(str(self.y)) + y_variables = [self.y] + else: + y_label = "value" + y_variables = self.y.copy() + + self._validate_variable_format(cases[0], self.x, y_variables) + + if self._is_series_data(cases[0], self.x): + x_data, y_data = self._load_series(cases, x_label, y_variables) + else: + x_data, y_data = self._load_points(cases, y_variables) + + x_data, y_data, x_label, y_label = self._handle_data_with_units( + x_data, y_data, x_label, y_label + ) + + return x_data, y_data, x_label, y_label + +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) def _get_background_chart(self, x_data): if self.background == "geometry": dimension = np.amax(x_data[0]) - np.amin(x_data[0]) @@ -1026,6 +1554,7 @@ def _get_figures(self, cases, context: ReportContext): file_names.append(file_name) plt.close() +<<<<<<< HEAD else: file_name = os.path.join(data_storage, self.fig_name + cbc_str + "all_cases" + ".pdf") data = self.get_data(cases, context) @@ -1038,6 +1567,32 @@ def _get_figures(self, cases, context: ReportContext): # pylint: disable=too-many-arguments,too-many-locals def get_doc_item(self, context: ReportContext, settings: Settings = None) -> None: +======= +class NonlinearResiduals(BaseChart2D): + """ + Residuals is an object for showing the solution history of nonlinear residuals. + """ + + show_grid: Optional[bool] = Field( + True, description="If ``True``, grid lines are displayed on the plot. Defaults to ``True``." + ) + separate_plots: Optional[bool] = Field( + True, description="If ``True``, each residual component is plotted in a separate subplot." + ) + xlim: Optional[Union[ManualLimit, Tuple[float, float]]] = Field( + None, + description="Limits for the *x*-axis. 
Can be a tuple ``(xmin, xmax)`` or a `ManualLimit`.", + ) + section_title: Literal["Nonlinear residuals"] = Field("Nonlinear residuals", frozen=True) + x: Literal["nonlinear_residuals/pseudo_step"] = Field( + "nonlinear_residuals/pseudo_step", frozen=True + ) + y_log: Literal[True] = Field(True, frozen=True) + _requirements: List[str] = [_requirements_mapping["nonlinear_residuals"]] + type_name: Literal["NonlinearResiduals"] = Field("NonlinearResiduals", frozen=True) + + def get_requirements(self): +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) """ returns doc item for chart """ @@ -1069,6 +1624,7 @@ def get_doc_item(self, context: ReportContext, settings: Settings = None) -> Non class Chart3D(Chart): """ Represents a 3D chart within a report, displaying a specific surface field. +<<<<<<< HEAD Parameters ---------- @@ -1085,9 +1641,13 @@ class Chart3D(Chart): Type of object to display in the 3D chart. exclude : List[str], optional Exclude boundaries from screenshot, +======= +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) """ - field: Optional[Union[SurfaceFieldNames, str]] = None + field: Optional[Union[SurfaceFieldNames, str]] = Field( + None, description="The name of the field to display in the chart." + ) camera: Optional[ Union[ Camera, @@ -1101,14 +1661,32 @@ class Chart3D(Chart): RearRightBottomCamera, TopCamera, ] - ] = pd.Field(default=Camera(), discriminator="type") - limits: Optional[Union[Tuple[float, float], Tuple[DimensionedTypes, DimensionedTypes]]] = None - is_log_scale: bool = False - show: Union[ShutterObjectTypes, Literal["isosurface"]] - iso_field: Optional[Union[IsoSurfaceFieldNames, str]] = None - mode: Optional[Literal["contour", "lic"]] = "contour" - exclude: Optional[List[str]] = None - include: Optional[List[str]] = None + ] = pd.Field( + default=Camera(), description="Specify how the view will be set up.", discriminator="type" + ) + limits: Optional[Union[Tuple[float, float], Tuple[DimensionedTypes, DimensionedTypes]]] = Field( + None, description="Limits for the field values, specified as a tuple (min, max)." + ) + is_log_scale: bool = Field( + False, description="Applies a logarithmic scale to the colormap. Defaults to ``False``." + ) + show: ShutterObjectTypes = Field( + description="Type of object to display in the 3D chart. Note: ``qcriterion`` refers to an iso-surface" + + " that is created by default, whereas ``isosurface`` refers to iso-surfaces specified in simulation outputs." + ) + iso_field: Optional[Union[IsoSurfaceFieldNames, str]] = Field( + None, + description="Iso-surface fields to be displayed when ``isosurface`` is selected in ``show``.", + ) + mode: Optional[Literal["contour", "lic"]] = Field( + "contour", description="Field display mode, lic stands for line integral convolution." + ) + include: Optional[List[str]] = Field( + None, description="Boundaries to be included in the chart." + ) + exclude: Optional[List[str]] = Field( + None, description="Boundaries to be excluded from the chart." + ) type_name: Literal["Chart3D"] = Field("Chart3D", frozen=True) # pylint: disable=unsubscriptable-object diff --git a/flow360/plugins/report/utils.py b/flow360/plugins/report/utils.py index 8a2c6ed55..739de72ef 100644 --- a/flow360/plugins/report/utils.py +++ b/flow360/plugins/report/utils.py @@ -344,6 +344,44 @@ def calculate( """ +<<<<<<< HEAD +======= +class GetAttribute(GenericOperation): + """ + Retrieve an attribute from a data object. 
+ + This operation extracts the attribute specified by `attr_name` from the provided data object + using Python's built-in `getattr` function. If the attribute is not found, an `AttributeError` + is raised, providing a clear error message. + + Methods + ------- + calculate(data, case, cases, variables, new_variable_name) + Retrieves the attribute specified by `attr_name` from the given data object. + Returns a tuple containing the original data, the cases list, and the extracted attribute value. + """ + + attr_name: str = pd.Field( + description="The name of the attribute to retrieve from the data object." + ) + type_name: Literal["GetAttribute"] = pd.Field("GetAttribute", frozen=True) + + def calculate( + self, data, case, cases, variables, new_variable_name + ): # pylint: disable=too-many-arguments + """ + Getting attribute on the provided data. + """ + + try: + result = getattr(data, self.attr_name) + except AttributeError as err: + raise AttributeError(f"Attribute {self.attr_name} not found in {type(data)=}") from err + + return data, cases, result + + +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) class Average(GenericOperation): """ Represents an averaging operation on simulation results. @@ -351,22 +389,6 @@ class Average(GenericOperation): This operation calculates the average of a given data set over a specified range of steps, time, or fraction of the dataset. - Attributes - ---------- - start_step : Optional[pd.NonNegativeInt] - The starting step for averaging. If not specified, averaging starts from the beginning. - end_step : Optional[pd.NonNegativeInt] - The ending step for averaging. If not specified, averaging continues to the end. - start_time : Optional[pd.NonNegativeFloat] - The starting time for averaging. If not specified, averaging starts from the beginning. - end_time : Optional[pd.NonNegativeFloat] - The ending time for averaging. If not specified, averaging continues to the end. - fraction : Optional[pd.PositiveFloat] - The fraction of the dataset to be averaged, ranging from 0 to 1. - Only the fraction-based method is implemented. - type_name : Literal["Average"] - A literal string indicating the operation type. - Raises ------ NotImplementedError @@ -378,11 +400,28 @@ class Average(GenericOperation): result = avg.calculate(data, case, cases, variables, new_variable_name) """ - start_step: Optional[pd.NonNegativeInt] = None - end_step: Optional[pd.NonNegativeInt] = None - start_time: Optional[pd.NonNegativeFloat] = None - end_time: Optional[pd.NonNegativeFloat] = None - fraction: Optional[pd.PositiveFloat] = pd.Field(None, le=1) + start_step: Optional[pd.NonNegativeInt] = pd.Field( + None, + description="The starting step for averaging. If not specified, averaging starts from the beginning.", + ) + end_step: Optional[pd.NonNegativeInt] = pd.Field( + None, + description="The ending step for averaging. If not specified, averaging continues to the end.", + ) + start_time: Optional[pd.NonNegativeFloat] = pd.Field( + None, + description="The starting time for averaging. If not specified, averaging starts from the beginning.", + ) + end_time: Optional[pd.NonNegativeFloat] = pd.Field( + None, + description="The ending time for averaging. If not specified, averaging continues to the end.", + ) + fraction: Optional[pd.PositiveFloat] = pd.Field( + None, + le=1, + description="The fraction of the dataset to be averaged, ranging from 0 to 1." 
+ + " Only the fraction-based method is implemented.", + ) type_name: Literal["Average"] = pd.Field("Average", frozen=True) model_config = pd.ConfigDict( @@ -419,8 +458,8 @@ class Variable(Flow360BaseModel): Variable model used in expressions """ - name: str - data: str + name: str = pd.Field(description="Name of the variable.") + data: str = pd.Field(description="Data contained within the variable.") class Expression(GenericOperation): @@ -431,13 +470,6 @@ class Expression(GenericOperation): variables extracted from simulation results. The results of the expression evaluation can be added as a new column to a dataframe for further analysis. - Attributes - ---------- - expr : str - The mathematical expression to evaluate. It should be written in a syntax - compatible with the `numexpr` library, using variable names that correspond - to columns in the dataframe or user-defined variables. - Example ------- expr = Expression(expr="totalCD * area") @@ -452,7 +484,11 @@ class Expression(GenericOperation): If the data type is unsupported by the `calculate` method. """ - expr: str + expr: str = pd.Field( + description="The mathematical expression to evaluate. It should be written in a syntax compatible" + + " with the `numexpr` library, using variable names that correspond to" + + " columns in the dataframe or user-defined variables." + ) type_name: Literal["Expression"] = pd.Field("Expression", frozen=True) @classmethod @@ -561,6 +597,7 @@ def calculate( OperationTypes = Annotated[Union[Average, Expression], pd.Field(discriminator="type_name")] +# pylint: disable=no-member class DataItem(Flow360BaseModel): """ Represents a retrievable data item that can be post-processed. @@ -569,36 +606,36 @@ class DataItem(Flow360BaseModel): - Excluding specific boundaries (if applicable). - Applying one or more post-processing operations (e.g., mathematical expressions, averaging). - Introducing additional variables for use in these operations. + """ - Parameters - ---------- - data : str - Path to the data item to retrieve from a `Case`. The path can include nested attributes - and dictionary keys (e.g., "results.surface_forces"). - title : str, optional - A human-readable title for this data item. If omitted, the title defaults to the - last component of the `data` path. - include : list[str], optional - A list of boundaries to include in the retrieved data (e.g., certain surfaces). Only - applicable to some data types, such as surface forces or slicing force distributions. - exclude : list[str], optional - A list of boundaries to exclude from the retrieved data (e.g., certain surfaces). Only - applicable to some data types, such as surface forces or slicing force distributions. - operations : list[OperationTypes], optional - A list of operations to apply to the retrieved data. Supported operations include: - `Expression` and `Average`. - variables : list[Variable], optional - Additional user-defined variables that may be referenced in the `Expression` operations. - type_name : Literal["DataItem"] - A literal string identifying the type of the item, set to "DataItem". - """ - - data: str - title: Optional[str] = None - include: Optional[List[str]] = None - exclude: Optional[List[str]] = None - operations: Optional[List[OperationTypes]] = None - variables: Optional[List[Variable]] = None + data: str = pd.Field( + description="Path to the data item to retrieve from a `Case`." + + ' The path can include nested attributes and dictionary keys (e.g., "results.surface_forces").' 
+ ) + title: Optional[str] = pd.Field( + None, + description="A human-readable title for this data item." + + " If omitted, the title defaults to the last component of the `data` path.", + ) + include: Optional[List[str]] = pd.Field( + None, + description="Boundaries to be included in the retrieved data (e.g., certain surfaces)." + + " Only applicable to some data types, such as surface forces or slicing force distributions.", + ) + exclude: Optional[List[str]] = pd.Field( + None, + description="Boundaries to be excluded from the retrieved data (e.g., certain surfaces)." + + " Only applicable to some data types, such as surface forces or slicing force distributions.", + ) + operations: Optional[List[OperationTypes]] = pd.Field( + None, + description="A list of operations to apply to the retrieved data." + + " Supported operations include: `Expression` and `Average`.", + ) + variables: Optional[List[Variable]] = pd.Field( + None, + description="Additional user-defined variables that may be referenced in the `Expression` operations.", + ) type_name: Literal["DataItem"] = pd.Field("DataItem", frozen=True) @pd.model_validator(mode="before") @@ -686,17 +723,14 @@ def __str__(self): class Delta(Flow360BaseModel): """ Represents a delta calculation between a reference case and a target case based on specified data. - - Parameters - ---------- - data : str - Path to the data item used for delta calculation. - ref_index : Optional[NonNegativeInt], default=0 - Index of the reference case in the list of cases for comparison. """ - data: Union[str, DataItem] - ref_index: Optional[pd.NonNegativeInt] = 0 + data: Union[str, DataItem] = pd.Field( + description="Path to the data item used for delta calculation." + ) + ref_index: Optional[pd.NonNegativeInt] = pd.Field( + 0, description="Index of the reference case in the list of cases for comparison." + ) type_name: Literal["Delta"] = pd.Field("Delta", frozen=True) def calculate(self, case: Case, cases: List[Case]) -> float: @@ -753,6 +787,146 @@ def __init__(self, *args, width_argument=NoEscape(r"\linewidth"), **kwargs): super().__init__(*args, start_arguments=width_argument, **kwargs) +<<<<<<< HEAD +======= +class Grouper(Flow360BaseModel): + """ + Class for objects responsible for grouping data into series in Chart2D. + + Example + ------- + - Data will be grouped by each turbulence model and then by the first tag, + if the first tag is "a" or "b" the data point will be assigned to group "bucket0", + if the first tag is "c" the data point will be assigned to "bucket1" + + >>> Grouper( + ... group_by=["params/models/Fluid/turbulence_model_solver/type_name", "info/tags/0"], + ... buckets=[None, {"bucket0": ["a", "b"], "bucket1": ["c"]}], + ... ) + + ==== + """ + + group_by: Union[str, List[str], None, List[None]] = pd.Field( + description="The path to the data attribute (or paths to attributes in case of multi-level grouping)" + + " by which the grouping should be done." + ) + buckets: Optional[Union[dict[str, List], List[Union[dict[str, List], None]]]] = pd.Field( + None, + description="Dictionaries where key represents the name of the group and value is the list of values," + + " that belong to the group. 
If all the values should be unique, enter None for the corresponding bucket.", + ) + _series_assignments: List[List[str]] = pd.PrivateAttr(default=None) + + @pd.model_validator(mode="after") + def _handle_singular_inputs(self): + if not isinstance(self.group_by, List): + self.group_by = [self.group_by] + if not isinstance(self.buckets, List): + self.buckets = [self.buckets] * len(self.group_by) + return self + + @pd.model_validator(mode="after") + def _check_argument_lengths(self): + if self.buckets is not None and len(self.group_by) != len(self.buckets): + raise pd.ValidationError( + "group_by and buckets must be the same length. " + + "If a category should not be grouped into buckets enter None in the bucket's place." + ) + return self + + def _get_possible_assignments(self, category, cases): + assignments = [] + for case in cases: + assignment = data_from_path(case, category, cases) + if assignment not in assignments: + assignments.append(assignment) + return assignments + + def initialize_arrays(self, cases, y_variables): + """ + Initializes data structures for x_data and y_data. + """ + self._series_assignments = [[path_variable_name(str(y))] for y in y_variables] + + if self.group_by != [None]: + for category, bucket in zip(self.group_by, self.buckets): + if bucket is not None: + grouping_attributes = bucket.keys() + else: + grouping_attributes = self._get_possible_assignments(category, cases) + + new_assignments = [] + for assignment in self._series_assignments: + for attribute in grouping_attributes: + new_assignments.append(assignment + [attribute]) + + self._series_assignments = new_assignments + + x_data = [[] for _ in range(len(self._series_assignments))] + y_data = [[] for _ in range(len(self._series_assignments))] + return x_data, y_data + + # pylint: disable=inconsistent-return-statements + def _is_in_bucket(self, bucket_criteria, attribute) -> bool: + for criterion in bucket_criteria: + if attribute == criterion: + return True + if callable(criterion): + crit_result = criterion(attribute) + if isinstance(crit_result, bool): + return crit_result + raise AttributeError( + f"Bucket criterion must return bool, current returned is {type(crit_result)}." + ) + + # pylint: disable=too-many-arguments + def arrange_data(self, case, x_data, y_data, x_data_point, y_data_point, y_variable): + """ + Sorts the data into appropriate series based on the case. + """ + + point_attributes = [path_variable_name(str(y_variable))] + + if self.group_by != [None]: + for category, bucket in zip(self.group_by, self.buckets): + attribute = data_from_path(case, category) + if bucket is not None: + for key, value in bucket.items(): + if self._is_in_bucket(value, attribute): + point_attributes.append(key) + else: + point_attributes.append(attribute) + + for idx, assignment in enumerate(self._series_assignments): + if point_attributes == assignment: + x_data[idx].append(x_data_point) + y_data[idx].append(y_data_point) + return x_data, y_data + + return x_data, y_data + + def arrange_legend(self): + """ + Creates the legend for the defined grouping. 
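+
+        If every series shares the same top-level group (for example, a single y variable),
+        that level is dropped from the legend labels.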
+ """ + legend = [] + assignments = self._series_assignments.copy() + + assignments_array = np.array(assignments) + if np.all(assignments_array[:, 0] == assignments_array[0, 0]): + assignments = assignments_array[:, 1:].tolist() + + if assignments is None: + return None + + for assignment in assignments: + legend.append(" - ".join(assignment)) + + return legend + + +>>>>>>> 8ba5fb3b (adjusted report related docstrings and added report init (#1159)) def generate_colorbar_from_image( image_filename=os.path.join(here, "img", "colorbar_rainbow_banded_30.png"), limits: Tuple[float, float] = (0, 1), diff --git a/flow360/plugins/report/uvf_shutter.py b/flow360/plugins/report/uvf_shutter.py index 4e7a8a6e5..ac614e676 100644 --- a/flow360/plugins/report/uvf_shutter.py +++ b/flow360/plugins/report/uvf_shutter.py @@ -34,7 +34,7 @@ class ShutterRequestBaseModel(Flow360BaseModel): """ -ShutterObjectTypes = Literal["slices", "qcriterion", "isosurfaces", "boundaries", "edges"] +ShutterObjectTypes = Literal["slices", "qcriterion", "isosurface", "boundaries"] class Resource(Flow360BaseModel): @@ -187,162 +187,138 @@ class SetLICPayload(Flow360BaseModel): class Camera(Flow360BaseModel): """ Represents the camera configuration payload. + """ - Attributes - ---------- - position : Vector3 - Camera eye position, think of the eye position as a position on the unit sphere centered at the `lookAt`. - The units are in length units used in geometry or volume mesh. - up : Vector3 - Up orientation of the camera. - look_at : Vector3 - Target point the camera will look at from the position. Default: center of bbox - The units are in length units used in geometry or volume mesh. - pan_target : Vector3 or None - Position to pan the viewport center to; if undefined, the default is `look_at`. - The units are in length units used in geometry or volume mesh. - dimension_dir : {'width', 'height', 'diagonal'} - The direction `dimension_size_model_units` is for. - dimension : float - The camera zoom will be set such that the extents of the scene's projection is this number of model units for - the applicable `dimension_dir`. The units are in length units used in geometry or volume mesh. - """ - - position: Optional[Tuple[float, float, float]] = (-1, -1, 1) - up: Optional[Tuple[float, float, float]] = (0, 0, 1) - look_at: Optional[Tuple[float, float, float]] = None - pan_target: Optional[Tuple[float, float, float]] = None + position: Optional[Tuple[float, float, float]] = pd.Field( + (-1, -1, 1), + description="Camera eye position, think of the eye position as a position on the unit sphere" + + " centered at the `lookAt`. The units are in length units used in geometry or volume mesh.", + ) + up: Optional[Tuple[float, float, float]] = pd.Field( + (0, 0, 1), description="Up orientation of the camera." + ) + look_at: Optional[Tuple[float, float, float]] = pd.Field( + None, + description="Target point the camera will look at from the position. Default: center of bbox." + + " The units are in length units used in geometry or volume mesh.", + ) + pan_target: Optional[Tuple[float, float, float]] = pd.Field( + None, + description="Position to pan the viewport center to; if undefined, the default is `look_at`." 
+ + " The units are in length units used in geometry or volume mesh.", + ) dimension_dir: Optional[Literal["width", "height", "diagonal"]] = pd.Field( - "width", alias="dimensionDirection" + "width", + alias="dimensionDirection", + description="The direction `dimension_size_model_units` is for.", + ) + dimension: Optional[float] = pd.Field( + None, + alias="dimensionSizeModelUnits", + description="The camera zoom will be set such that the extents of the scene's projection is this number" + + " of model units for the applicable `dimension_dir`." + + " The units are in length units used in geometry or volume mesh.", ) - dimension: Optional[float] = pd.Field(None, alias="dimensionSizeModelUnits") - type: Literal["Camera"] = "Camera" + type: Literal["Camera"] = pd.Field("Camera", frozen=True) class TopCamera(Camera): """ Camera looking down from above (along +Z). - Position: (0, 0, 1) - Look-at: (0, 0, 0) - Up: (0, 1, 0) """ - position: Tuple[float, float, float] = (0.0, 0.0, 1.0) - look_at: Tuple[float, float, float] = (0.0, 0.0, 0.0) - up: Tuple[float, float, float] = (0.0, 1.0, 0.0) - type: Literal["TopCamera"] = "TopCamera" + position: Tuple[float, float, float] = pd.Field((0.0, 0.0, 1.0)) + look_at: Tuple[float, float, float] = pd.Field((0.0, 0.0, 0.0)) + up: Tuple[float, float, float] = pd.Field((0.0, 1.0, 0.0)) + type: Literal["TopCamera"] = pd.Field("TopCamera") class LeftCamera(Camera): """ Camera looking from the positive Y side toward the origin (i.e. along -Y). - Position: (0, -1, 0) - Look-at: (0, 0, 0) - Up: (0, 0, 1) """ - type: Literal["LeftCamera"] = "LeftCamera" - position: Tuple[float, float, float] = (0.0, -1.0, 0.0) - look_at: Tuple[float, float, float] = (0.0, 0.0, 0.0) - up: Tuple[float, float, float] = (0.0, 0.0, 1.0) + position: Tuple[float, float, float] = pd.Field((0.0, -1.0, 0.0)) + look_at: Tuple[float, float, float] = pd.Field((0.0, 0.0, 0.0)) + up: Tuple[float, float, float] = pd.Field((0.0, 0.0, 1.0)) + type: Literal["LeftCamera"] = pd.Field("LeftCamera") class RearCamera(Camera): """ Camera looking from negative X toward the origin (i.e. along +X). - Position: (1, 0, 0) - Look-at: (0, 0, 0) - Up: (0, 0, 1) """ - position: Tuple[float, float, float] = (1.0, 0.0, 0.0) - look_at: Tuple[float, float, float] = (0.0, 0.0, 0.0) - up: Tuple[float, float, float] = (0.0, 0.0, 1.0) - type: Literal["RearCamera"] = "RearCamera" + position: Tuple[float, float, float] = pd.Field((1.0, 0.0, 0.0)) + look_at: Tuple[float, float, float] = pd.Field((0.0, 0.0, 0.0)) + up: Tuple[float, float, float] = pd.Field((0.0, 0.0, 1.0)) + type: Literal["RearCamera"] = pd.Field("RearCamera") class FrontCamera(Camera): """ Camera looking from positive X side toward the origin (i.e. along -X). - Position: (-1, 0, 0) - Look-at: (0, 0, 0) - Up: (0, 0, 1) """ - position: Tuple[float, float, float] = (-1.0, 0.0, 0.0) - look_at: Tuple[float, float, float] = (0.0, 0.0, 0.0) - up: Tuple[float, float, float] = (0.0, 0.0, 1.0) - type: Literal["FrontCamera"] = "FrontCamera" + position: Tuple[float, float, float] = pd.Field((-1.0, 0.0, 0.0)) + look_at: Tuple[float, float, float] = pd.Field((0.0, 0.0, 0.0)) + up: Tuple[float, float, float] = pd.Field((0.0, 0.0, 1.0)) + type: Literal["FrontCamera"] = pd.Field("FrontCamera") class BottomCamera(Camera): """ Camera looking up from below (along -Z). 
-    Position: (0, 0, -1)
-    Look-at: (0, 0, 0)
-    Up: (0, -1, 0)
     """
 
-    position: Tuple[float, float, float] = (0.0, 0.0, -1.0)
-    look_at: Tuple[float, float, float] = (0.0, 0.0, 0.0)
-    up: Tuple[float, float, float] = (0.0, -1.0, 0.0)
-    type: Literal["BottomCamera"] = "BottomCamera"
+    position: Tuple[float, float, float] = pd.Field((0.0, 0.0, -1.0))
+    look_at: Tuple[float, float, float] = pd.Field((0.0, 0.0, 0.0))
+    up: Tuple[float, float, float] = pd.Field((0.0, -1.0, 0.0))
+    type: Literal["BottomCamera"] = pd.Field("BottomCamera")
 
 
 class FrontLeftBottomCamera(Camera):
     """
     Camera placed front-left-bottom, diagonally looking at the model.
-    Position: (-1, -1, -1)
-    Look-at: (0, 0, 0)
-    Up: (0, 0, 1)
     """
 
-    position: Tuple[float, float, float] = (-1.0, -1.0, -1.0)
-    look_at: Tuple[float, float, float] = (0.0, 0.0, 0.0)
-    up: Tuple[float, float, float] = (0.0, 0.0, 1.0)
-    type: Literal["FrontLeftBottomCamera"] = "FrontLeftBottomCamera"
+    position: Tuple[float, float, float] = pd.Field((-1.0, -1.0, -1.0))
+    look_at: Tuple[float, float, float] = pd.Field((0.0, 0.0, 0.0))
+    up: Tuple[float, float, float] = pd.Field((0.0, 0.0, 1.0))
+    type: Literal["FrontLeftBottomCamera"] = pd.Field("FrontLeftBottomCamera")
 
 
 class RearRightBottomCamera(Camera):
     """
     Camera placed rear-right-bottom, diagonally looking at the model.
-    Position: (1, 1, -1)
-    Look-at: (0, 0, 0)
-    Up: (0, 0, 1)
     """
 
-    position: Tuple[float, float, float] = (1.0, 1.0, -1.0)
-    look_at: Tuple[float, float, float] = (0.0, 0.0, 0.0)
-    up: Tuple[float, float, float] = (0.0, 0.0, 1.0)
-    type: Literal["RearRightBottomCamera"] = "RearRightBottomCamera"
+    position: Tuple[float, float, float] = pd.Field((1.0, 1.0, -1.0))
+    look_at: Tuple[float, float, float] = pd.Field((0.0, 0.0, 0.0))
+    up: Tuple[float, float, float] = pd.Field((0.0, 0.0, 1.0))
+    type: Literal["RearRightBottomCamera"] = pd.Field("RearRightBottomCamera")
 
 
 class FrontLeftTopCamera(Camera):
     """
     Camera placed front-left-top, diagonally looking at the model.
-    Position: (-1, -1, 1)
-    Look-at: (0, 0, 0)
-    Up: (0, 0, 1)
     """
 
-    position: Tuple[float, float, float] = (-1.0, -1.0, 1.0)
-    look_at: Tuple[float, float, float] = (0.0, 0.0, 0.0)
-    up: Tuple[float, float, float] = (0.0, 0.0, 1.0)
-    type: Literal["FrontLeftTopCamera"] = "FrontLeftTopCamera"
+    position: Tuple[float, float, float] = pd.Field((-1.0, -1.0, 1.0))
+    look_at: Tuple[float, float, float] = pd.Field((0.0, 0.0, 0.0))
+    up: Tuple[float, float, float] = pd.Field((0.0, 0.0, 1.0))
+    type: Literal["FrontLeftTopCamera"] = pd.Field("FrontLeftTopCamera")
 
 
 class RearLeftTopCamera(Camera):
     """
     Camera placed rear-left-top, diagonally looking at the model.
-    Position: (1, -1, 1)
-    Look-at: (0, 0, 0)
-    Up: (0, 0, 1)
     """
 
-    position: Tuple[float, float, float] = (1.0, -1.0, 1.0)
-    look_at: Tuple[float, float, float] = (0.0, 0.0, 0.0)
-    up: Tuple[float, float, float] = (0.0, 0.0, 1.0)
-    type: Literal["RearLeftTopCamera"] = "RearLeftTopCamera"
+    position: Tuple[float, float, float] = pd.Field((1.0, -1.0, 1.0))
+    look_at: Tuple[float, float, float] = pd.Field((0.0, 0.0, 0.0))
+    up: Tuple[float, float, float] = pd.Field((0.0, 0.0, 1.0))
+    type: Literal["RearLeftTopCamera"] = pd.Field("RearLeftTopCamera")
 
 
 class SetCameraPayload(Camera):
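
Usage sketch: the diff above only defines the building blocks, so the snippet below assembles them into a ReportTemplate using the constructors introduced in this change. It is a minimal, illustrative sketch, not part of the diff itself; the data paths (e.g. "total_forces/CL", "params/operating_condition/beta"), the "Cp" surface field, and the figure names are assumptions modeled on the docstring examples, and rendering or submitting the template against a list of Case objects goes through APIs not shown here.

# Illustrative sketch only -- data paths, field names and figure names are assumptions.
from flow360.plugins.report import (
    Average,
    Chart2D,
    Chart3D,
    DataItem,
    Delta,
    Grouper,
    NonlinearResiduals,
    PatternCaption,
    ReportTemplate,
    Settings,
    SubsetLimit,
    Summary,
    Table,
    TopCamera,
)

template = ReportTemplate(
    title="Sweep comparison",  # shown on the first page
    items=[
        Summary(text="Automated report generated from a beta sweep."),
        Table(
            data=[
                "params/operating_condition/beta",
                DataItem(data="total_forces/CL", title="CL", operations=[Average(fraction=0.1)]),
                Delta(data="total_forces/CD", ref_index=0),
            ],
            section_title="Force summary",
            formatter=".4g",
        ),
        NonlinearResiduals(fig_name="residuals"),
        Chart2D(
            x="params/operating_condition/beta",
            y=DataItem(data="total_forces/CL", operations=[Average(fraction=0.1)]),
            section_title="CL vs beta",
            fig_name="cl_vs_beta",
            group_by=Grouper(group_by="params/models/Fluid/turbulence_model_solver/type_name"),
            ylim=SubsetLimit(subset=(0.5, 1.0), offset=0.3),
        ),
        Chart3D(
            show="boundaries",
            field="Cp",  # assumed surface field name
            camera=TopCamera(),
            section_title="Surface Cp",
            fig_name="cp_top",
            caption=PatternCaption(pattern="Surface Cp for [case.name]"),
        ),
    ],
    include_case_by_case=False,
    settings=Settings(dpi=300, dump_table_csv=True),
)

The template is a plain pydantic model, so it validates on construction; generating the PDF (and dumping Table data to CSV when dump_table_csv is enabled) happens when the template is evaluated against cases by machinery outside this diff.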