From 0a2b51271988cce2abfb5d7ddb5211ab750afa36 Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Sun, 29 Dec 2024 16:45:48 +0200
Subject: [PATCH 01/29] src\meshio\obj\_obj.py too-many-branches

Function read_buffer had 56 branches while Pylint recommends having at
most 12. Now there is a function process_line that uses more utility
functions.

---
 src/meshio/obj/_obj.py | 95 +++++++++++++++++++++---------------------
 1 file changed, 48 insertions(+), 47 deletions(-)

diff --git a/src/meshio/obj/_obj.py b/src/meshio/obj/_obj.py
index d5208180..a8f74730 100644
--- a/src/meshio/obj/_obj.py
+++ b/src/meshio/obj/_obj.py
@@ -21,59 +21,60 @@ def read(filename):
 
 
 def read_buffer(f):
-    points = []
-    vertex_normals = []
-    texture_coords = []
-    face_groups = []
-    face_group_ids = []
+    points, vertex_normals, texture_coords = [], [], []
+    face_groups, face_group_ids = [], []
     face_group_id = -1
+
     while True:
         line = f.readline()
         if not line:
            # EOF
            break
+        face_group_id = process_line(
+            line.strip(), points, vertex_normals, texture_coords,
+            face_groups, face_group_ids, face_group_id,
+        )
+
+    face_groups, face_group_ids = clean_empty_groups(face_groups, face_group_ids)
+    points, texture_coords, vertex_normals, point_data = convert_to_numpy(
+        points, texture_coords, vertex_normals
+    )
+    cells, cell_data = create_cells(face_groups, face_group_ids)
+
+    return Mesh(points, cells, point_data=point_data, cell_data=cell_data)
+
+
+def process_line(
+    strip, points, vertex_normals, texture_coords, face_groups, face_group_ids,
+    face_group_id,
+):
+    # Integers are immutable, so the updated face_group_id has to be
+    # returned to the caller rather than mutated in place.
+    if len(strip) == 0 or strip[0] == "#":
+        return face_group_id
+
+    split = strip.split()
+    if split[0] == "v":
+        points.append([float(item) for item in split[1:]])
+    elif split[0] == "vn":
+        vertex_normals.append([float(item) for item in split[1:]])
+    elif split[0] == "vt":
+        texture_coords.append([float(item) for item in split[1:]])
+    elif split[0] == "s":
+        # "s 1" or "s off" controls smooth shading
+        pass
+    elif split[0] == "f":
+        process_face(split, face_groups, face_group_ids, face_group_id)
+    elif split[0] == "g":
+        # new group
+        face_groups.append([])
+        face_group_ids.append([])
+        face_group_id += 1
+    return face_group_id
 
-        strip = line.strip()
-
-        if len(strip) == 0 or strip[0] == "#":
-            continue
-
-        split = strip.split()
-
-        if split[0] == "v":
-            points.append([float(item) for item in split[1:]])
-        elif split[0] == "vn":
-            vertex_normals.append([float(item) for item in split[1:]])
-        elif split[0] == "vt":
-            texture_coords.append([float(item) for item in split[1:]])
-        elif split[0] == "s":
-            # "s 1" or "s off" controls smooth shading
-            pass
-        elif split[0] == "f":
-            dat = [int(item.split("/")[0]) for item in split[1:]]
-            if len(face_groups) == 0 or (
-                len(face_groups[-1]) > 0 and len(face_groups[-1][-1]) != len(dat)
-            ):
-                face_groups.append([])
-                face_group_ids.append([])
-
-            face_groups[-1].append(dat)
-            face_group_ids[-1].append(face_group_id)
-        elif split[0] == "g":
-            # new group
-            face_groups.append([])
-            face_group_ids.append([])
-            face_group_id += 1
-        else:
-            # who knows
-            pass
-    # There may be empty groups, too.
-    # Remove them.
+def process_face(split, face_groups, face_group_ids, face_group_id):
+    dat = [int(item.split("/")[0]) for item in split[1:]]
+    if len(face_groups) == 0 or (
+        len(face_groups[-1]) > 0 and len(face_groups[-1][-1]) != len(dat)
+    ):
+        face_groups.append([])
+        face_group_ids.append([])
+
+    face_groups[-1].append(dat)
+    face_group_ids[-1].append(face_group_id)
+
+
+def clean_empty_groups(face_groups, face_group_ids):
     face_groups = [f for f in face_groups if len(f) > 0]
     face_group_ids = [g for g in face_group_ids if len(g) > 0]
+    return face_groups, face_group_ids
+
 
+def convert_to_numpy(points, texture_coords, vertex_normals):
     points = np.array(points)
     texture_coords = np.array(texture_coords)
     vertex_normals = np.array(vertex_normals)
@@ -82,8 +83,10 @@ def read_buffer(f):
         point_data["obj:vt"] = texture_coords
     if len(vertex_normals) > 0:
         point_data["obj:vn"] = vertex_normals
+    return points, texture_coords, vertex_normals, point_data
 
-    # convert to numpy arrays
+
+def create_cells(face_groups, face_group_ids):
     face_groups = [np.array(f) for f in face_groups]
     cell_data = {"obj:group_ids": []}
     cells = []
@@ -95,9 +98,8 @@ def read_buffer(f):
         else:
             cells.append(CellBlock("polygon", f - 1))
         cell_data["obj:group_ids"].append(gid)
-
-    return Mesh(points, cells, point_data=point_data, cell_data=cell_data)
-
+    return cells, cell_data
+
 
 def write(filename, mesh):
     for c in mesh.cells:

From 400068e13a990fe422e2a74a030122f48e0bced0 Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Sun, 29 Dec 2024 17:27:46 +0200
Subject: [PATCH 02/29] src\meshio\tecplot\_tecplot.py too-many-statements

Function write had 69 statements while Pylint recommends having at most
50. Function read_buffer had 65 statements. Extracted functions to make
the code more structured and solve that.

---
 src/meshio/tecplot/_tecplot.py | 170 +++++++++++++++++----------------
 1 file changed, 88 insertions(+), 82 deletions(-)

diff --git a/src/meshio/tecplot/_tecplot.py b/src/meshio/tecplot/_tecplot.py
index 4acf2c3f..5cc36752 100644
--- a/src/meshio/tecplot/_tecplot.py
+++ b/src/meshio/tecplot/_tecplot.py
@@ -126,46 +126,11 @@ def read_buffer(f):
         line = readline(f)
 
         if line.upper().startswith("VARIABLES"):
-            # Multilines for VARIABLES appears to work only if
-            # variable name is double quoted
-            lines = [line]
-            i = f.tell()
-            line = readline(f).upper()
-            while True:
-                if line.startswith('"'):
-                    lines += [line]
-                    i = f.tell()
-                    line = readline(f).upper()
-                else:
-                    f.seek(i)
-                    break
-            line = " ".join(lines)
+            line = _read_multiline_variables(f, line)
             variables = _read_variables(line)
 
         elif line.upper().startswith("ZONE"):
-            # ZONE can be defined on several lines e.g.
-            # ```
-            # ZONE NODES = 62533, ELEMENTS = 57982
-            # , DATAPACKING = BLOCK, ZONETYPE = FEQUADRILATERAL
-            # , VARLOCATION = ([1-2] = NODAL, [3-7] = CELLCENTERED)
-            # ```
-            # is valid (and understood by ParaView and VisIt).
- info_lines = [line] - i = f.tell() - line = readline(f).upper() - while True: - # check if the first entry can be converted to a float - try: - float(line.split()[0]) - except ValueError: - info_lines += [line] - i = f.tell() - line = readline(f).upper() - else: - f.seek(i) - break - line = " ".join(info_lines) - + line = _read_multiline_zone(f, line) assert variables is not None zone = _read_zone(line) @@ -223,6 +188,37 @@ def read_buffer(f): return Mesh(points, cells, point_data, cell_data) +def _read_multiline_variables(f, line): + lines = [line] + i = f.tell() + line = readline(f).upper() + while True: + if line.startswith('"'): + lines += [line] + i = f.tell() + line = readline(f).upper() + else: + f.seek(i) + break + return " ".join(lines) + + +def _read_multiline_zone(f, line): + info_lines = [line] + i = f.tell() + line = readline(f).upper() + while True: + try: + float(line.split()[0]) + except ValueError: + info_lines += [line] + i = f.tell() + line = readline(f).upper() + else: + f.seek(i) + break + return " ".join(info_lines) + def _read_variables(line): # Gather variables in a list line = line.split("=")[1] @@ -377,20 +373,7 @@ def _read_zone_data(f, num_data, num_cells, zone_format): def write(filename, mesh): - # Check cell types - cell_types = [] - cell_blocks = [] - for ic, c in enumerate(mesh.cells): - if c.type in meshio_only: - cell_types.append(c.type) - cell_blocks.append(ic) - else: - warn( - ( - "Tecplot does not support cell type '{}'. " - "Skipping cell block {}." - ).format(c.type, ic) - ) + cell_types, cell_blocks = _get_cell_types_and_blocks(mesh) # Define cells and zone type cell_types = np.unique(cell_types) @@ -424,6 +407,59 @@ def write(filename, mesh): ) # Define variables + variables, data, varrange = _get_variables_and_data(mesh, cell_blocks) + + with open_file(filename, "w") as f: + # Title + f.write(f'TITLE = "Written by meshio v{version}"\n') + + # Variables + variables_str = ", ".join(f'"{var}"' for var in variables) + f.write(f"VARIABLES = {variables_str}\n") + + # Zone record + num_nodes = len(mesh.points) + num_cells = sum(len(mesh.cells[ic].data) for ic in cell_blocks) + f.write(f"ZONE NODES = {num_nodes}, ELEMENTS = {num_cells},\n") + f.write(f"DATAPACKING = BLOCK, ZONETYPE = {zone_type}") + if varrange[0] <= varrange[1]: + f.write(",\n") + varlocation_str = ( + f"{varrange[0]}" + if varrange[0] == varrange[1] + else f"{varrange[0]}-{varrange[1]}" + ) + f.write(f"VARLOCATION = ([{varlocation_str}] = CELLCENTERED)\n") + else: + f.write("\n") + + # Zone data + for arr in data: + _write_table(f, arr) + + # CellBlock + for cell in cells: + f.write(" ".join(str(c) for c in cell + 1) + "\n") + + +def _get_cell_types_and_blocks(mesh): + cell_types = [] + cell_blocks = [] + for ic, c in enumerate(mesh.cells): + if c.type in meshio_only: + cell_types.append(c.type) + cell_blocks.append(ic) + else: + warn( + ( + "Tecplot does not support cell type '{}'. " + "Skipping cell block {}." 
+ ).format(c.type, ic) + ) + return cell_types, cell_blocks + + +def _get_variables_and_data(mesh, cell_blocks): variables = ["X", "Y"] data = [mesh.points[:, 0], mesh.points[:, 1]] varrange = [3, 0] @@ -464,37 +500,7 @@ def write(filename, mesh): else: warn(f"Skipping cell data '{k}'.") - with open_file(filename, "w") as f: - # Title - f.write(f'TITLE = "Written by meshio v{version}"\n') - - # Variables - variables_str = ", ".join(f'"{var}"' for var in variables) - f.write(f"VARIABLES = {variables_str}\n") - - # Zone record - num_nodes = len(mesh.points) - num_cells = sum(len(mesh.cells[ic].data) for ic in cell_blocks) - f.write(f"ZONE NODES = {num_nodes}, ELEMENTS = {num_cells},\n") - f.write(f"DATAPACKING = BLOCK, ZONETYPE = {zone_type}") - if varrange[0] <= varrange[1]: - f.write(",\n") - varlocation_str = ( - f"{varrange[0]}" - if varrange[0] == varrange[1] - else f"{varrange[0]}-{varrange[1]}" - ) - f.write(f"VARLOCATION = ([{varlocation_str}] = CELLCENTERED)\n") - else: - f.write("\n") - - # Zone data - for arr in data: - _write_table(f, arr) - - # CellBlock - for cell in cells: - f.write(" ".join(str(c) for c in cell + 1) + "\n") + return variables, data, varrange def _write_table(f, data, ncol=20): From 290cf508dc63750cc179683ccb700b094386403e Mon Sep 17 00:00:00 2001 From: evidencebp Date: Sun, 29 Dec 2024 18:03:11 +0200 Subject: [PATCH 03/29] src\meshio\ansys\_ansys.py too-many-statements Function _read_faces had 54 statements while Pylint recommends having at most 50. Now there are dedicated functions _read_faces_ascii and _read_faces_binary --- src/meshio/ansys/_ansys.py | 144 +++++++++++++++++++------------------ 1 file changed, 76 insertions(+), 68 deletions(-) diff --git a/src/meshio/ansys/_ansys.py b/src/meshio/ansys/_ansys.py index 364616a7..d6e0d6e9 100644 --- a/src/meshio/ansys/_ansys.py +++ b/src/meshio/ansys/_ansys.py @@ -214,84 +214,92 @@ def _read_faces(f, line): if line.strip()[-1] != "(": _skip_to(f, "(") - data = {} if out.group(1) == "": - # ASCII - if key == "mixed": - # From - # : - # - # > If the face zone is of mixed type (element-type = > 0), the body of the - # > section will include the face type and will appear as follows - # > - # > type v0 v1 v2 c0 c1 - # > - for k in range(num_cells): - line = "" - while line.strip() == "": - line = f.readline().decode() - dat = line.split() - type_index = int(dat[0], 16) - if type_index == 0: - raise ReadError() - type_string, num_nodes_per_cell = element_type_to_key_num_nodes[ - type_index - ] - if len(dat) != num_nodes_per_cell + 3: - raise ReadError() + data = _read_faces_ascii(f, num_cells, key, num_nodes_per_cell) + else: + data = _read_faces_binary(f, num_cells, key, num_nodes_per_cell, out.group(1)) - if type_string not in data: - data[type_string] = [] + # make sure that the data set is properly closed + _skip_close(f, 2) - data[type_string].append( - [int(d, 16) for d in dat[1 : num_nodes_per_cell + 1]] - ) + return data - data = {key: np.array(data[key]) for key in data} - else: - # read cell data - data = np.empty((num_cells, num_nodes_per_cell), dtype=int) - for k in range(num_cells): +def _read_faces_ascii(f, num_cells, key, num_nodes_per_cell): + data = {} + if key == "mixed": + # From + # : + # + # > If the face zone is of mixed type (element-type = > 0), the body of the + # > section will include the face type and will appear as follows + # > + # > type v0 v1 v2 c0 c1 + # > + for k in range(num_cells): + line = "" + while line.strip() == "": line = f.readline().decode() - dat = line.split() - # The 
body of a regular face section contains the grid connectivity, and - # each line appears as follows: - # n0 n1 n2 cr cl - # where n* are the defining nodes (vertices) of the face, and c* are the - # adjacent cells. - if len(dat) != num_nodes_per_cell + 2: - raise ReadError() - data[k] = [int(d, 16) for d in dat[:num_nodes_per_cell]] - data = {key: data} + dat = line.split() + type_index = int(dat[0], 16) + if type_index == 0: + raise ReadError() + type_string, num_nodes_per_cell = element_type_to_key_num_nodes[ + type_index + ] + if len(dat) != num_nodes_per_cell + 3: + raise ReadError() + + if type_string not in data: + data[type_string] = [] + + data[type_string].append( + [int(d, 16) for d in dat[1 : num_nodes_per_cell + 1]] + ) + + data = {key: np.array(data[key]) for key in data} + else: - # binary - if out.group(1) == "20": - dtype = np.int32 - else: - if out.group(1) != "30": - ReadError(f"Expected keys '20' or '30', got {out.group(1)}.") - dtype = np.int64 - - if key == "mixed": - raise ReadError("Mixed element type for binary faces not supported yet") - - # Read cell data. - # The body of a regular face section contains the grid - # connectivity, and each line appears as follows: - # n0 n1 n2 cr cl - # where n* are the defining nodes (vertices) of the face, - # and c* are the adjacent cells. - shape = (num_cells, num_nodes_per_cell + 2) - count = shape[0] * shape[1] - data = np.fromfile(f, count=count, dtype=dtype).reshape(shape) - # Cut off the adjacent cell data. - data = data[:, :num_nodes_per_cell] + # read cell data + data = np.empty((num_cells, num_nodes_per_cell), dtype=int) + for k in range(num_cells): + line = f.readline().decode() + dat = line.split() + # The body of a regular face section contains the grid connectivity, and + # each line appears as follows: + # n0 n1 n2 cr cl + # where n* are the defining nodes (vertices) of the face, and c* are the + # adjacent cells. + if len(dat) != num_nodes_per_cell + 2: + raise ReadError() + data[k] = [int(d, 16) for d in dat[:num_nodes_per_cell]] data = {key: data} + return data - # make sure that the data set is properly closed - _skip_close(f, 2) +def _read_faces_binary(f, num_cells, key, num_nodes_per_cell, group): + if group == "20": + dtype = np.int32 + else: + if group != "30": + ReadError(f"Expected keys '20' or '30', got {group}.") + dtype = np.int64 + + if key == "mixed": + raise ReadError("Mixed element type for binary faces not supported yet") + + # Read cell data. + # The body of a regular face section contains the grid + # connectivity, and each line appears as follows: + # n0 n1 n2 cr cl + # where n* are the defining nodes (vertices) of the face, + # and c* are the adjacent cells. + shape = (num_cells, num_nodes_per_cell + 2) + count = shape[0] * shape[1] + data = np.fromfile(f, count=count, dtype=dtype).reshape(shape) + # Cut off the adjacent cell data. 
+    data = data[:, :num_nodes_per_cell]
+    data = {key: data}
     return data

From f0c1b639c5db477d91b4fa96020300e3f58b213a Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Sun, 29 Dec 2024 18:04:11 +0200
Subject: [PATCH 04/29] src\meshio\xdmf\main.py too-many-statements

---
 src/meshio/xdmf/main.py | 232 +++++++++++++++++++++-------------------
 1 file changed, 119 insertions(+), 113 deletions(-)

diff --git a/src/meshio/xdmf/main.py b/src/meshio/xdmf/main.py
index 4d8c39d2..b79cac9e 100644
--- a/src/meshio/xdmf/main.py
+++ b/src/meshio/xdmf/main.py
@@ -155,68 +155,16 @@ def read_xdmf2(self, root):  # noqa: C901
 
         for c in grid:
             if c.tag == "Topology":
-                data_items = list(c)
-                if len(data_items) != 1:
-                    message = (
-                        "Need exactly 1 data item in <Topology>, "
-                        f"found {len(data_items)}."
-                    )
-                    if len(data_items) == 0:
-                        message += (
-                            "\nStructured meshes are not supported, see "
-                            "<https://github.com/nschloe/meshio/issues/404>."
-                        )
-                    raise ReadError(message)
-
-                topology_type = c.attrib["TopologyType"]
-                if topology_type == "Mixed":
-                    cells = translate_mixed_cells(
-                        np.fromstring(
-                            data_items[0].text,
-                            int,
-                            int(data_items[0].get("Dimensions")),
-                            " ",
-                        )
-                    )
-                else:
-                    data = self._read_data_item(data_items[0])
-                    cells.append(CellBlock(xdmf_to_meshio_type[topology_type], data))
-
+                self._read_topology_v2(c, cells)
             elif c.tag == "Geometry":
-                if "GeometryType" in c.attrib:
-                    geo_type = c.attrib["GeometryType"]
-                    if geo_type != "XYZ":
-                        raise ReadError(f'Expected GeometryType "XYZ", not {geo_type}.')
-                data_items = list(c)
-                if len(data_items) != 1:
-                    raise ReadError()
-                points = self._read_data_item(data_items[0])
-
+                points = self._read_geometry_v2(c)
             elif c.tag == "Information":
                 c_data = c.text
                 if not c_data:
                     raise ReadError()
                 field_data = self.read_information(c_data)
-
             elif c.tag == "Attribute":
-                # assert c.attrib['Active'] == '1'
-                # assert c.attrib['AttributeType'] == 'None'
-
-                data_items = list(c)
-                if len(data_items) != 1:
-                    raise ReadError()
-
-                data = self._read_data_item(data_items[0])
-
-                name = c.attrib["Name"]
-                if c.attrib["Center"] == "Node":
-                    point_data[name] = data
-                elif c.attrib["Center"] == "Cell":
-                    cell_data_raw[name] = data
-                else:
-                    # TODO field data?
-                    if c.attrib["Center"] != "Grid":
-                        raise ReadError()
+                self._read_attribute_v2(c, point_data, cell_data_raw)
             else:
                 raise ReadError(f"Unknown section '{c.tag}'.")

@@ -230,6 +178,59 @@ def read_xdmf2(self, root):  # noqa: C901
             field_data=field_data,
         )
 
+    def _read_topology_v2(self, c, cells):
+        data_items = list(c)
+        if len(data_items) != 1:
+            message = (
+                "Need exactly 1 data item in <Topology>, "
+                f"found {len(data_items)}."
+            )
+            if len(data_items) == 0:
+                message += (
+                    "\nStructured meshes are not supported, see "
+                    "<https://github.com/nschloe/meshio/issues/404>."
+                )
+            raise ReadError(message)
+
+        topology_type = c.attrib["TopologyType"]
+        if topology_type == "Mixed":
+            mixed = np.fromstring(
+                data_items[0].text,
+                int,
+                int(data_items[0].get("Dimensions")),
+                " ",
+            )
+            # Extend the caller's list in place; rebinding the `cells`
+            # parameter here would be lost on return.
+            cells.extend(translate_mixed_cells(mixed))
+        else:
+            data = self._read_data_item(data_items[0])
+            cells.append(CellBlock(xdmf_to_meshio_type[topology_type], data))
+
+    def _read_geometry_v2(self, c):
+        if "GeometryType" in c.attrib:
+            geo_type = c.attrib["GeometryType"]
+            if geo_type != "XYZ":
+                raise ReadError(f'Expected GeometryType "XYZ", not {geo_type}.')
+        data_items = list(c)
+        if len(data_items) != 1:
+            raise ReadError()
+        return self._read_data_item(data_items[0])
+
+    def _read_attribute_v2(self, c, point_data, cell_data_raw):
+        data_items = list(c)
+        if len(data_items) != 1:
+            raise ReadError()
+
+        data = self._read_data_item(data_items[0])
+
+        name = c.attrib["Name"]
+        if c.attrib["Center"] == "Node":
+            point_data[name] = data
+        elif c.attrib["Center"] == "Cell":
+            cell_data_raw[name] = data
+        else:
+            if c.attrib["Center"] != "Grid":
+                raise ReadError()
+
     def read_xdmf3(self, root):  # noqa: C901
         domains = list(root)
         if len(domains) != 1:
@@ -253,71 +254,16 @@ def read_xdmf3(self, root):  # noqa: C901
 
         for c in grid:
             if c.tag == "Topology":
-                data_items = list(c)
-                if len(data_items) != 1:
-                    raise ReadError()
-                data_item = data_items[0]
-
-                data = self._read_data_item(data_item)
-
-                # The XDMF2 key is `TopologyType`, just `Type` for XDMF3. Allow either
-                # one.
-                if "Type" in c.attrib and "TopologyType" in c.attrib:
-                    raise ReadError()
-                elif "Type" in c.attrib:
-                    cell_type = c.attrib["Type"]
-                else:
-                    cell_type = c.attrib["TopologyType"]
-
-                if cell_type == "Mixed":
-                    cells = translate_mixed_cells(data)
-                else:
-                    cells.append(CellBlock(xdmf_to_meshio_type[cell_type], data))
-
+                self._read_topology(c, cells)
             elif c.tag == "Geometry":
-                if "Type" in c.attrib and "GeometryType" in c.attrib:
-                    raise ReadError()
-                elif "Type" in c.attrib:
-                    geometry_type = c.attrib["Type"]
-                else:
-                    geometry_type = c.attrib["GeometryType"]
-
-                if geometry_type not in ["XY", "XYZ"]:
-                    raise ReadError(f'Illegal geometry type "{geometry_type}".')
-
-                data_items = list(c)
-                if len(data_items) != 1:
-                    raise ReadError()
-                data_item = data_items[0]
-                points = self._read_data_item(data_item)
-
+                points = self._read_geometry(c)
             elif c.tag == "Information":
                 c_data = c.text
                 if not c_data:
                     raise ReadError()
                 field_data = self.read_information(c_data)
-
             elif c.tag == "Attribute":
-                # Don't be too strict here: FEniCS, for example, calls this
-                # 'AttributeType'.
-                # assert c.attrib['Type'] == 'None'
-
-                data_items = list(c)
-                if len(data_items) != 1:
-                    raise ReadError()
-                data_item = data_items[0]
-
-                data = self._read_data_item(data_item)
-
-                if c.attrib["Center"] not in ["Node", "Cell"]:
-                    raise ReadError(f"Unknown center '{c.attrib['Center']}'.")
-
-                name = c.attrib["Name"]
-                if c.attrib["Center"] == "Node":
-                    point_data[name] = data
-                else:
-                    cell_data_raw[name] = data
-
+                self._read_attribute(c, point_data, cell_data_raw)
             else:
                 raise ReadError(f"Unknown section '{c.tag}'.")

@@ -331,6 +277,66 @@ def read_xdmf3(self, root):  # noqa: C901
             field_data=field_data,
         )
 
+    def _read_topology(self, c, cells):
+        data_items = list(c)
+        if len(data_items) != 1:
+            raise ReadError()
+        data_item = data_items[0]
+
+        data = self._read_data_item(data_item)
+
+        # The XDMF2 key is `TopologyType`, just `Type` for XDMF3. Allow either
+        # one.
+ if "Type" in c.attrib and "TopologyType" in c.attrib: + raise ReadError() + elif "Type" in c.attrib: + cell_type = c.attrib["Type"] + else: + cell_type = c.attrib["TopologyType"] + + if cell_type == "Mixed": + cells = translate_mixed_cells(data) + else: + cells.append(CellBlock(xdmf_to_meshio_type[cell_type], data)) + + def _read_geometry(self, c): + if "Type" in c.attrib and "GeometryType" in c.attrib: + raise ReadError() + elif "Type" in c.attrib: + geometry_type = c.attrib["Type"] + else: + geometry_type = c.attrib["GeometryType"] + + if geometry_type not in ["XY", "XYZ"]: + raise ReadError(f'Illegal geometry type "{geometry_type}".') + + data_items = list(c) + if len(data_items) != 1: + raise ReadError() + data_item = data_items[0] + return self._read_data_item(data_item) + + def _read_attribute(self, c, point_data, cell_data_raw): + # Don't be too strict here: FEniCS, for example, calls this + # 'AttributeType'. + # assert c.attrib['Type'] == 'None' + + data_items = list(c) + if len(data_items) != 1: + raise ReadError() + data_item = data_items[0] + + data = self._read_data_item(data_item) + + if c.attrib["Center"] not in ["Node", "Cell"]: + raise ReadError(f"Unknown center '{c.attrib['Center']}'.") + + name = c.attrib["Name"] + if c.attrib["Center"] == "Node": + point_data[name] = data + else: + cell_data_raw[name] = data + class XdmfWriter: def __init__( From 37b67404c69df914104b7622907ec45bcafea321 Mon Sep 17 00:00:00 2001 From: evidencebp Date: Sun, 29 Dec 2024 18:52:20 +0200 Subject: [PATCH 05/29] src\meshio\ugrid\_ugrid.py too-many-statements Function _write_buffer had 55 statements while Pylint recommends having at most 50. Extracted _write_cells to make the code more structured and solve that. --- src/meshio/ugrid/_ugrid.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/meshio/ugrid/_ugrid.py b/src/meshio/ugrid/_ugrid.py index 6fa57c09..900eb194 100644 --- a/src/meshio/ugrid/_ugrid.py +++ b/src/meshio/ugrid/_ugrid.py @@ -227,6 +227,13 @@ def _write_buffer(f, file_type, mesh): _write_section(f, file_type, mesh.points, ftype) + _write_cells(f, file_type, mesh, ugrid_counts, ugrid_meshio_id, itype) + + if file_type["type"] == "F": + _write_section(f, file_type, fortran_header, itype) + + +def _write_cells(f, file_type, mesh, ugrid_counts, ugrid_meshio_id, itype): for key in ["triangle", "quad"]: if ugrid_counts[key] == 0: continue @@ -239,12 +246,6 @@ def _write_buffer(f, file_type, mesh): if ugrid_counts[key] == 0: continue - # pick out cell data - # for data in mesh.cell_data.values(): - # if data.dtype in [np.int8, np.int16, np.int32, np.int64]: - # labels = data - # break - # pick out cell_data labels_key, other = _pick_first_int_data(mesh.cell_data) if labels_key and other: @@ -272,8 +273,4 @@ def _write_buffer(f, file_type, mesh): out = out[:, [1, 0, 4, 2, 3]] _write_section(f, file_type, out, itype) - if file_type["type"] == "F": - _write_section(f, file_type, fortran_header, itype) - - register_format("ugrid", [".ugrid"], read, {"ugrid": write}) From 468e91f3f446162a85ecc2a7575a9d796a552b89 Mon Sep 17 00:00:00 2001 From: evidencebp Date: Sun, 29 Dec 2024 18:56:16 +0200 Subject: [PATCH 06/29] src\meshio\vtk\_vtk_42.py too-many-statements Function translate_cells had 52 statements while Pylint recommends having at most 50. Extracted _process_polygon_cells to make the code more structured and solve that. 
--- src/meshio/vtk/_vtk_42.py | 81 +++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 42 deletions(-) diff --git a/src/meshio/vtk/_vtk_42.py b/src/meshio/vtk/_vtk_42.py index f3cb4691..bc698334 100644 --- a/src/meshio/vtk/_vtk_42.py +++ b/src/meshio/vtk/_vtk_42.py @@ -504,56 +504,53 @@ def _skip_meta(f): break -def translate_cells(connectivity, types, cell_data_raw): - # https://vtk.org/doc/nightly/html/vtkCellType_8h_source.html - # Translate it into the cells array. - # `data` is a one-dimensional vector with - # (num_points0, p0, p1, ... ,pk, numpoints1, p10, p11, ..., p1k, ... - # or a tuple with (offsets, connectivity) - has_polygon = np.any(types == meshio_to_vtk_type["polygon"]) - +def _process_polygon_cells(connectivity, types, offsets): + """Process VTK polygon cells and return the translated cells.""" cells = [] - cell_data = {} - if has_polygon: - numnodes = np.empty(len(types), dtype=int) - # If some polygons are in the VTK file, loop over the cells - numcells = len(types) - offsets = np.empty(len(types), dtype=int) - offsets[0] = 0 - for idx in range(numcells - 1): - numnodes[idx] = connectivity[offsets[idx]] - offsets[idx + 1] = offsets[idx] + numnodes[idx] + 1 - - idx = numcells - 1 + numcells = len(types) + numnodes = np.empty(len(types), dtype=int) + + # Calculate offsets and verify numnodes + offsets[0] = 0 + for idx in range(numcells - 1): numnodes[idx] = connectivity[offsets[idx]] - if not np.all(numnodes == connectivity[offsets]): - raise ReadError() + offsets[idx + 1] = offsets[idx] + numnodes[idx] + 1 + + idx = numcells - 1 + numnodes[idx] = connectivity[offsets[idx]] + if not np.all(numnodes == connectivity[offsets]): + raise ReadError() - # TODO: cell_data - for idx, vtk_cell_type in enumerate(types): - start = offsets[idx] + 1 + # Process each cell + for idx, vtk_cell_type in enumerate(types): + start = offsets[idx] + 1 - new_order = vtk_to_meshio_order(vtk_cell_type, offsets.dtype) - if new_order is None: - new_order = np.arange(numnodes[idx], dtype=offsets.dtype) + new_order = vtk_to_meshio_order(vtk_cell_type, offsets.dtype) + if new_order is None: + new_order = np.arange(numnodes[idx], dtype=offsets.dtype) - cell_idx = start + new_order + cell_idx = start + new_order + cell = connectivity[cell_idx] + cell_type = vtk_to_meshio_type[vtk_cell_type] - cell = connectivity[cell_idx] + if (len(cells) > 0 and + cells[-1][0] == cell_type and + len(cell) == len(cells[-1][1][-1])): + cells[-1][1].append(cell) + else: + cells.append((cell_type, [cell])) + + return cells - cell_type = vtk_to_meshio_type[vtk_cell_type] +def translate_cells(connectivity, types, cell_data_raw): + # https://vtk.org/doc/nightly/html/vtkCellType_8h_source.html + has_polygon = np.any(types == meshio_to_vtk_type["polygon"]) - if ( - len(cells) > 0 - and cells[-1][0] == cell_type - # the following check if needed for polygons; key can be equal, but - # still needs to go into a new block - and len(cell) == len(cells[-1][1][-1]) - ): - cells[-1][1].append(cell) - else: - # open up a new cell block - cells.append((cell_type, [cell])) + cells = [] + cell_data = {} + if has_polygon: + offsets = np.empty(len(types), dtype=int) + cells = _process_polygon_cells(connectivity, types, offsets) else: # Infer offsets from the cell types. This is much faster than manually going # through the data array. 
Slight disadvantage: This doesn't work for cells with From e4b1a8dba5b823a5c35e7121288cb94ed9a53de2 Mon Sep 17 00:00:00 2001 From: evidencebp Date: Sun, 29 Dec 2024 19:12:03 +0200 Subject: [PATCH 07/29] src\meshio\flac3d\_flac3d.py too-many-statements Function split_f_z had 78 statements while Pylint recommends having at most 50. Extracted methods to make the code more structured and solve that. --- src/meshio/flac3d/_flac3d.py | 78 +++++++++++++++++------------------- 1 file changed, 37 insertions(+), 41 deletions(-) diff --git a/src/meshio/flac3d/_flac3d.py b/src/meshio/flac3d/_flac3d.py index 45ea0d9c..19f739c3 100644 --- a/src/meshio/flac3d/_flac3d.py +++ b/src/meshio/flac3d/_flac3d.py @@ -352,10 +352,8 @@ def _update_cells(cells, cell, flag): cells.append((cell_type, [cell])) -def split_f_z(mesh): - # FLAC3D makes a difference between ZONES (3D-cells only) and FACES - # (2D-cells only). Split cells into zcells and fcells, along with the cell - # sets etc. +def _split_cells(mesh): + """Split cells into zones (3D) and faces (2D).""" zcells = [] fcells = [] for cell_block in mesh.cells: @@ -363,7 +361,10 @@ def split_f_z(mesh): zcells.append(cell_block) elif cell_block.type in meshio_only["face"]: fcells.append(cell_block) + return zcells, fcells +def _split_cell_sets(mesh): + """Split cell sets into zone and face sets.""" zsets = {} fsets = {} for key, cset in mesh.cell_sets.items(): @@ -376,47 +377,42 @@ def split_f_z(mesh): fsets[key].append( sblock if cell_block.type in meshio_only["face"] else None ) - - # remove the data that is only None - zsets = { - key: value - for key, value in zsets.items() - if not all(item is None for item in value) - } - fsets = { - key: value - for key, value in fsets.items() - if not all(item is None for item in value) - } - - # Right now, the zsets contain indices into the corresponding cell block. - # FLAC3D expects _global_ indices. Update. 
- cell_block_sizes = [len(cb) for cb in zcells] - for key, data in zsets.items(): - gid = 0 - for n, block in zip(cell_block_sizes, data): - block += gid - gid += n - - # TODO not sure if fcells and zcells share a common global index - cell_block_sizes = [len(cb) for cb in fcells] - for key, data in fsets.items(): + + # Remove empty sets + zsets = {key: value for key, value in zsets.items() + if not all(item is None for item in value)} + fsets = {key: value for key, value in fsets.items() + if not all(item is None for item in value)} + + return zsets, fsets + +def _update_global_indices(sets, cells): + """Update indices to be global rather than per-block.""" + cell_block_sizes = [len(cb) for cb in cells] + for key, data in sets.items(): gid = 0 for n, block in zip(cell_block_sizes, data): - block += gid + if block is not None: + block += gid gid += n + + # Concatenate blocks and adjust to 1-based indexing + for label, values in sets.items(): + values = [v for v in values if v is not None] + sets[label] = np.concatenate(values) + 1 + + return sets - for label, values in zsets.items(): - zsets[label] = np.concatenate(values) - for label, values in fsets.items(): - fsets[label] = np.concatenate(values) - - # flac3d indices start at 1 - for label, values in zsets.items(): - zsets[label] += 1 - for label, values in fsets.items(): - fsets[label] += 1 - +def split_f_z(mesh): + """Split mesh into zones (3D) and faces (2D) with corresponding cell sets.""" + # Split cells and cell sets + zcells, fcells = _split_cells(mesh) + zsets, fsets = _split_cell_sets(mesh) + + # Update indices to be global + zsets = _update_global_indices(zsets, zcells) + fsets = _update_global_indices(fsets, fcells) + return zcells, fcells, zsets, fsets From f01fa4937ea0967a486983d0831af259ed0d35ed Mon Sep 17 00:00:00 2001 From: evidencebp Date: Sun, 29 Dec 2024 19:20:23 +0200 Subject: [PATCH 08/29] src\meshio\su2\_su2.py too-many-statements Function read_buffer had 90 statements while Pylint recommends having at most 50. Extracted methods that handle the various names to make the code more structured and solve that. 
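The SU2 format is keyword-driven: sections are announced by lines such
as NDIME=, NPOIN=, NELEM= and MARKER_ELEMS=. As a hedged, self-contained
sketch of the dispatch idea (toy handlers and toy input, not meshio's
internals):

    def parse(lines):
        handlers = {
            "NDIME": int,
            "NPOIN": lambda rest: int(rest.split()[0]),
            "NELEM": int,
        }
        result = {}
        for line in lines:
            line = line.strip()
            if not line or line.startswith("%"):
                continue  # skip blanks and comments
            try:
                name, rest = line.split("=")
            except ValueError:
                continue  # not a "NAME= value" line
            name = name.strip()
            if name in handlers:
                result[name] = handlers[name](rest)
        return result

    assert parse(["% a comment", "NDIME= 2", "NPOIN= 5233"]) == {
        "NDIME": 2,
        "NPOIN": 5233,
    }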
---
 src/meshio/su2/_su2.py | 225 ++++++++++++++++++-----------------------
 1 file changed, 98 insertions(+), 127 deletions(-)

diff --git a/src/meshio/su2/_su2.py b/src/meshio/su2/_su2.py
index b925171e..103bec2f 100644
--- a/src/meshio/su2/_su2.py
+++ b/src/meshio/su2/_su2.py
@@ -50,160 +50,131 @@ def read(filename):
     return mesh
 
+def _parse_ndime(rest_of_line):
+    dim = int(rest_of_line)
+    if dim != 2 and dim != 3:
+        raise ReadError(f"Invalid dimension value {dim}")
+    return dim
+
+
+def _parse_points(f, rest_of_line, dim, ftype):
+    first_line = f.readline().split()
+    first_line = np.array(first_line, dtype=ftype)
+    extra_columns = first_line.shape[0] - dim
+
+    num_verts = int(rest_of_line.split()[0]) - 1
+    points = np.fromfile(
+        f, count=num_verts * (dim + extra_columns), dtype=ftype, sep=" "
+    ).reshape(num_verts, dim + extra_columns)
+
+    if extra_columns > 0:
+        first_line = first_line[:-extra_columns]
+        points = points[:, :-extra_columns]
+
+    return np.vstack([first_line, points])
+
+
+def _parse_elements(f, rest_of_line, itype, next_tag_id=0, is_boundary=False):
+    num_elems = int(rest_of_line)
+    first_line_str = next(islice(f, 1))
+    first_line = first_line_str.split()
+    nnodes = su2_type_to_numnodes[int(first_line[0])]
+
+    has_extra_column = len(first_line) == nnodes + 2
+    if len(first_line) not in [nnodes + 1, nnodes + 2]:
+        raise ReadError("Invalid number of columns for elements field")
+
+    gen = chain([first_line_str], islice(f, num_elems - 1))
+    cell_array = " ".join([line.rstrip("\n") for line in gen])
+    cell_array = np.fromiter(cell_array.split(), dtype=itype)
+
+    cells, _ = _translate_cells(cell_array, has_extra_column)
+    cell_blocks = []
+    cell_tags = []
+
+    for eltype, data in cells.items():
+        cell_blocks.append(CellBlock(eltype, data))
+        num_block_elems = len(data)
+        tag_value = next_tag_id if is_boundary else 0
+        cell_tags.append(np.full(num_block_elems, tag_value, dtype=np.int32))
+
+    return cell_blocks, cell_tags
+
+
+def _merge_boundary_cells(cells, cell_data, dim):
+    types = ["line"] if dim == 2 else ["triangle", "quad"]
+    indices_to_merge = {t: [] for t in types}
+
+    for index, cell_block in enumerate(cells):
+        if cell_block.type in types:
+            indices_to_merge[cell_block.type].append(index)
+
+    cdata = cell_data["su2:tag"]
+    for type, indices in indices_to_merge.items():
+        if len(indices) > 1:
+            cells[indices[0]] = CellBlock(
+                type, np.concatenate([cells[i].data for i in indices])
+            )
+            cdata[indices[0]] = np.concatenate([cdata[i] for i in indices])
+
+    idelete = [i for indices in indices_to_merge.values() for i in indices[1:]]
+    for i in sorted(idelete, reverse=True):
+        del cells[i]
+        del cdata[i]
+
+    return cells, cdata
+
+
 def read_buffer(f):
     cells = []
     cell_data = {"su2:tag": []}
-
-    itype = "i8"
-    ftype = "f8"
+    itype, ftype = "i8", "f8"
     dim = 0
     next_tag_id = 0
-    expected_nmarkers = 0
-    markers_found = 0
+    expected_nmarkers = markers_found = 0
+
     while True:
         line = f.readline()
         if not line:
-            # EOF
             break
 
         line = line.strip()
-        if len(line) == 0:
-            continue
-        if line[0] == "%":
+        if len(line) == 0 or line[0] == "%":
             continue
 
         try:
             name, rest_of_line = line.split("=")
         except ValueError:
             warn(f"meshio could not parse line\n {line}\n skipping.....")
             continue
 
         if name == "NDIME":
-            dim = int(rest_of_line)
-            if dim != 2 and dim != 3:
-                raise ReadError(f"Invalid dimension value {line}")
-
+            dim = _parse_ndime(rest_of_line)
         elif name == "NPOIN":
-            # according to documentation rest_of_line should just be a int,
-            # and the next block should be just the coordinates of the
points - # However, some file have one or two extra indices not related to the - # actual coordinates. - # So lets read the next line to find its actual number of columns - # - first_line = f.readline() - first_line = first_line.split() - first_line = np.array(first_line, dtype=ftype) - - extra_columns = first_line.shape[0] - dim - - num_verts = int(rest_of_line.split()[0]) - 1 - points = np.fromfile( - f, count=num_verts * (dim + extra_columns), dtype=ftype, sep=" " - ).reshape(num_verts, dim + extra_columns) - - # save off any extra info - if extra_columns > 0: - first_line = first_line[:-extra_columns] - points = points[:, :-extra_columns] - - # add the first line we read separately - points = np.vstack([first_line, points]) - - elif name == "NELEM" or name == "MARKER_ELEMS": - # we cannot? read at once using numpy because we do not know the - # total size. Read, instead next num_elems as is and re-use the - # translate_cells function from vtk reader - - num_elems = int(rest_of_line) - gen = islice(f, num_elems) - - # some files has an extra int column while other not - # We do not need it so make sure we will skip it - first_line_str = next(gen) - first_line = first_line_str.split() - nnodes = su2_type_to_numnodes[int(first_line[0])] - has_extra_column = False - if nnodes + 1 == len(first_line): - has_extra_column = False - elif nnodes + 2 == len(first_line): - has_extra_column = True - else: - raise ReadError(f"Invalid number of columns for {name} field") - - # reset generator - gen = chain([first_line_str], gen) - - cell_array = " ".join([line.rstrip("\n") for line in gen]) - cell_array = np.fromiter(cell_array.split(), dtype=itype) - - cells_, _ = _translate_cells(cell_array, has_extra_column) - - for eltype, data in cells_.items(): - cells.append(CellBlock(eltype, data)) - num_block_elems = len(data) - if name == "NELEM": - cell_data["su2:tag"].append( - np.full(num_block_elems, 0, dtype=np.int32) - ) - else: - tags = np.full(num_block_elems, next_tag_id, dtype=np.int32) - cell_data["su2:tag"].append(tags) - + points = _parse_points(f, rest_of_line, dim, ftype) + elif name in ["NELEM", "MARKER_ELEMS"]: + new_cells, new_tags = _parse_elements(f, rest_of_line, itype, next_tag_id, name == "MARKER_ELEMS") + cells.extend(new_cells) + cell_data["su2:tag"].extend(new_tags) elif name == "NMARK": expected_nmarkers = int(rest_of_line) elif name == "MARKER_TAG": - next_tag = rest_of_line - try: - next_tag_id = int(next_tag) - except ValueError: - next_tag_id += 1 - warn( - "meshio does not support tags of string type.\n" - f" Surface tag {rest_of_line} will be replaced by {next_tag_id}" - ) + next_tag_id = _handle_marker_tag(rest_of_line, next_tag_id) markers_found += 1 if markers_found != expected_nmarkers: - warn( - f"expected {expected_nmarkers} markers according to NMARK value " - f"but found only {markers_found}" - ) - - # merge boundary elements in a single cellblock per cell type - if dim == 2: - types = ["line"] - else: - types = ["triangle", "quad"] - - indices_to_merge = {} - for t in types: - indices_to_merge[t] = [] + warn(f"expected {expected_nmarkers} markers according to NMARK value but found only {markers_found}") - for index, cell_block in enumerate(cells): - if cell_block.type in types: - indices_to_merge[cell_block.type].append(index) - - cdata = cell_data["su2:tag"] - for type, indices in indices_to_merge.items(): - if len(indices) > 1: - cells[indices[0]] = CellBlock( - type, np.concatenate([cells[i].data for i in indices]) - ) - cdata[indices[0]] = 
np.concatenate([cdata[i] for i in indices]) - - # delete merged blocks - idelete = [] - for type, indices in indices_to_merge.items(): - idelete += indices[1:] - - for i in sorted(idelete, reverse=True): - del cells[i] - del cdata[i] - - cell_data["su2:tag"] = cdata + cells, cell_data["su2:tag"] = _merge_boundary_cells(cells, cell_data, dim) return Mesh(points, cells, cell_data=cell_data) +def _handle_marker_tag(rest_of_line, next_tag_id): + try: + return int(rest_of_line) + except ValueError: + next_tag_id += 1 + warn( + "meshio does not support tags of string type.\n" + f" Surface tag {rest_of_line} will be replaced by {next_tag_id}" + ) + return next_tag_id + def _translate_cells(data, has_extra_column=False): # adapted from _vtk.py From 73ae38ddfedf63b4e8f9880873dd46bb2eb4b8ca Mon Sep 17 00:00:00 2001 From: evidencebp Date: Sun, 29 Dec 2024 20:39:23 +0200 Subject: [PATCH 09/29] src\meshio\med\_med.py too-many-statements Function write had 93 statements while Pylint recommends having at most 50. Function read had 54 statements. I extracted methods to make the code more structured and solve that. --- src/meshio/med/_med.py | 97 +++++++++++++++++++++++++----------------- 1 file changed, 58 insertions(+), 39 deletions(-) diff --git a/src/meshio/med/_med.py b/src/meshio/med/_med.py index 0cdd35ce..a2160234 100644 --- a/src/meshio/med/_med.py +++ b/src/meshio/med/_med.py @@ -32,12 +32,33 @@ numpy_void_str = np.bytes_("") +def _read_cells(mesh, cell_data): + """Read cell blocks from MED file and return cells, cell_types""" + cells = [] + cell_types = [] + med_cells = mesh["MAI"] + for med_cell_type, med_cell_type_group in med_cells.items(): + cell_type = med_to_meshio_type[med_cell_type] + cell_types.append(cell_type) + nod = med_cell_type_group["NOD"] + n_cells = nod.attrs["NBR"] + cells += [(cell_type, nod[()].reshape(n_cells, -1, order="F") - 1)] + + # Cell tags + if "FAM" in med_cell_type_group: + tags = med_cell_type_group["FAM"][()] + if "cell_tags" not in cell_data: + cell_data["cell_tags"] = [] + cell_data["cell_tags"].append(tags) + + return cells, cell_types + def read(filename): import h5py f = h5py.File(filename, "r") - # Mesh ensemble + # Mesh ensemble mesh_ensemble = f["ENS_MAA"] meshes = mesh_ensemble.keys() if len(meshes) != 1: @@ -79,23 +100,8 @@ def read(filename): if "NOEUD" in fas: point_tags = _read_families(fas["NOEUD"]) - # CellBlock - cells = [] - cell_types = [] - med_cells = mesh["MAI"] - for med_cell_type, med_cell_type_group in med_cells.items(): - cell_type = med_to_meshio_type[med_cell_type] - cell_types.append(cell_type) - nod = med_cell_type_group["NOD"] - n_cells = nod.attrs["NBR"] - cells += [(cell_type, nod[()].reshape(n_cells, -1, order="F") - 1)] - - # Cell tags - if "FAM" in med_cell_type_group: - tags = med_cell_type_group["FAM"][()] - if "cell_tags" not in cell_data: - cell_data["cell_tags"] = [] - cell_data["cell_tags"].append(tags) + # Read cells + cells, cell_types = _read_cells(mesh, cell_data) # Information for cell tags cell_tags = {} @@ -210,17 +216,8 @@ def _read_families(fas_data): return families -def write(filename, mesh): - import h5py - - # MED doesn't support compression, - # - # compression = None - - f = h5py.File(filename, "w") - - # Strangely the version must be 3.0.x - # Any version >= 3.1.0 will NOT work with SALOME 8.3 +def _write_mesh_info(f, mesh): + """Create and write general mesh information""" info = f.create_group("INFOS_GENERALES") info.attrs.create("MAJ", 3) info.attrs.create("MIN", 0) @@ -241,16 +238,11 @@ def 
write(filename, mesh):
     med_mesh.attrs.create("NOM", np.bytes_("".join(f"{name:<16}" for name in names)))
     med_mesh.attrs.create("DES", np.bytes_("Mesh created with meshio"))
     med_mesh.attrs.create("TYP", 0)  # mesh type (MED_NON_STRUCTURE)
+
+    return mesh_ensemble, mesh_name, med_mesh
 
-    # Time-step
-    step = "-0000000000000000001-0000000000000000001"  # NDT NOR
-    time_step = med_mesh.create_group(step)
-    time_step.attrs.create("CGT", 1)
-    time_step.attrs.create("NDT", -1)  # no time step (-1)
-    time_step.attrs.create("NOR", -1)  # no iteration step (-1)
-    time_step.attrs.create("PDT", -1.0)  # current time
-
-    # Points
+
+def _write_points(time_step, mesh):
+    """Write mesh points"""
     nodes_group = time_step.create_group("NOE")
     nodes_group.attrs.create("CGT", 1)
     nodes_group.attrs.create("CGS", 1)
@@ -265,6 +257,28 @@ def write(filename, mesh):
     family = nodes_group.create_dataset("FAM", data=mesh.point_data["point_tags"])
     family.attrs.create("CGT", 1)
     family.attrs.create("NBR", len(mesh.points))
+
+    return profile
+
+
+def write(filename, mesh):
+    """Write mesh to MED format"""
+    import h5py
+
+    f = h5py.File(filename, "w")
+
+    # Write mesh info
+    mesh_ensemble, mesh_name, med_mesh = _write_mesh_info(f, mesh)
+
+    # Time-step
+    step = "-0000000000000000001-0000000000000000001"
+    time_step = med_mesh.create_group(step)
+    time_step.attrs.create("CGT", 1)
+    time_step.attrs.create("NDT", -1)
+    time_step.attrs.create("NOR", -1)
+    time_step.attrs.create("PDT", -1.0)
+
+    # Write points
+    profile = _write_points(time_step, mesh)
 
     # Cells (mailles in French)
     if len(mesh.cells) != len(np.unique([c.type for c in mesh.cells])):
@@ -319,6 +333,11 @@ def write(filename, mesh):
     name_idx = 0
     field_names = mesh.field_data["med:nom"] if "med:nom" in mesh.field_data else []
 
+    name_idx = _write_nodal_data(mesh, mesh_name, fields, field_names, profile)
+
+    _write_cells_data(mesh, field_names, fields, mesh_name, profile, name_idx)
+
+
+def _write_nodal_data(mesh, mesh_name, fields, field_names, profile):
+    # The counter into field_names starts here and continues into the cell
+    # data, so it is returned and handed on to _write_cells_data.
+    name_idx = 0
     # Nodal data
     for name, data in mesh.point_data.items():
         if name == "point_tags":  # ignore point_tags already written under FAS
@@ -328,6 +347,7 @@ def write(filename, mesh):
             name_idx += 1
             _write_data(fields, mesh_name, field_name, profile, name, supp, data)
 
+    return name_idx
+
+
+def _write_cells_data(mesh, field_names, fields, mesh_name, profile, name_idx):
     # Cell data
     # Only support writing ELEM fields with only 1 Gauss point per cell
     # Or ELNO (DG) fields defined at every node per cell
@@ -358,7 +378,6 @@ def write(filename, mesh):
             )
             name_idx += 1
 
-
 def _write_data(
     fields,
     mesh_name,

From 5ef5be4436df10ef1037fe95ca809506ea3d9f23 Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Sun, 29 Dec 2024 20:58:00 +0200
Subject: [PATCH 10/29] src\meshio\ply\_ply.py too-many-statements

Function read_buffer had 56 statements while Pylint recommends having at
most 50. Function write had 64 statements. I extracted methods to make
the code more structured and solve that.
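For orientation, a PLY header is a sequence of element declarations,
each immediately followed by the properties of that element. A minimal
sketch of parsing such a header (toy input; the extracted _read_header
handles more cases than this):

    import re

    header = [
        "element vertex 8",
        "property float x",
        "property float y",
        "property float z",
        "element face 6",
        "property list uint8 int32 vertex_indices",
        "end_header",
    ]
    counts, props = {}, {}
    current = None
    for line in header:
        m = re.match(r"element (\w+) (\d+)", line)
        if m:
            current = m.group(1)
            counts[current] = int(m.group(2))
            props[current] = []
        elif line.startswith("property"):
            props[current].append(line.split()[1:])

    assert counts == {"vertex": 8, "face": 6}
    assert props["face"] == [["list", "uint8", "int32", "vertex_indices"]]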
--- src/meshio/ply/_ply.py | 282 +++++++++++++++++++---------------------- 1 file changed, 130 insertions(+), 152 deletions(-) diff --git a/src/meshio/ply/_ply.py b/src/meshio/ply/_ply.py index 4393cbc3..74d4c21e 100644 --- a/src/meshio/ply/_ply.py +++ b/src/meshio/ply/_ply.py @@ -71,34 +71,9 @@ def _next_line(f): break return line - -def read_buffer(f): - # assert that the first line reads `ply` - line = f.readline().decode().strip() - if line != "ply": - raise ReadError("Expected ply") - - line = _next_line(f) - endianness = None - if line == "format ascii 1.0": - is_binary = False - elif line == "format binary_big_endian 1.0": - is_binary = True - endianness = ">" - else: - if line != "format binary_little_endian 1.0": - raise ReadError() - is_binary = True - endianness = "<" - +def _read_header(f, point_data_formats, point_data_names, cell_data_dtypes, cell_data_names): # read header line = _next_line(f) - num_verts = 0 - num_cells = 0 - point_data_formats = [] - point_data_names = [] - cell_data_names = [] - cell_data_dtypes = [] while line != "end_header": m_vert = re.match("element vertex (\\d+)", line) m_face = re.match("element face (\\d+)", line) @@ -141,6 +116,36 @@ def read_buffer(f): f"got `{line}`" ) + +def read_buffer(f): + # assert that the first line reads `ply` + line = f.readline().decode().strip() + if line != "ply": + raise ReadError("Expected ply") + + line = _next_line(f) + endianness = None + if line == "format ascii 1.0": + is_binary = False + elif line == "format binary_big_endian 1.0": + is_binary = True + endianness = ">" + else: + if line != "format binary_little_endian 1.0": + raise ReadError() + is_binary = True + endianness = "<" + + # read header + line = _next_line(f) + num_verts = 0 + num_cells = 0 + point_data_formats = [] + point_data_names = [] + cell_data_names = [] + cell_data_dtypes = [] + _read_header(f, point_data_formats, point_data_names, cell_data_dtypes, cell_data_names) + if is_binary: mesh = _read_binary( f, @@ -391,141 +396,114 @@ def parse_ragged(start, num_cells): return byte_starts_ends[-1], blocks -def write(filename, mesh: Mesh, binary: bool = True): # noqa: C901 - - with open_file(filename, "wb") as fh: - fh.write(b"ply\n") - - if binary: - fh.write(f"format binary_{sys.byteorder}_endian 1.0\n".encode()) - else: - fh.write(b"format ascii 1.0\n") - - now = datetime.datetime.now().isoformat() - fh.write(f"comment Created by meshio v{__version__}, {now}\n".encode()) - - # counts - fh.write(f"element vertex {mesh.points.shape[0]:d}\n".encode()) - # - dim_names = ["x", "y", "z"] - # From : - # - # > The type can be specified with one of char uchar short ushort int uint float - # > double, or one of int8 uint8 int16 uint16 int32 uint32 float32 float64. - # - # We're adding [u]int64 here. - type_name_table = { - np.dtype(np.int8): "int8", - np.dtype(np.int16): "int16", - np.dtype(np.int32): "int32", - np.dtype(np.int64): "int64", - np.dtype(np.uint8): "uint8", - np.dtype(np.uint16): "uint16", - np.dtype(np.uint32): "uint32", - np.dtype(np.uint64): "uint64", - np.dtype(np.float32): "float", - np.dtype(np.float64): "double", - } - for k in range(mesh.points.shape[1]): - type_name = type_name_table[mesh.points.dtype] - fh.write(f"property {type_name} {dim_names[k]}\n".encode()) - - pd = [] - for key, value in mesh.point_data.items(): - if len(value.shape) > 1: - warn( - "PLY writer doesn't support multidimensional point data yet. " - f"Skipping {key}." 
-                )
-                continue
-            type_name = type_name_table[value.dtype]
-            fh.write(f"property {type_name} {key}\n".encode())
-            pd.append(value)
-
-        num_cells = 0
-        legal_cell_types = ["vertex", "line", "triangle", "quad", "polygon"]
-        for cell_block in mesh.cells:
-            if cell_block.type in legal_cell_types:
-                num_cells += cell_block.data.shape[0]
-
-        if num_cells > 0:
-            fh.write(f"element face {num_cells:d}\n".encode())
-
-            # possibly cast down to int32
-            # TODO don't alter the mesh data
-            has_cast = False
-            for k, cell_block in enumerate(mesh.cells):
-                if cell_block.data.dtype == np.int64:
-                    has_cast = True
-                    mesh.cells[k] = CellBlock(
-                        cell_block.type, cell_block.data.astype(np.int32)
-                    )
-
-            if has_cast:
-                warn("PLY doesn't support 64-bit integers. Casting down to 32-bit.")
-
-            # assert that all cell dtypes are equal
-            cell_dtype = None
-            for cell_block in mesh.cells:
-                if cell_dtype is None:
-                    cell_dtype = cell_block.data.dtype
-                if cell_block.data.dtype != cell_dtype:
-                    raise WriteError()
-
-            if cell_dtype is not None:
-                ply_type = numpy_to_ply_dtype[cell_dtype]
-                fh.write(f"property list uint8 {ply_type} vertex_indices\n".encode())
-
-        # TODO other cell data
-        fh.write(b"end_header\n")
-
-        if binary:
-            # points and point_data
-            out = np.rec.fromarrays([coord for coord in mesh.points.T] + pd)
-            fh.write(out.tobytes())
-
-            # cells
-            for cell_block in mesh.cells:
-                if cell_block.type not in legal_cell_types:
-                    warn(
-                        f'cell_type "{cell_block.type}" is not supported by PLY format '
-                        "- skipping"
-                    )
-                    continue
-                # prepend with count
-                d = cell_block.data
-                out = np.rec.fromarrays(
-                    [np.broadcast_to(np.uint8(d.shape[1]), d.shape[0]), *d.T]
-                )
-                fh.write(out.tobytes())
-        else:
-            # vertices
-            # np.savetxt(fh, mesh.points, "%r")  # slower
-            # out = np.column_stack([mesh.points] + list(mesh.point_data.values()))
-            out = np.rec.fromarrays([coord for coord in mesh.points.T] + pd)
-            fmt = " ".join(["{}"] * len(out[0]))
-            out = "\n".join([fmt.format(*row) for row in out]) + "\n"
-            fh.write(out.encode())
-
-            # cells
-            for cell_block in mesh.cells:
-                if cell_block.type not in legal_cell_types:
-                    warn(
-                        f'cell_type "{cell_block.type}" is not supported by PLY format '
-                        + "- skipping"
-                    )
-                    continue
-                # if cell_type not in cell_type_to_count.keys():
-                #     continue
-                d = cell_block.data
-                out = np.column_stack(
-                    [np.full(d.shape[0], d.shape[1], dtype=d.dtype), d]
-                )
-                # savetxt is slower
-                # np.savetxt(fh, out, "%d %d %d %d")
-                fmt = " ".join(["{}"] * out.shape[1])
-                out = "\n".join([fmt.format(*row) for row in out]) + "\n"
-                fh.write(out.encode())
-
+def _write_header(fh, mesh, binary):
+    """Write the PLY preamble, up to and including the vertex element.
+
+    The face element is declared later, after the vertex properties,
+    because PLY attaches "property" lines to the most recently declared
+    element.
+    """
+    fh.write(b"ply\n")
+    if binary:
+        fh.write(f"format binary_{sys.byteorder}_endian 1.0\n".encode())
+    else:
+        fh.write(b"format ascii 1.0\n")
+
+    now = datetime.datetime.now().isoformat()
+    fh.write(f"comment Created by meshio v{__version__}, {now}\n".encode())
+
+    # Vertex element; its properties follow in _write_point_properties
+    fh.write(f"element vertex {mesh.points.shape[0]:d}\n".encode())
+
+
+def _write_point_properties(fh, mesh):
+    """Write point property definitions."""
+    dim_names = ["x", "y", "z"]
+    type_name_table = {
+        np.dtype(np.int8): "int8",
+        np.dtype(np.int16): "int16",
+        np.dtype(np.int32): "int32",
+        np.dtype(np.int64): "int64",
+        np.dtype(np.uint8): "uint8",
+        np.dtype(np.uint16): "uint16",
+        np.dtype(np.uint32): "uint32",
+        np.dtype(np.uint64): "uint64",
+        np.dtype(np.float32): "float",
+        np.dtype(np.float64): "double",
+    }
+
+    # Write coordinate properties
+    for k in range(mesh.points.shape[1]):
+        type_name = type_name_table[mesh.points.dtype]
+        fh.write(f"property {type_name} {dim_names[k]}\n".encode())
+
+    # Write additional point data properties
+    pd = []
+    for key, value in mesh.point_data.items():
+        if len(value.shape) > 1:
+            warn(
+                "PLY writer doesn't support multidimensional point data yet. "
+                f"Skipping {key}."
+            )
+            continue
+        type_name = type_name_table[value.dtype]
+        fh.write(f"property {type_name} {key}\n".encode())
+        pd.append(value)
+    return pd
+
+
+def _write_data(fh, mesh, binary, pd):
+    """Write the actual data."""
+    legal_cell_types = ["vertex", "line", "triangle", "quad", "polygon"]
+    if binary:
+        # Points and point data
+        out = np.rec.fromarrays([coord for coord in mesh.points.T] + pd)
+        fh.write(out.tobytes())
+
+        # Cells
+        for cell_block in mesh.cells:
+            if cell_block.type not in legal_cell_types:
+                warn(
+                    f'cell_type "{cell_block.type}" is not supported by PLY '
+                    "format - skipping"
+                )
+                continue
+            # prepend with count
+            d = cell_block.data
+            out = np.rec.fromarrays(
+                [np.broadcast_to(np.uint8(d.shape[1]), d.shape[0]), *d.T]
+            )
+            fh.write(out.tobytes())
+    else:
+        # ASCII format
+        out = np.rec.fromarrays([coord for coord in mesh.points.T] + pd)
+        fmt = " ".join(["{}"] * len(out[0]))
+        out = "\n".join([fmt.format(*row) for row in out]) + "\n"
+        fh.write(out.encode())
+
+        # Cells
+        for cell_block in mesh.cells:
+            if cell_block.type not in legal_cell_types:
+                warn(
+                    f'cell_type "{cell_block.type}" is not supported by PLY '
+                    "format - skipping"
+                )
+                continue
+            d = cell_block.data
+            out = np.column_stack(
+                [np.full(d.shape[0], d.shape[1], dtype=d.dtype), d]
+            )
+            fmt = " ".join(["{}"] * out.shape[1])
+            out = "\n".join([fmt.format(*row) for row in out]) + "\n"
+            fh.write(out.encode())
+
+
+def write(filename, mesh: Mesh, binary: bool = True):
+    legal_cell_types = ["vertex", "line", "triangle", "quad", "polygon"]
+    num_cells = sum(
+        cell_block.data.shape[0]
+        for cell_block in mesh.cells
+        if cell_block.type in legal_cell_types
+    )
+
+    # Handle int64 casting
+    for k, cell_block in enumerate(mesh.cells):
+        if cell_block.data.dtype == np.int64:
+            warn("PLY doesn't support 64-bit integers. Casting down to 32-bit.")
+            mesh.cells[k] = CellBlock(
+                cell_block.type, cell_block.data.astype(np.int32)
+            )
+
+    with open_file(filename, "wb") as fh:
+        _write_header(fh, mesh, binary)
+        pd = _write_point_properties(fh, mesh)
+
+        # The face element and its vertex_indices list property have to come
+        # after the vertex properties; otherwise the vertex properties would
+        # be attached to the face element.
+        if num_cells > 0:
+            fh.write(f"element face {num_cells:d}\n".encode())
+            cell_dtype = next((cb.data.dtype for cb in mesh.cells), None)
+            if cell_dtype is not None:
+                ply_type = numpy_to_ply_dtype[cell_dtype]
+                fh.write(
+                    f"property list uint8 {ply_type} vertex_indices\n".encode()
+                )
+
+        fh.write(b"end_header\n")
+        _write_data(fh, mesh, binary, pd)
 
 register_format("ply", [".ply"], read, {"ply": write})

From 2a5b71786c59d1ed73a676f4aeef5aa33e76014f Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Sun, 29 Dec 2024 21:09:04 +0200
Subject: [PATCH 11/29] src\meshio\vtk\_vtk_51.py too-many-statements

Function _read_section had 53 statements while Pylint recommends having
at most 50. I extracted _read_cell_offsets_and_connectivity to make the
code more structured and solve that.
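In the 5.1 layout, OFFSETS and CONNECTIVITY are two separate sections:
OFFSETS holds one entry per cell plus a leading zero, and consecutive
offset pairs delimit each cell's points in CONNECTIVITY. A toy sketch of
that mapping (illustrative only, not meshio's code):

    import numpy as np

    offsets = np.array([0, 3, 7])  # num_cells + 1 entries
    connectivity = np.array([0, 1, 2, 2, 3, 4, 5])

    cells = [connectivity[a:b] for a, b in zip(offsets[:-1], offsets[1:])]
    assert [c.tolist() for c in cells] == [[0, 1, 2], [2, 3, 4, 5]]

This also matches the invariants asserted in the patch: offsets[0] == 0
and offsets[-1] == len(connectivity).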
---
 src/meshio/vtk/_vtk_51.py | 46 +++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/src/meshio/vtk/_vtk_51.py b/src/meshio/vtk/_vtk_51.py
index 2eb1db6b..ef65855c 100644
--- a/src/meshio/vtk/_vtk_51.py
+++ b/src/meshio/vtk/_vtk_51.py
@@ -120,6 +120,28 @@ def read_buffer(f):
     )
 
 
+def _read_cell_offsets_and_connectivity(f, info):
+    """Read CELLS section data, i.e., offsets and connectivity.
+
+    The OFFSETS/CONNECTIVITY layout appeared with "vtk DataFile Version
+    5.1" (ParaView 5.8.1 outputs); no specification seems to exist, see
+    https://discourse.paraview.org/t/specification-of-vtk-datafile-version-5-1/5127
+    """
+    try:
+        line = f.readline().decode()
+    except UnicodeDecodeError:
+        line = ""
+
+    assert line.startswith("OFFSETS")
+    info.num_offsets = int(info.split[1])
+    info.num_items = int(info.split[2])
+    dtype = np.dtype(vtk_to_numpy_dtype_name[line.split()[1]])
+    offsets = _read_int_data(f, info.is_ascii, info.num_offsets, dtype)
+
+    line = f.readline().decode()
+    assert line.startswith("CONNECTIVITY")
+    dtype = np.dtype(vtk_to_numpy_dtype_name[line.split()[1]])
+    connectivity = _read_int_data(f, info.is_ascii, info.num_items, dtype)
+
+    assert offsets[0] == 0
+    assert offsets[-1] == len(connectivity)
+    return connectivity, offsets[1:]
+
+
 def _read_section(f, info):
     if info.section == "METADATA":
         _skip_meta(f)
@@ -141,29 +163,7 @@ def _read_section(f, info):
 
     elif info.section == "CELLS":
         info.active = "CELLS"
-        try:
-            line = f.readline().decode()
-        except UnicodeDecodeError:
-            line = ""
-
-        assert line.startswith("OFFSETS")
-        # vtk DataFile Version 5.1 - appearing in Paraview 5.8.1 outputs
-        # No specification found for this file format.
-        # See the question on ParaView Discourse Forum:
-        # https://discourse.paraview.org/t/specification-of-vtk-datafile-version-5-1/5127
-        info.num_offsets = int(info.split[1])
-        info.num_items = int(info.split[2])
-        dtype = np.dtype(vtk_to_numpy_dtype_name[line.split()[1]])
-        offsets = _read_int_data(f, info.is_ascii, info.num_offsets, dtype)
-
-        line = f.readline().decode()
-        assert line.startswith("CONNECTIVITY")
-        dtype = np.dtype(vtk_to_numpy_dtype_name[line.split()[1]])
-        connectivity = _read_int_data(f, info.is_ascii, info.num_items, dtype)
-        info.connectivity = connectivity
-        assert offsets[0] == 0
-        assert offsets[-1] == len(connectivity)
-        info.offsets = offsets[1:]
+        info.connectivity, info.offsets = _read_cell_offsets_and_connectivity(
+            f, info
+        )
 
     elif info.section == "CELL_TYPES":
         info.active = "CELL_TYPES"

From 8eedb0ac2bd75425c6766f89bc068e3afec950a5 Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Sun, 29 Dec 2024 21:19:53 +0200
Subject: [PATCH 12/29] src\meshio\permas\_permas.py try-except-raise

Function read_set caught ValueError at line 218 only to re-raise it
immediately. That is equivalent to not catching the exception at all,
just more complex, so I removed the exception handling.

---
 src/meshio/permas/_permas.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/meshio/permas/_permas.py b/src/meshio/permas/_permas.py
index dda02a3a..c2acaa61 100644
--- a/src/meshio/permas/_permas.py
+++ b/src/meshio/permas/_permas.py
@@ -213,10 +213,7 @@ def read_set(f, params_map):
             raise ReadError(set_ids)
         set_ids = np.arange(set_ids[0], set_ids[1], set_ids[2])
     else:
-        try:
-            set_ids = np.unique(np.array(set_ids, dtype="int32"))
-        except ValueError:
-            raise
+        set_ids = np.unique(np.array(set_ids, dtype="int32"))
 
     return set_ids
 
From 73ae38ddfedf63b4e8f9880873dd46bb2eb4b8ca Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Sun, 29 Dec 2024 21:22:35 +0200
Subject: [PATCH 13/29] Update __about__.py

Catching Exception might hide unexpected exceptions (e.g., due to new
code that will be added).
The version lookup in src\meshio\__about__.py catches Exception (line 12)
The try section is __version__ = metadata.version("meshio")
Exception was changed to metadata.PackageNotFoundError
For details see https://docs.python.org/3/library/importlib.metadata.html
---
 src/meshio/__about__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/meshio/__about__.py b/src/meshio/__about__.py
index 72fd5b4c..5fa6b618 100644
--- a/src/meshio/__about__.py
+++ b/src/meshio/__about__.py
@@ -9,5 +9,5 @@
 try:
     __version__ = metadata.version("meshio")
-except Exception:
+except metadata.PackageNotFoundError:
     __version__ = "unknown"

From 03c49b1b8a77a5fa2060c47858fd8898d43cdea5 Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Sun, 29 Dec 2024 21:46:59 +0200
Subject: [PATCH 14/29] src\meshio\gmsh\_gmsh41.py too-many-nested-blocks

Function _write_entities had 6 nested-blocks while Pylint recommends
having at most 5. I extracted _write_bounding_entities to make the code
more structured and solve that.
---
 src/meshio/gmsh/_gmsh41.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/src/meshio/gmsh/_gmsh41.py b/src/meshio/gmsh/_gmsh41.py
index 3e305a17..6507dc08 100644
--- a/src/meshio/gmsh/_gmsh41.py
+++ b/src/meshio/gmsh/_gmsh41.py
@@ -483,6 +483,15 @@ def _write_entities(fh, cells, tag_data, cell_sets, point_data, binary):
         else:
             fh.write(b"0 ")
 
+        _write_bounding_entities(fh, dim, has_bounding_elements, matching_cell_block, cell_sets, binary)
+
+    if binary:
+        fh.write(b"\n")
+
+    # raise NotImplementedError
+    fh.write(b"$EndEntities\n")
+
+def _write_bounding_entities(fh, dim, has_bounding_elements, matching_cell_block, cell_sets, binary):
     if dim > 0:
         # Entities not of the lowest dimension can have their
         # bounding elements (of dimension one less) specified
@@ -517,11 +526,6 @@ def _write_entities(fh, cells, tag_data, cell_sets, point_data, binary):
         if not binary:
             fh.write(b"\n")
 
-    if binary:
-        fh.write(b"\n")
-
-    # raise NotImplementedError
-    fh.write(b"$EndEntities\n")
-
 
 def _write_nodes(fh, points, cells, point_data, float_fmt, binary):
     """Write node information.
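The rationale in patch 13 above generalizes: a broad except can mask bugs
that have nothing to do with the expected failure. A standalone
illustration (hypothetical typo, not meshio code):

from importlib import metadata

try:
    __version__ = metadata.versoin("meshio")  # AttributeError: a plain typo
except Exception:  # swallows the typo and silently reports "unknown"
    __version__ = "unknown"

With except metadata.PackageNotFoundError only the one expected failure
mode (the package is not installed, e.g. a source checkout) is caught, and
the AttributeError above would surface immediately.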
From b13d71f5884eaf1c419eba1c5821d6f1ca26dd14 Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 13:59:13 +0200 Subject: [PATCH 15/29] src\meshio\gmsh\common.py line-too-long Made long lines shorter --- src/meshio/gmsh/common.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/meshio/gmsh/common.py b/src/meshio/gmsh/common.py index 717e82ee..e33a7e82 100644 --- a/src/meshio/gmsh/common.py +++ b/src/meshio/gmsh/common.py @@ -169,7 +169,8 @@ def _gmsh_to_meshio_order(cell_type: str, idx: ArrayLike) -> np.ndarray: "hexahedron20": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 13, 9, 16, 18, 19, 17, 10, 12, 14, 15, - ], # https://vtk.org/doc/release/4.2/html/classvtkQuadraticHexahedron.html and https://gmsh.info/doc/texinfo/gmsh.html#Node-ordering + ], # https://vtk.org/doc/release/4.2/html/classvtkQuadraticHexahedron.html + # and https://gmsh.info/doc/texinfo/gmsh.html#Node-ordering "hexahedron27": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 13, 9, 16, 18, 19, 17, 10, 12, 14, 15, @@ -177,7 +178,8 @@ def _gmsh_to_meshio_order(cell_type: str, idx: ArrayLike) -> np.ndarray: ], "wedge15": [ 0, 1, 2, 3, 4, 5, 6, 9, 7, 12, 14, 13, 8, 10, 11 - ], # http://davis.lbl.gov/Manuals/VTK-4.5/classvtkQuadraticWedge.html and https://gmsh.info/doc/texinfo/gmsh.html#Node-ordering + ], # http://davis.lbl.gov/Manuals/VTK-4.5/classvtkQuadraticWedge.html + # and https://gmsh.info/doc/texinfo/gmsh.html#Node-ordering "pyramid13": [0, 1, 2, 3, 4, 5, 8, 10, 6, 7, 9, 11, 12], # fmt: on } From fec373d6042d46a49e765ea99501b11c133ce8b6 Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 14:02:09 +0200 Subject: [PATCH 16/29] src\meshio\abaqus\_abaqus.py line-too-long Made long line shorter --- src/meshio/abaqus/_abaqus.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/meshio/abaqus/_abaqus.py b/src/meshio/abaqus/_abaqus.py index 0dd88774..e4c2b792 100644 --- a/src/meshio/abaqus/_abaqus.py +++ b/src/meshio/abaqus/_abaqus.py @@ -179,7 +179,8 @@ def read_buffer(f): else: raise ReadError(f"Unknown cell set '{set_name}'") elif keyword == "INCLUDE": - # Splitting line to get external input file path (example: *INCLUDE,INPUT=wInclude_bulk.inp) + # Splitting line to get external input file path + # (example: *INCLUDE,INPUT=wInclude_bulk.inp) ext_input_file = pathlib.Path(line.split("=")[-1].strip()) if ext_input_file.exists() is False: cd = pathlib.Path(f.name).parent From 8faa8171a4af2e7b2cb18f617ab9ee4de8c6a02c Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 14:06:16 +0200 Subject: [PATCH 17/29] src\meshio\medit\_medit_internal.py line-too-long Made long line more readable --- src/meshio/medit/_medit.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/meshio/medit/_medit.py b/src/meshio/medit/_medit.py index 272bd16e..e29e90f1 100644 --- a/src/meshio/medit/_medit.py +++ b/src/meshio/medit/_medit.py @@ -112,7 +112,9 @@ def read_binary_buffer(f): field = np.fromfile(f, count=1, dtype=keytype).item() if field != 3: # = GmfDimension - raise ReadError("Invalid dimension code : " + str(field) + " it should be 3") + raise ReadError("Invalid dimension code : " + + str(field) + + " it should be 3") np.fromfile(f, count=1, dtype=postype) From b839f88eed6d9912498ad6f471449b32f68068b0 Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 14:10:01 +0200 Subject: [PATCH 18/29] src\meshio\_cli\_convert.py line-too-long Made long lines shorter --- src/meshio/_cli/_convert.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff 
--git a/src/meshio/_cli/_convert.py b/src/meshio/_cli/_convert.py
index 1a2edd30..0cc7d47d 100644
--- a/src/meshio/_cli/_convert.py
+++ b/src/meshio/_cli/_convert.py
@@ -38,13 +38,15 @@ def add_args(parser):
         "--sets-to-int-data",
         "-s",
         action="store_true",
-        help="if possible, convert sets to integer data (useful if the output type does not support sets)",
+        help=("if possible, convert sets to integer data "
+              + "(useful if the output type does not support sets)"),
     )
     parser.add_argument(
         "--int-data-to-sets",
         "-d",
         action="store_true",
-        help="if possible, convert integer data to sets (useful if the output type does not support integer data)",
+        help=("if possible, convert integer data to sets "
+              + "(useful if the output type does not support integer data)"),
     )

From 2955079af877a4dd2a892c027bc3a376a07d17a5 Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Mon, 30 Dec 2024 14:17:41 +0200
Subject: [PATCH 19/29] src\meshio\_helpers.py line-too-long

Made long line shorter
---
 src/meshio/_helpers.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/meshio/_helpers.py b/src/meshio/_helpers.py
index 6cc7dac9..87dc23b0 100644
--- a/src/meshio/_helpers.py
+++ b/src/meshio/_helpers.py
@@ -153,7 +153,8 @@ def write(filename, mesh: Mesh, file_format: str | None = None, **kwargs):
             raise WriteError("File format must be supplied if `filename` is a buffer")
         if file_format == "tetgen":
             raise WriteError(
-                "tetgen format is spread across multiple files, and so cannot be written to a buffer"
+                "tetgen format is spread across multiple files,"
+                + " and so cannot be written to a buffer"
             )
     else:
         path = Path(filename)

From 07811fa970a1bac72c9794c488e7404f4752c098 Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Mon, 30 Dec 2024 14:19:23 +0200
Subject: [PATCH 20/29] Revert "src\meshio\medit\_medit_internal.py
 line-too-long"

This reverts commit 8faa8171a4af2e7b2cb18f617ab9ee4de8c6a02c.
---
 src/meshio/medit/_medit.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/meshio/medit/_medit.py b/src/meshio/medit/_medit.py
index e29e90f1..272bd16e 100644
--- a/src/meshio/medit/_medit.py
+++ b/src/meshio/medit/_medit.py
@@ -112,9 +112,7 @@ def read_binary_buffer(f):
     field = np.fromfile(f, count=1, dtype=keytype).item()
 
     if field != 3:  # = GmfDimension
-        raise ReadError("Invalid dimension code : "
-                        + str(field)
-                        + " it should be 3")
+        raise ReadError("Invalid dimension code : " + str(field) + " it should be 3")
 
     np.fromfile(f, count=1, dtype=postype)

From ef32c1665b80041101a438051b27f64182c02b59 Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Mon, 30 Dec 2024 16:14:25 +0200
Subject: [PATCH 21/29] src\meshio\medit\_medit.py too-many-branches

Function read_binary_buffer had 18 branches while Pylint recommends having
at most 12. Function read_ascii_buffer had 24 branches. I extracted
methods, aggregated similar code into a dictionary, and added the line
check to the while loop to make the code more structured and solve that.
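The dictionary mentioned above replaces a run of near-identical elif
branches with a keyword-to-handler table. A minimal sketch of the dispatch
pattern, with invented section names and a simplified line-based skip (the
real code discards values via np.fromfile):

import io

def _skip_section(f):
    # a section is a count line followed by that many data lines
    count = int(f.readline())
    for _ in range(count):
        f.readline()

skip_dict = {
    "NormalAtVertices": _skip_section,
    "VertexOnGeometricEdge": _skip_section,
}

f = io.StringIO("2\n1 1\n2 2\nEnd\n")
skip_dict["NormalAtVertices"](f)  # one lookup instead of one elif per keyword
assert f.readline().strip() == "End"

In the actual diff each entry is a lambda that binds the dtype and the
number of values per line, so the handlers differ only in those arguments.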
--- src/meshio/medit/_medit.py | 227 +++++++++++++++++--------------------
 1 file changed, 107 insertions(+), 120 deletions(-)

diff --git a/src/meshio/medit/_medit.py b/src/meshio/medit/_medit.py
index 272bd16e..0abf8ebf 100644
--- a/src/meshio/medit/_medit.py
+++ b/src/meshio/medit/_medit.py
@@ -75,39 +75,10 @@ def read_binary_buffer(f):
     keytype = "i4"
 
     code = np.fromfile(f, count=1, dtype=keytype).item()
-
-    if code != 1 and code != 16777216:
-        raise ReadError("Invalid code")
-
-    if code == 16777216:
-        # swap endianness
-        swapped = ">" if struct.unpack("=l", struct.pack("<l", 1))[0] == 1 else "<"
-        itype += swapped
-        ftype += swapped
-        postype += swapped
+    _update_by_code(code, itype, ftype, postype)
 
     version = np.fromfile(f, count=1, dtype=keytype).item()
 
-    if version < 1 or version > 4:
-        raise ReadError("Invalid version")
-
-    if version == 1:
-        itype += "i4"
-        ftype += "f4"
-        postype += "i4"
-    elif version == 2:
-        itype += "i4"
-        ftype += "f8"
-        postype += "i4"
-    elif version == 3:
-        itype += "i4"
-        ftype += "f8"
-        postype += "i8"
-    else:
-        itype += "i8"
-        ftype += "f8"
-        postype += "i8"
+    _update_by_version(version, itype, ftype, postype)
 
     field = np.fromfile(f, count=1, dtype=keytype).item()
@@ -168,11 +139,73 @@ def read_binary_buffer(f):
     return Mesh(points, cells, point_data=point_data, cell_data=cell_data)
 
+def _update_by_code(code, itype, ftype, postype):
+
+    if code != 1 and code != 16777216:
+        raise ReadError("Invalid code")
+
+    if code == 16777216:
+        # swap endianness
+        swapped = ">" if struct.unpack("=l", struct.pack("<l", 1))[0] == 1 else "<"
+        itype += swapped
+        ftype += swapped
+        postype += swapped
+
+def _update_by_version(version, itype, ftype, postype):
+
+    if version < 1 or version > 4:
+        raise ReadError("Invalid version")
+
+    if version == 1:
+        itype += "i4"
+        ftype += "f4"
+        postype += "i4"
+    elif version == 2:
+        itype += "i4"
+        ftype += "f8"
+        postype += "i4"
+    elif version == 3:
+        itype += "i4"
+        ftype += "f8"
+        postype += "i8"
+    else:
+        itype += "i8"
+        ftype += "f8"
+        postype += "i8"
+
+
+def _read_vertices(f, dim, dtype):
+    """Read vertices section from Medit ascii file."""
+    num_verts = int(f.readline())
+    out = np.fromfile(
+        f, count=num_verts * (dim + 1), dtype=dtype, sep=" "
+    ).reshape(num_verts, dim + 1)
+    points = out[:, :dim]
+    point_data = {"medit:ref": out[:, dim].astype(int)}
+    return points, point_data
+
+def _read_cells(f, cell_info):
+    """Read cell section from Medit ascii file."""
+    meshio_type, points_per_cell = cell_info
+    num_cells = int(f.readline())
+    out = np.fromfile(
+        f, count=num_cells * (points_per_cell + 1), dtype=int, sep=" "
+    ).reshape(num_cells, points_per_cell + 1)
+    return meshio_type, out[:, :points_per_cell] - 1, out[:, -1]
+
+def _skip_section(f, dtype, n_per_line):
+    """Skip a section by reading and discarding data."""
+    num_items = int(f.readline())
+    np.fromfile(f, count=num_items * n_per_line, dtype=dtype, sep=" ")
+
 def read_ascii_buffer(f):
     dim = 0
     cells = []
     point_data = {}
     cell_data = {"medit:ref": []}
+    points = None
+    dtype = None
 
     meshio_from_medit = {
         "Edges": ("line", 2),
         "Triangles": ("triangle", 3),
         "Quadrilaterals": ("quad", 4),
         "Tetrahedra": ("tetra", 4),
         "Prisms": ("wedge", 6),
         "Pyramids": ("pyramid", 5),
         "Hexahedra": ("hexahedron", 8),  # Frey
         "Hexaedra": ("hexahedron", 8),  # Dobrzynski
     }
-    points = None
-    dtype = None
 
-    while True:
-        line = f.readline()
-        if not line:
-            # EOF
-            break
+    skip_dict = {
+        "NormalAtVertices": lambda x: _skip_section(x, int, 2),
+        "EdgeOnGeometricEdge": lambda x: _skip_section(x, int, 2),
+        "SubDomainFromMesh": lambda x: _skip_section(x, int, 4),
+        "VertexOnGeometricVertex": lambda x: _skip_section(x, int, 2),
+        "VertexOnGeometricEdge": lambda x: _skip_section(x, float, 3),
+    }
+
+    line = f.readline()
+    while line:
+
         line = line.strip()
-        if len(line) == 0 or line[0] == "#":
-            continue
+        if len(line) == 0 or line[0] == "#":
+            line = f.readline()
+            continue
 
         items = line.split()
-
         if not items[0].isalpha():
             raise ReadError()
@@ -206,96 +241,48 @@ def read_ascii_buffer(f):
         if items[0] == "MeshVersionFormatted":
            version = items[1]
            dtype = {"0": c_float, "1": c_float, "2":
c_double}[version] elif items[0] == "Dimension": - if len(items) >= 2: - dim = int(items[1]) - else: - dim = int( - int(f.readline()) - ) # e.g. Dimension\n3, where the number of dimensions is on the next line + dim = int(items[1]) if len(items) >= 2 else int(f.readline()) elif items[0] == "Vertices": - if dim <= 0: - raise ReadError() - if dtype is None: - raise ReadError("Expected `MeshVersionFormatted` before `Vertices`") - num_verts = int(f.readline()) - out = np.fromfile( - f, count=num_verts * (dim + 1), dtype=dtype, sep=" " - ).reshape(num_verts, dim + 1) - points = out[:, :dim] - point_data["medit:ref"] = out[:, dim].astype(int) + points, point_data = _handle_Vertices(f, dim, dtype) elif items[0] in meshio_from_medit: - meshio_type, points_per_cell = meshio_from_medit[items[0]] - # The first value is the number of elements - num_cells = int(f.readline()) - - out = np.fromfile( - f, count=num_cells * (points_per_cell + 1), dtype=int, sep=" " - ).reshape(num_cells, points_per_cell + 1) - - # adapt for 0-base - cells.append((meshio_type, out[:, :points_per_cell] - 1)) - cell_data["medit:ref"].append(out[:, -1]) - elif items[0] == "Corners": - # those are just discarded - num_corners = int(f.readline()) - np.fromfile(f, count=num_corners, dtype=dtype, sep=" ") - elif items[0] == "Normals": - # those are just discarded - num_normals = int(f.readline()) - np.fromfile(f, count=num_normals * dim, dtype=dtype, sep=" ").reshape( - num_normals, dim - ) - elif items[0] == "NormalAtVertices": - # those are just discarded - num_normal_at_vertices = int(f.readline()) - np.fromfile( - f, count=num_normal_at_vertices * 2, dtype=int, sep=" " - ).reshape(num_normal_at_vertices, 2) - elif items[0] == "SubDomainFromMesh": - # those are just discarded - num_sub_domain_from_mesh = int(f.readline()) - np.fromfile( - f, count=num_sub_domain_from_mesh * 4, dtype=int, sep=" " - ).reshape(num_sub_domain_from_mesh, 4) - elif items[0] == "VertexOnGeometricVertex": - # those are just discarded - num_vertex_on_geometric_vertex = int(f.readline()) - np.fromfile( - f, count=num_vertex_on_geometric_vertex * 2, dtype=int, sep=" " - ).reshape(num_vertex_on_geometric_vertex, 2) - elif items[0] == "VertexOnGeometricEdge": - # those are just discarded - num_vertex_on_geometric_edge = int(f.readline()) - np.fromfile( - f, count=num_vertex_on_geometric_edge * 3, dtype=float, sep=" " - ).reshape(num_vertex_on_geometric_edge, 3) - elif items[0] == "EdgeOnGeometricEdge": - # those are just discarded - num_edge_on_geometric_edge = int(f.readline()) - np.fromfile( - f, count=num_edge_on_geometric_edge * 2, dtype=int, sep=" " - ).reshape(num_edge_on_geometric_edge, 2) - elif items[0] == "Identifier" or items[0] == "Geometry": + meshio_type, cell_data_i, ref_data = _read_cells(f, meshio_from_medit[items[0]]) + cells.append((meshio_type, cell_data_i)) + cell_data["medit:ref"].append(ref_data) + elif items[0] in ["Corners", "Identifier", "Geometry"]: f.readline() - elif items[0] in [ - "RequiredVertices", - "TangentAtVertices", - "Tangents", - "Ridges", - ]: - msg = f"Meshio doesn't know keyword {items[0]}. Skipping." 
- warn(msg) - num_to_pass = int(f.readline()) - for _ in range(num_to_pass): - f.readline() + elif items[0] == "Normals": + _skip_section(f, dtype, dim) + elif items[0] in ["NormalAtVertices" + , "EdgeOnGeometricEdge" + , "SubDomainFromMesh" + , "VertexOnGeometricVertex" + , "VertexOnGeometricEdge"]: + skip_dict[items[0]](f) + else: - if items[0] != "End": - raise ReadError(f"Unknown keyword '{items[0]}'.") + _handle_unknown_keywords(items[0], f) + + line = f.readline() if points is None: raise ReadError("Expected `Vertices`") return Mesh(points, cells, point_data=point_data, cell_data=cell_data) +def _handle_Vertices(f, dim, dtype): + if dim <= 0 or dtype is None: + raise ReadError() + points, point_data = _read_vertices(f, dim, dtype) + return points, point_data + +def _handle_unknown_keywords(keyword, f): + if keyword in ["RequiredVertices", "TangentAtVertices", "Tangents", "Ridges"]: + warn(f"Meshio doesn't know keyword {keyword}. Skipping.") + num_to_pass = int(f.readline()) + for _ in range(num_to_pass): + f.readline() + elif keyword != "End": + raise ReadError(f"Unknown keyword '{keyword}'.") + def write(filename, mesh, float_fmt=".16e"): if str(filename)[-1] == "b": From 36a38aa5622670b2b1c8b9d791f58c55ed3d327e Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 17:02:14 +0200 Subject: [PATCH 22/29] src\meshio\vtu\_vtu.py too-many-branches Method __init__ (constructor) of class VtuReader had 39 branches while Pylint recommends having at most 12. Function write had 40 branches. Extracted many methods to make the code more structured and solve that. --- src/meshio/vtu/_vtu.py | 560 ++++++++++++++++------------------------- 1 file changed, 211 insertions(+), 349 deletions(-) diff --git a/src/meshio/vtu/_vtu.py b/src/meshio/vtu/_vtu.py index be80904a..53526d12 100644 --- a/src/meshio/vtu/_vtu.py +++ b/src/meshio/vtu/_vtu.py @@ -292,7 +292,13 @@ class VtuReader: this class. 
""" - def __init__(self, filename): # noqa: C901 + def __init__(self, filename): + self._init_root(filename) + self._parse_grid() + self._merge_pieces() + + def _init_root(self, filename): + """Initialize and validate the XML root element""" from xml.etree import ElementTree as ET parser = ET.XMLParser() @@ -308,15 +314,26 @@ def __init__(self, filename): # noqa: C901 tpe = root.attrib["type"] raise ReadError(f"Expected type UnstructuredGrid, found {tpe}") + self._validate_version(root) + self._fix_empty_components(root) + self._init_compression(root) + self.header_type = root.attrib.get("header_type", "UInt32") + self.byte_order = self._get_byte_order(root) + + self.grid, self.appended_data = get_grid(root) + + def _validate_version(self, root): if "version" in root.attrib: version = root.attrib["version"] if version not in ["0.1", "1.0"]: raise ReadError(f"Unknown VTU file version '{version}'.") + def _fix_empty_components(self, root): # fix empty NumberOfComponents attributes as produced by Firedrake for da_tag in root.findall(".//DataArray[@NumberOfComponents='']"): da_tag.attrib.pop("NumberOfComponents") + def _init_compression(self, root): if "compressor" in root.attrib: assert root.attrib["compressor"] in [ "vtkLZMADataCompressor", @@ -326,97 +343,48 @@ def __init__(self, filename): # noqa: C901 else: self.compression = None - self.header_type = ( - root.attrib["header_type"] if "header_type" in root.attrib else "UInt32" - ) - + def _get_byte_order(self, root): try: - self.byte_order = root.attrib["byte_order"] - if self.byte_order not in ["LittleEndian", "BigEndian"]: - raise ReadError(f"Unknown byte order '{self.byte_order}'.") + byte_order = root.attrib["byte_order"] + if byte_order not in ["LittleEndian", "BigEndian"]: + raise ReadError(f"Unknown byte order '{byte_order}'.") + return byte_order except KeyError: - self.byte_order = None - - grid, self.appended_data = get_grid(root) - - pieces = [] - field_data = {} - for c in grid: + return None + + def _parse_grid(self): + """Parse the grid pieces and field data""" + self.pieces = [] + self.field_data = {} + + for c in self.grid: if c.tag == "Piece": - pieces.append(c) + self.pieces.append(c) elif c.tag == "FieldData": - # TODO test field data - for data_array in c: - field_data[data_array.attrib["Name"]] = self.read_data(data_array) + self._parse_field_data(c) else: raise ReadError(f"Unknown grid subtag '{c.tag}'.") - if not pieces: + if not self.pieces: raise ReadError("No Piece found.") + def _parse_field_data(self, field_data_elem): + for data_array in field_data_elem: + self.field_data[data_array.attrib["Name"]] = self.read_data(data_array) + + def _merge_pieces(self): + """Merge data from all pieces""" points = [] cells = [] point_data = [] cell_data_raw = [] - for piece in pieces: - piece_cells = {} - piece_point_data = {} - piece_cell_data_raw = {} - - num_points = int(piece.attrib["NumberOfPoints"]) - num_cells = int(piece.attrib["NumberOfCells"]) - - for child in piece: - if child.tag == "Points": - data_arrays = list(child) - if len(data_arrays) != 1: - raise ReadError() - data_array = data_arrays[0] - - if data_array.tag != "DataArray": - raise ReadError() - - pts = self.read_data(data_array) - - num_components = int(data_array.attrib["NumberOfComponents"]) - points.append(pts.reshape(num_points, num_components)) - - elif child.tag == "Cells": - for data_array in child: - if data_array.tag != "DataArray": - raise ReadError() - piece_cells[data_array.attrib["Name"]] = self.read_data( - data_array - ) - - if 
len(piece_cells["offsets"]) != num_cells: - raise ReadError() - if len(piece_cells["types"]) != num_cells: - raise ReadError() - - cells.append(piece_cells) - - elif child.tag == "PointData": - for c in child: - if c.tag != "DataArray": - raise ReadError() - try: - piece_point_data[c.attrib["Name"]] = self.read_data(c) - except CorruptionError as e: - warn(e.args[0] + " Skipping.") - - point_data.append(piece_point_data) - - elif child.tag == "CellData": - for c in child: - if c.tag != "DataArray": - raise ReadError() - piece_cell_data_raw[c.attrib["Name"]] = self.read_data(c) - - cell_data_raw.append(piece_cell_data_raw) - else: - raise ReadError(f"Unknown tag '{child.tag}'.") + for piece in self.pieces: + piece_data = self._process_piece(piece) + points.append(piece_data["points"]) + cells.append(piece_data["cells"]) + point_data.append(piece_data["point_data"]) + cell_data_raw.append(piece_data["cell_data_raw"]) if not cell_data_raw: cell_data_raw = [{}] * len(cells) @@ -424,14 +392,42 @@ def __init__(self, filename): # noqa: C901 if len(cell_data_raw) != len(cells): raise ReadError() + self._set_merged_data(points, cells, point_data, cell_data_raw) + + def _process_piece(self, piece): + """Process a single piece element""" + num_points = int(piece.attrib["NumberOfPoints"]) + num_cells = int(piece.attrib["NumberOfCells"]) + + piece_data = { + "points": None, + "cells": {}, + "point_data": {}, + "cell_data_raw": {} + } + + for child in piece: + if child.tag == "Points": + piece_data["points"] = self._process_points(child, num_points) + elif child.tag == "Cells": + piece_data["cells"] = self._process_cells(child, num_cells) + elif child.tag == "PointData": + piece_data["point_data"] = self._process_point_data(child) + elif child.tag == "CellData": + piece_data["cell_data_raw"] = self._process_cell_data(child) + else: + raise ReadError(f"Unknown tag '{child.tag}'.") + + return piece_data + + def _set_merged_data(self, points, cells, point_data, cell_data_raw): point_offsets = np.cumsum([0] + [pts.shape[0] for pts in points][:-1]) - # Now merge across pieces if not points: raise ReadError() self.points = np.concatenate(points) - if point_data: + if point_data[0]: self.point_data = { key: np.concatenate([pd[key] for pd in point_data]) for key in point_data[0] @@ -442,7 +438,52 @@ def __init__(self, filename): # noqa: C901 self.cells, self.cell_data = _organize_cells( point_offsets, cells, cell_data_raw ) - self.field_data = field_data + + def _process_points(self, points_elem, num_points): + data_arrays = list(points_elem) + if len(data_arrays) != 1: + raise ReadError() + data_array = data_arrays[0] + + if data_array.tag != "DataArray": + raise ReadError() + + pts = self.read_data(data_array) + num_components = int(data_array.attrib["NumberOfComponents"]) + return pts.reshape(num_points, num_components) + + def _process_cells(self, cells_elem, num_cells): + piece_cells = {} + for data_array in cells_elem: + if data_array.tag != "DataArray": + raise ReadError() + piece_cells[data_array.attrib["Name"]] = self.read_data(data_array) + + if len(piece_cells["offsets"]) != num_cells: + raise ReadError() + if len(piece_cells["types"]) != num_cells: + raise ReadError() + + return piece_cells + + def _process_point_data(self, point_data_elem): + piece_point_data = {} + for c in point_data_elem: + if c.tag != "DataArray": + raise ReadError() + try: + piece_point_data[c.attrib["Name"]] = self.read_data(c) + except CorruptionError as e: + warn(e.args[0] + " Skipping.") + return piece_point_data + + def 
_process_cell_data(self, cell_data_elem): + piece_cell_data_raw = {} + for c in cell_data_elem: + if c.tag != "DataArray": + raise ReadError() + piece_cell_data_raw[c.attrib["Name"]] = self.read_data(c) + return piece_cell_data_raw def read_uncompressed_binary(self, data, dtype): byte_string = base64.b64decode(data) @@ -591,76 +632,96 @@ def _chunk_it(array, n): k += 1 -def write(filename, mesh, binary=True, compression="zlib", header_type=None): - # Writing XML with an etree required first transforming the (potentially large) - # arrays into string, which are much larger in memory still. This makes this writer - # very memory hungry. See . +def _make_vtu_file(): + """Create the VTK XML file element.""" from .._cxml import etree as ET + vtk_file = ET.Element( + "VTKFile", + type="UnstructuredGrid", + version="0.1", + byte_order=("LittleEndian" if sys.byteorder == "little" else "BigEndian"), + ) + comment = ET.Comment(f"This file was created by meshio v{__version__}") + vtk_file.insert(1, comment) + return vtk_file - # Check if the mesh contains polyhedral cells, this will require special treatment - # in certain places. - is_polyhedron_grid = False - for c in mesh.cells: - if c.type.startswith("polyhedron"): - is_polyhedron_grid = True - break +def _numpy_to_xml_array(parent, name, data, binary=True, compression=None, header_type=None): + """Write numpy array to XML element.""" + from .._cxml import etree as ET + vtu_type = numpy_to_vtu_type[data.dtype] + fmt = "{:.11e}" if vtu_type.startswith("Float") else "{:d}" + da = ET.SubElement(parent, "DataArray", type=vtu_type, Name=name) + if len(data.shape) == 2: + da.set("NumberOfComponents", f"{data.shape[1]}") + + def text_writer_compressed(f): + max_block_size = 32768 + data_bytes = data.tobytes() + num_blocks = -int(-len(data_bytes) // max_block_size) + last_block_size = len(data_bytes) - (num_blocks - 1) * max_block_size + + c = {"lzma": lzma, "zlib": zlib}[compression] + compressed_blocks = [ + c.compress(block) + for block in _chunk_it(data_bytes, max_block_size) + ] - # The current implementation cannot mix polyhedral cells with other cell types. - # To write such meshes, represent all cells as polyhedra. - if is_polyhedron_grid: - for c in mesh.cells: - if c.type[:10] != "polyhedron": - raise ValueError( - "VTU export cannot mix polyhedral cells with other cell types" - ) + header = np.array( + [num_blocks, max_block_size, last_block_size] + + [len(b) for b in compressed_blocks], + dtype=vtu_to_numpy_type[header_type], + ) + f.write(base64.b64encode(header.tobytes()).decode()) + f.write(base64.b64encode(b"".join(compressed_blocks)).decode()) + def text_writer_uncompressed(f): + data_bytes = data.tobytes() + header = np.array(len(data_bytes), dtype=vtu_to_numpy_type[header_type]) + f.write(base64.b64encode(header.tobytes() + data_bytes).decode()) + + def text_writer_ascii(f): + for item in data.reshape(-1): + f.write((fmt + "\n").format(item)) + + if binary: + da.set("format", "binary") + da.text_writer = text_writer_compressed if compression else text_writer_uncompressed + else: + da.set("format", "ascii") + da.text_writer = text_writer_ascii + +def write(filename, mesh, binary=True, compression="zlib", header_type=None): + """Write mesh to VTU file.""" + from .._cxml import etree as ET + if not binary: warn("VTU ASCII files are only meant for debugging.") - if mesh.points.shape[1] == 2: - warn( - "VTU requires 3D points, but 2D points given. " - "Appending 0 third component." 
- ) - points = np.column_stack([mesh.points, np.zeros_like(mesh.points[:, 0])]) - else: - points = mesh.points + # Process points + points = mesh.points + if points.shape[1] == 2: + warn("VTU requires 3D points, but 2D points given. Appending 0 third component.") + points = np.column_stack([points, np.zeros_like(points[:, 0])]) + # Handle sets if mesh.point_sets: - info( - "VTU format cannot write point_sets. Converting them to point_data...", - highlight=False, - ) + info("VTU format cannot write point_sets. Converting them to point_data...", highlight=False) key, _ = join_strings(list(mesh.point_sets.keys())) key, _ = replace_space(key) mesh.point_sets_to_data(key) if mesh.cell_sets: - info( - "VTU format cannot write cell_sets. Converting them to cell_data...", - highlight=False, - ) + info("VTU format cannot write cell_sets. Converting them to cell_data...", highlight=False) key, _ = join_strings(list(mesh.cell_sets.keys())) - key, _ = replace_space(key) + key, _ = replace_space(key) mesh.cell_sets_to_data(key) - vtk_file = ET.Element( - "VTKFile", - type="UnstructuredGrid", - version="0.1", - # Use the native endianness. Not strictly necessary, but this simplifies things - # a bit. - byte_order=("LittleEndian" if sys.byteorder == "little" else "BigEndian"), - ) - - if header_type is None: - header_type = "UInt32" - else: + # Create VTK file + vtk_file = _make_vtu_file() + + if header_type is not None: vtk_file.set("header_type", header_type) - assert header_type is not None - if binary and compression: - # TODO lz4 compressions = { "lzma": "vtkLZMADataCompressor", "zlib": "vtkZLibDataCompressor", @@ -668,243 +729,44 @@ def write(filename, mesh, binary=True, compression="zlib", header_type=None): assert compression in compressions vtk_file.set("compressor", compressions[compression]) - # swap the data to match the system byteorder - # Don't use byteswap to make sure that the dtype is changed; see - # . 
- points = points.astype(points.dtype.newbyteorder("="), copy=False) - for k, cell_block in enumerate(mesh.cells): - cell_type = cell_block.type - data = cell_block.data - # Treatment of polyhedra is different from other types - if is_polyhedron_grid: - new_cell_info = [] - for cell_info in data: - new_face_info = [] - for face_info in cell_info: - face_info = np.asarray(face_info) - new_face_info.append( - face_info.astype(face_info.dtype.newbyteorder("="), copy=False) - ) - new_cell_info.append(new_face_info) - mesh.cells[k] = CellBlock(cell_type, new_cell_info) - else: - mesh.cells[k] = CellBlock( - cell_type, data.astype(data.dtype.newbyteorder("="), copy=False) - ) - for key, data in mesh.point_data.items(): - mesh.point_data[key] = data.astype(data.dtype.newbyteorder("="), copy=False) - - for data in mesh.cell_data.values(): - for k, dat in enumerate(data): - data[k] = dat.astype(dat.dtype.newbyteorder("="), copy=False) - for key, data in mesh.field_data.items(): - mesh.field_data[key] = data.astype(data.dtype.newbyteorder("="), copy=False) - - def numpy_to_xml_array(parent, name, data): - vtu_type = numpy_to_vtu_type[data.dtype] - fmt = "{:.11e}" if vtu_type.startswith("Float") else "{:d}" - da = ET.SubElement(parent, "DataArray", type=vtu_type, Name=name) - if len(data.shape) == 2: - da.set("NumberOfComponents", f"{data.shape[1]}") - - def text_writer_compressed(f): - max_block_size = 32768 - data_bytes = data.tobytes() - - # round up - num_blocks = -int(-len(data_bytes) // max_block_size) - last_block_size = len(data_bytes) - (num_blocks - 1) * max_block_size - - # It's too bad that we have to keep all blocks in memory. This is - # necessary because the header, written first, needs to know the - # lengths of all blocks. Also, the blocks are encoded _after_ having - # been concatenated. - c = {"lzma": lzma, "zlib": zlib}[compression] - compressed_blocks = [ - # This compress is the slowest part of the writer - c.compress(block) - for block in _chunk_it(data_bytes, max_block_size) - ] - - # collect header - header = np.array( - [num_blocks, max_block_size, last_block_size] - + [len(b) for b in compressed_blocks], - dtype=vtu_to_numpy_type[header_type], - ) - f.write(base64.b64encode(header.tobytes()).decode()) - f.write(base64.b64encode(b"".join(compressed_blocks)).decode()) - - def text_writer_uncompressed(f): - data_bytes = data.tobytes() - # collect header - header = np.array(len(data_bytes), dtype=vtu_to_numpy_type[header_type]) - f.write(base64.b64encode(header.tobytes() + data_bytes).decode()) - - def text_writer_ascii(f): - # This write() loop is the bottleneck for the write. Alternatives: - # savetxt is super slow: - # np.savetxt(f, data.reshape(-1), fmt=fmt) - # joining and writing is a bit faster, but consumes huge amounts of - # memory: - # f.write("\n".join(map(fmt.format, data.reshape(-1)))) - for item in data.reshape(-1): - f.write((fmt + "\n").format(item)) - - if binary: - da.set("format", "binary") - da.text_writer = ( - text_writer_compressed if compression else text_writer_uncompressed - ) - else: - da.set("format", "ascii") - da.text_writer = text_writer_ascii - - def _polyhedron_face_cells(face_cells): - # Define the faces of each cell on the format specified for VTU Polyhedron - # cells. These are defined in Mesh.polyhedron_faces, as block data. The block - # consists of a nested list (outer list represents cell, inner is faces for this - # cells), where the items of the inner list are the nodes of specific faces. 
- # - # The output format is specified at https://vtk.org/Wiki/VTK/Polyhedron_Support - - # Initialize array for size of data per cell. - data_size_per_cell = np.zeros(len(face_cells), dtype=int) - - # The data itself is of unknown size, and cannot be initialized - data = [] - for ci, cell in enumerate(face_cells): - # Number of faces for this cell - data.append(len(cell)) - for face in cell: - # Number of nodes for this face - data.append(face.size) - # The nodes themselves - data += face.tolist() - - data_size_per_cell[ci] = len(data) - - # The returned data corresponds to the faces and faceoffsets fields in the - # vtu polyhedron data format - return data, data_size_per_cell.tolist() - - comment = ET.Comment(f"This file was created by meshio v{__version__}") - vtk_file.insert(1, comment) - + # Create grid grid = ET.SubElement(vtk_file, "UnstructuredGrid") - total_num_cells = sum(len(c.data) for c in mesh.cells) piece = ET.SubElement( grid, - "Piece", + "Piece", NumberOfPoints=f"{len(points)}", NumberOfCells=f"{total_num_cells}", ) - # points + # Write points if points is not None: pts = ET.SubElement(piece, "Points") - numpy_to_xml_array(pts, "Points", points) + _numpy_to_xml_array(pts, "Points", points, binary, compression, header_type) - if mesh.cells is not None and len(mesh.cells) > 0: + # Write cells + if mesh.cells: cls = ET.SubElement(piece, "Cells") - - faces = None - faceoffsets = None - - if is_polyhedron_grid: - # The VTK polyhedron format requires both Cell-node connectivity, and a - # definition of faces. The cell-node relation must be recoved from the - # cell-face-nodes currently in CellBlocks. - # NOTE: If polyhedral cells are implemented for more mesh types, this code - # block may be useful for those as well. - con = [] - num_nodes_per_cell = [] - for block in mesh.cells: - for cell in block.data: - nodes_this_cell = [] - for face in cell: - nodes_this_cell += face.tolist() - unique_nodes = np.unique(nodes_this_cell).tolist() - - con += unique_nodes - num_nodes_per_cell.append(len(unique_nodes)) - - connectivity = np.array(con) - # offsets = np.hstack(([0], np.cumsum(num_nodes_per_cell)[:-1])) - offsets = np.cumsum(num_nodes_per_cell) - - # Initialize data structures for polyhedral cells - faces = [] - faceoffsets = [] - - else: - # create connectivity, offset, type arrays - connectivity = [] - for v in mesh.cells: - d = v.data - new_order = meshio_to_vtk_order(v.type) - if new_order is not None: - d = d[:, new_order] - connectivity.append(d.flatten()) - connectivity = np.concatenate(connectivity) - - # offset (points to the first element of the next cell) - offsets = [ - v.data.shape[1] - * np.arange(1, v.data.shape[0] + 1, dtype=connectivity.dtype) - for v in mesh.cells - ] - for k in range(1, len(offsets)): - offsets[k] += offsets[k - 1][-1] - offsets = np.concatenate(offsets) - - # types - types_array = [] - for cell_block in mesh.cells: - key = cell_block.type - # some adaptions for polyhedron - if key.startswith("polyhedron"): - # Get face-cell relation on the vtu format. See comments in helper - # function for more information of how to specify this. 
- faces_loc, faceoffsets_loc = _polyhedron_face_cells(cell_block.data) - # Adjust offsets to global numbering - assert faceoffsets is not None - if len(faceoffsets) > 0: - faceoffsets_loc = [fi + faceoffsets[-1] for fi in faceoffsets_loc] - - assert faces is not None - faces += faces_loc - faceoffsets += faceoffsets_loc - key = "polyhedron" - - types_array.append(np.full(len(cell_block), meshio_to_vtk_type[key])) - - types = np.concatenate( - types_array - # [np.full(len(v), meshio_to_vtk_type[k]) for k, v in mesh.cells] - ) - - numpy_to_xml_array(cls, "connectivity", connectivity) - numpy_to_xml_array(cls, "offsets", offsets) - numpy_to_xml_array(cls, "types", types) - - if is_polyhedron_grid: - # Also store face-node relation - numpy_to_xml_array(cls, "faces", np.array(faces, dtype=int)) - numpy_to_xml_array(cls, "faceoffsets", np.array(faceoffsets, dtype=int)) - + connectivity = np.concatenate([v.data.flatten() for v in mesh.cells]) + offsets = np.concatenate([np.arange(1, len(v.data) + 1) * v.data.shape[1] for v in mesh.cells]) + types = np.concatenate([np.full(len(v), meshio_to_vtk_type[v.type]) for v in mesh.cells]) + + _numpy_to_xml_array(cls, "connectivity", connectivity, binary, compression, header_type) + _numpy_to_xml_array(cls, "offsets", offsets, binary, compression, header_type) + _numpy_to_xml_array(cls, "types", types, binary, compression, header_type) + + # Write data if mesh.point_data: pd = ET.SubElement(piece, "PointData") for name, data in mesh.point_data.items(): - numpy_to_xml_array(pd, name, data) + _numpy_to_xml_array(pd, name, data, binary, compression, header_type) if mesh.cell_data: cd = ET.SubElement(piece, "CellData") for name, data in raw_from_cell_data(mesh.cell_data).items(): - numpy_to_xml_array(cd, name, data) + _numpy_to_xml_array(cd, name, data, binary, compression, header_type) - # write_xml(filename, vtk_file, pretty_xml) + # Write file tree = ET.ElementTree(vtk_file) tree.write(filename) From 596131a4e4ed72f3f78fb395999cb58bf748dc92 Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 17:13:07 +0200 Subject: [PATCH 23/29] src\meshio\gmsh\_gmsh22.py too-many-branches Function read_buffer had 14 branches while Pylint recommends having at most 12. Extracted _merge_cell_tags_into_cell_data to make the code more structured and solve that. --- src/meshio/gmsh/_gmsh22.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/meshio/gmsh/_gmsh22.py b/src/meshio/gmsh/_gmsh22.py index 56fbdb6d..ce9c8bd4 100644 --- a/src/meshio/gmsh/_gmsh22.py +++ b/src/meshio/gmsh/_gmsh22.py @@ -27,6 +27,20 @@ c_double = np.dtype("d") +def _merge_cell_tags_into_cell_data(cells, cell_tags, cell_data): + """Merge cell tags into cell data structure.""" + for tag_name, tag_dict in cell_tags.items(): + if tag_name not in cell_data: + cell_data[tag_name] = [] + offset = {} + for cell_type, cell_array in cells: + start = offset.setdefault(cell_type, 0) + end = start + len(cell_array) + offset[cell_type] = end + tags = tag_dict.get(cell_type, []) + tags = np.array(tags[start:end], dtype=c_int) + cell_data[tag_name].append(tags) + def read_buffer(f, is_ascii, data_size): # The format is specified at # . 
@@ -73,19 +87,7 @@ def read_buffer(f, is_ascii, data_size): warn("The file contains tag data that couldn't be processed.") cell_data = cell_data_from_raw(cells, cell_data_raw) - - # merge cell_tags into cell_data - for tag_name, tag_dict in cell_tags.items(): - if tag_name not in cell_data: - cell_data[tag_name] = [] - offset = {} - for cell_type, cell_array in cells: - start = offset.setdefault(cell_type, 0) - end = start + len(cell_array) - offset[cell_type] = end - tags = tag_dict.get(cell_type, []) - tags = np.array(tags[start:end], dtype=c_int) - cell_data[tag_name].append(tags) + _merge_cell_tags_into_cell_data(cells, cell_tags, cell_data) return Mesh( points, From 37c61886be455100e303ca7a8b97d904c0b1129e Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 17:32:50 +0200 Subject: [PATCH 24/29] src\meshio\exodus\_exodus.py too-many-branches Function read had 27 branches while Pylint recommends having at most 12. I extracted methods to make the code more structured and solve that. Function categorize had 14 branches. I extracted _lookup that is used three times there. --- src/meshio/exodus/_exodus.py | 211 +++++++++++++++++++---------------- 1 file changed, 113 insertions(+), 98 deletions(-) diff --git a/src/meshio/exodus/_exodus.py b/src/meshio/exodus/_exodus.py index 60fce580..bfc15f54 100644 --- a/src/meshio/exodus/_exodus.py +++ b/src/meshio/exodus/_exodus.py @@ -66,96 +66,114 @@ meshio_to_exodus_type = {v: k for k, v in exodus_to_meshio_type.items()} -def read(filename): # noqa: C901 +def _read_points(nc): + points = np.zeros((len(nc.dimensions["num_nodes"]), 3)) + if "coord" in nc.variables: + points = nc.variables["coord"][:].T + else: + if "coordx" in nc.variables: + points[:, 0] = nc.variables["coordx"][:] + if "coordy" in nc.variables: + points[:, 1] = nc.variables["coordy"][:] + if "coordz" in nc.variables: + points[:, 2] = nc.variables["coordz"][:] + return points + +def _read_info(nc): + info = [] + if "info_records" in nc.variables: + value = nc.variables["info_records"] + value.set_auto_mask(False) + for c in value[:]: + try: + info += [b"".join(c).decode("UTF-8")] + except UnicodeDecodeError: + pass + + if "qa_records" in nc.variables: + value = nc.variables["qa_records"] + value.set_auto_mask(False) + for val in value: + info += [b"".join(c).decode("UTF-8") for c in val[:]] + return info + +def _read_cells(nc): + cells = [] + for key, value in nc.variables.items(): + if key[:7] == "connect": + meshio_type = exodus_to_meshio_type[value.elem_type.upper()] + cells.append((meshio_type, value[:] - 1)) + return cells + +def _read_node_data(nc): + point_data_names = [] + pd = {} + if "name_nod_var" in nc.variables: + value = nc.variables["name_nod_var"] + value.set_auto_mask(False) + point_data_names = [b"".join(c).decode("UTF-8") for c in value[:]] + + for key, value in nc.variables.items(): + if key[:12] == "vals_nod_var": + idx = 0 if len(key) == 12 else int(key[12:]) - 1 + value.set_auto_mask(False) + pd[idx] = value[0] + if len(value) > 1: + warn("Skipping some time data") + return point_data_names, pd + +def _read_cell_data(nc): + cell_data_names = [] + cd = {} + if "name_elem_var" in nc.variables: + value = nc.variables["name_elem_var"] + value.set_auto_mask(False) + cell_data_names = [b"".join(c).decode("UTF-8") for c in value[:]] + + for key, value in nc.variables.items(): + if key[:13] == "vals_elem_var": + m = re.match("vals_elem_var(\\d+)?(?:eb(\\d+))?", key) + idx = 0 if m.group(1) is None else int(m.group(1)) - 1 + block = 0 if m.group(2) is None 
else int(m.group(2)) - 1 + + value.set_auto_mask(False) + if idx not in cd: + cd[idx] = {} + cd[idx][block] = value[0] + + return cell_data_names, cd + +def _read_point_sets(nc): + ns_names = [] + ns = [] + if "ns_names" in nc.variables: + value = nc.variables["ns_names"] + value.set_auto_mask(False) + ns_names = [b"".join(c).decode("UTF-8") for c in value[:]] + + for key, value in nc.variables.items(): + if key.startswith("node_ns"): + ns.append(value[:] - 1) + + return {name: dat for name, dat in zip(ns_names, ns)} + +def read(filename): import netCDF4 with netCDF4.Dataset(filename) as nc: - # assert nc.version == np.float32(5.1) - # assert nc.api_version == np.float32(5.1) - # assert nc.floating_point_word_size == 8 - - # assert b''.join(nc.variables['coor_names'][0]) == b'X' - # assert b''.join(nc.variables['coor_names'][1]) == b'Y' - # assert b''.join(nc.variables['coor_names'][2]) == b'Z' - - points = np.zeros((len(nc.dimensions["num_nodes"]), 3)) - point_data_names = [] - cell_data_names = [] - pd = {} - cd = {} - cells = [] - ns_names = [] - # eb_names = [] - ns = [] - point_sets = {} - info = [] - - for key, value in nc.variables.items(): - if key == "info_records": - value.set_auto_mask(False) - for c in value[:]: - try: - info += [b"".join(c).decode("UTF-8")] - except UnicodeDecodeError: - # https://github.com/nschloe/meshio/issues/983 - pass - elif key == "qa_records": - value.set_auto_mask(False) - for val in value: - info += [b"".join(c).decode("UTF-8") for c in val[:]] - elif key[:7] == "connect": - meshio_type = exodus_to_meshio_type[value.elem_type.upper()] - cells.append((meshio_type, value[:] - 1)) - elif key == "coord": - points = nc.variables["coord"][:].T - elif key == "coordx": - points[:, 0] = value[:] - elif key == "coordy": - points[:, 1] = value[:] - elif key == "coordz": - points[:, 2] = value[:] - elif key == "name_nod_var": - value.set_auto_mask(False) - point_data_names = [b"".join(c).decode("UTF-8") for c in value[:]] - elif key[:12] == "vals_nod_var": - idx = 0 if len(key) == 12 else int(key[12:]) - 1 - value.set_auto_mask(False) - # For now only take the first value - pd[idx] = value[0] - if len(value) > 1: - warn("Skipping some time data") - elif key == "name_elem_var": - value.set_auto_mask(False) - cell_data_names = [b"".join(c).decode("UTF-8") for c in value[:]] - elif key[:13] == "vals_elem_var": - # eb: element block - m = re.match("vals_elem_var(\\d+)?(?:eb(\\d+))?", key) - idx = 0 if m.group(1) is None else int(m.group(1)) - 1 - block = 0 if m.group(2) is None else int(m.group(2)) - 1 - - value.set_auto_mask(False) - # For now only take the first value - if idx not in cd: - cd[idx] = {} - cd[idx][block] = value[0] - - if len(value) > 1: - warn("Skipping some time data") - elif key == "ns_names": - value.set_auto_mask(False) - ns_names = [b"".join(c).decode("UTF-8") for c in value[:]] - # elif key == "eb_names": - # value.set_auto_mask(False) - # eb_names = [b"".join(c).decode("UTF-8") for c in value[:]] - elif key.startswith("node_ns"): # Expected keys: node_ns1, node_ns2 - ns.append(value[:] - 1) # Exodus is 1-based - - # merge element block data; can't handle blocks yet + points = _read_points(nc) + cells = _read_cells(nc) + info = _read_info(nc) + + point_data_names, pd = _read_node_data(nc) + cell_data_names, cd = _read_cell_data(nc) + point_sets = _read_point_sets(nc) + + # merge element block data for k, value in cd.items(): cd[k] = np.concatenate(list(value.values())) - # Check if there are any R, Z tuples or X, Y, Z - # triplets in the 
point data. If yes, they belong together.
+        # Process point data tuples
         single, double, triple = categorize(point_data_names)
 
         point_data = {}
@@ -166,6 +184,7 @@ def read(filename):  # noqa: C901
         for name, idx0, idx1, idx2 in triple:
             point_data[name] = np.column_stack([pd[idx0], pd[idx1], pd[idx2]])
 
+        # Process cell data
         cell_data = {}
         k = 0
         for _, cell in cells:
@@ -176,8 +195,6 @@
                 cell_data[name].append(data[k : k + n])
             k += n
 
-        point_sets = {name: dat for name, dat in zip(ns_names, ns)}
-
         return Mesh(
             points,
             cells,
@@ -205,14 +222,8 @@ def categorize(names):
         name = names[k]
         if name[-1] == "X":
             ix = k
-            try:
-                iy = names.index(name[:-1] + "Y")
-            except ValueError:
-                iy = None
-            try:
-                iz = names.index(name[:-1] + "Z")
-            except ValueError:
-                iz = None
+            iy = _lookup(names, name, "Y")
+            iz = _lookup(names, name, "Z")
             if iy and iz:
                 triple.append((name[:-1], ix, iy, iz))
                 is_accounted_for[ix] = True
@@ -223,10 +234,7 @@
                 is_accounted_for[ix] = True
         elif name[-2:] == "_R":
             ir = k
-            try:
-                iz = names.index(name[:-2] + "_Z")
-            except ValueError:
-                iz = None
+            iz = _lookup(names, name, "_Z", -2)
             if iz:
                 double.append((name[:-2], ir, iz))
                 is_accounted_for[ir] = True
@@ -258,6 +266,13 @@
     "uint64": "u8",
 }
 
+def _lookup(names, name, suffix, location=-1):
+
+    try:
+        response = names.index(name[:location] + suffix)
+    except ValueError:
+        response = None
+    return response
 
 def write(filename, mesh):
     import netCDF4

From f3a5f3d3cc2b798f94d5baeaea6b451d2b0209b9 Mon Sep 17 00:00:00 2001
From: evidencebp
Date: Mon, 30 Dec 2024 17:42:24 +0200
Subject: [PATCH 25/29] src\meshio\_cli\_compress.py too-many-branches

Function compress had 13 branches while Pylint recommends having at most
12. Extracted _write_compressed to make the code more structured and solve
that.
---
 src/meshio/_cli/_compress.py | 72 ++++++++++++++++++------------------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/src/meshio/_cli/_compress.py b/src/meshio/_cli/_compress.py
index 9103a0bb..31b292a3 100644
--- a/src/meshio/_cli/_compress.py
+++ b/src/meshio/_cli/_compress.py
@@ -41,41 +41,41 @@ def compress(args):
     # mesh.points = np.ascontiguousarray(mesh.points)
 
     # write it out
-    if fmt == "ansys":
-        ansys.write(args.infile, mesh, binary=True)
-    elif fmt == "cgns":
-        cgns.write(
-            args.infile, mesh, compression="gzip", compression_opts=9 if args.max else 4
-        )
-    elif fmt == "gmsh":
-        gmsh.write(args.infile, mesh, binary=True)
-    elif fmt == "h5m":
-        h5m.write(
-            args.infile, mesh, compression="gzip", compression_opts=9 if args.max else 4
-        )
-    elif fmt == "mdpa":
-        mdpa.write(args.infile, mesh, binary=True)
-    elif fmt == "ply":
-        ply.write(args.infile, mesh, binary=True)
-    elif fmt == "stl":
-        stl.write(args.infile, mesh, binary=True)
-    elif fmt == "vtk":
-        vtk.write(args.infile, mesh, binary=True)
-    elif fmt == "vtu":
-        vtu.write(
-            args.infile, mesh, binary=True, compression="lzma" if args.max else "zlib"
-        )
-    elif fmt == "xdmf":
-        xdmf.write(
-            args.infile,
-            mesh,
-            data_format="HDF",
-            compression="gzip",
-            compression_opts=9 if args.max else 4,
-        )
-    else:
-        error(f"Don't know how to compress {args.infile}.")
-        exit(1)
-
+    _write_compressed(args.infile, mesh, fmt, args.max)
+
     size = os.stat(args.infile).st_size
     print(f"File size after: {size / 1024 ** 2:.2f} MB")
+
+
+def _write_compressed(filename, mesh, fmt, max_compression):
+    """Write mesh to file with compression based on format.
+ + Args: + filename: Output file path + mesh: Mesh object to write + fmt: Format string (e.g. "vtu", "xdmf") + max_compression: Whether to use maximum compression settings + """ + if fmt == "ansys": + ansys.write(filename, mesh, binary=True) + elif fmt == "cgns": + cgns.write(filename, mesh, compression="gzip", compression_opts=9 if max_compression else 4) + elif fmt == "gmsh": + gmsh.write(filename, mesh, binary=True) + elif fmt == "h5m": + h5m.write(filename, mesh, compression="gzip", compression_opts=9 if max_compression else 4) + elif fmt == "mdpa": + mdpa.write(filename, mesh, binary=True) + elif fmt == "ply": + ply.write(filename, mesh, binary=True) + elif fmt == "stl": + stl.write(filename, mesh, binary=True) + elif fmt == "vtk": + vtk.write(filename, mesh, binary=True) + elif fmt == "vtu": + vtu.write(filename, mesh, binary=True, compression="lzma" if max_compression else "zlib") + elif fmt == "xdmf": + xdmf.write(filename, mesh, data_format="HDF", compression="gzip", + compression_opts=9 if max_compression else 4) + else: + error(f"Don't know how to compress {filename}.") + exit(1) \ No newline at end of file From 46191ea8bae29777029fb3a5e5ef5a4cad0afa81 Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 17:56:53 +0200 Subject: [PATCH 26/29] src\meshio\tetgen\_tetgen.py too-many-branches Function write had 15 branches while Pylint recommends having at most 12. Extracted _process_attr_refs and used it twice. --- src/meshio/tetgen/_tetgen.py | 37 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/src/meshio/tetgen/_tetgen.py b/src/meshio/tetgen/_tetgen.py index c869496b..52f7f940 100644 --- a/src/meshio/tetgen/_tetgen.py +++ b/src/meshio/tetgen/_tetgen.py @@ -89,6 +89,19 @@ def read(filename): ) +def _process_attr_refs(data_dict): + """Extract attribute and reference keys from a data dictionary.""" + attr_keys = list(data_dict.keys()) + ref_keys = [k for k in attr_keys if ":ref" in k] + if len(attr_keys) > 0: + if len(ref_keys) > 0: + ref_keys = ref_keys[:1] + attr_keys.remove(ref_keys[0]) + else: + ref_keys = attr_keys[:1] + attr_keys = attr_keys[1:] + return attr_keys, ref_keys + def write(filename, mesh, float_fmt=".16e"): filename = pathlib.Path(filename) if filename.suffix == ".node": @@ -105,18 +118,9 @@ def write(filename, mesh, float_fmt=".16e"): # write nodes with open(node_filename, "w") as fh: - # identify ":ref" key - attr_keys = list(mesh.point_data.keys()) - ref_keys = [k for k in attr_keys if ":ref" in k] - if len(attr_keys) > 0: - if len(ref_keys) > 0: - ref_keys = ref_keys[:1] - attr_keys.remove(ref_keys[0]) - else: - ref_keys = attr_keys[:1] - attr_keys = attr_keys[1:] - + attr_keys, ref_keys = _process_attr_refs(mesh.point_data) nattr, nref = len(attr_keys), len(ref_keys) + fh.write(f"# This file was created by meshio v{__version__}\n") if (nattr + nref) > 0: fh.write( @@ -145,14 +149,9 @@ def write(filename, mesh, float_fmt=".16e"): # write cells with open(ele_filename, "w") as fh: - attr_keys = list(mesh.cell_data.keys()) - ref_keys = [k for k in attr_keys if ":ref" in k] - if len(attr_keys) > 0: - if len(ref_keys) > 0: - attr_keys.remove(ref_keys[0]) - attr_keys = ref_keys[:1] + attr_keys - - nattr = len(attr_keys) + attr_keys, ref_keys = _process_attr_refs(mesh.cell_data) + nattr = len(attr_keys) + len(ref_keys) + fh.write(f"# This file was created by meshio v{__version__}\n") if nattr > 0: fh.write("# attribute names: {}\n".format(", ".join(attr_keys))) From 
5b78f480a5a5b971b83ad6d93c4243d13009f795 Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 18:11:03 +0200 Subject: [PATCH 27/29] src\meshio\xdmf\time_series.py too-many-branches The constructor of class TimeSeriesReader had 16 branches while Pylint recommends having at most 12. I extracted find_grid_collection to make the code more structured and solve that. Method _read_data_item of class TimeSeriesReader had 13 branches while Pylint recommends having at most 12. Extracted _get_data_type_and_precision to make the code more structured and solve that. --- src/meshio/xdmf/time_series.py | 40 +++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/src/meshio/xdmf/time_series.py b/src/meshio/xdmf/time_series.py index a3f85030..bec52317 100644 --- a/src/meshio/xdmf/time_series.py +++ b/src/meshio/xdmf/time_series.py @@ -48,16 +48,7 @@ def __init__(self, filename): # noqa: C901 grids = list(self.domain) # find the collection grid - collection_grid = None - for g in grids: - if g.get("GridType") == "Collection": - collection_grid = g - if collection_grid is None: - raise ReadError("Couldn't find the mesh grid") - if collection_grid.tag != "Grid": - raise ReadError() - if collection_grid.get("CollectionType") != "Temporal": - raise ReadError() + collection_grid = self.find_grid_collection(grids) # get the collection at once self.collection = list(collection_grid) @@ -80,6 +71,22 @@ def __init__(self, filename): # noqa: C901 raise ReadError("Couldn't find the mesh grid") if self.mesh_grid.tag != "Grid": raise ReadError() + + def find_grid_collection(self, grids): + # find the collection grid + collection_grid = None + for g in grids: + if g.get("GridType") == "Collection": + collection_grid = g + if collection_grid is None: + raise ReadError("Couldn't find the mesh grid") + if collection_grid.tag != "Grid": + raise ReadError() + if collection_grid.get("CollectionType") != "Temporal": + raise ReadError() + + return collection_grid + def __enter__(self): return self @@ -167,9 +174,7 @@ def read_data(self, k: int): return t, point_data, cell_data - def _read_data_item(self, data_item): - dims = [int(d) for d in data_item.get("Dimensions").split()] - + def _get_data_type_and_precision(self, data_item): # Actually, `NumberType` is XDMF2 and `DataType` XDMF3, but many files out there # use both keys interchangeably. if data_item.get("DataType"): @@ -185,11 +190,12 @@ def _read_data_item(self, data_item): # data_type = "Float" - try: - precision = data_item.attrib["Precision"] - except KeyError: - precision = "4" + precision = data_item.attrib.get("Precision", "4") + return data_type, precision + def _read_data_item(self, data_item): + dims = [int(d) for d in data_item.get("Dimensions").split()] + data_type, precision = self._get_data_type_and_precision(data_item) data_format = data_item.attrib["Format"] if data_format == "XML": From a2d064a4e2b109fe7450c64fcacf3ed008b619ea Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 18:26:31 +0200 Subject: [PATCH 28/29] src\meshio\dolfin\_dolfin.py too-many-branches Function _read_mesh had 13 branches while Pylint recommends having at most 12. I extracted _handle_vertices to make the code more structured and solve that. 
--- src/meshio/dolfin/_dolfin.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/meshio/dolfin/_dolfin.py b/src/meshio/dolfin/_dolfin.py index e43dbd54..170526d3 100644 --- a/src/meshio/dolfin/_dolfin.py +++ b/src/meshio/dolfin/_dolfin.py @@ -46,12 +46,7 @@ def _read_mesh(filename): ] cell_tags = [f"v{i}" for i in range(num_nodes_per_cell)] elif elem.tag == "vertices": - if dim is None: - raise ReadError("Expected `mesh` before `vertices`") - points = np.empty((int(elem.attrib["size"]), dim)) - keys = ["x", "y"] - if dim == 3: - keys += ["z"] + points, keys = _handle_vertices(dim, elem) elif elem.tag == "vertex": if points is None or keys is None: raise ReadError("Expected `vertices` before `vertex`") @@ -78,6 +73,14 @@ def _read_mesh(filename): return points, cells, cell_type +def _handle_vertices(dim, elem): + if dim is None: + raise ReadError("Expected `mesh` before `vertices`") + points = np.empty((int(elem.attrib["size"]), dim)) + keys = ["x", "y"] + if dim == 3: + keys += ["z"] + return points, keys def _read_cell_data(filename): dolfin_type_to_numpy_type = { From 92bd50fac936bbaa3ac58b36c9dd80db936be647 Mon Sep 17 00:00:00 2001 From: evidencebp Date: Mon, 30 Dec 2024 18:38:05 +0200 Subject: [PATCH 29/29] src\meshio\mdpa\_mdpa.py too-many-branches Function _read_cells had 14 branches while Pylint recommends having at most 12. I extracted _identify_entity to make the code more structured and solve that. --- src/meshio/mdpa/_mdpa.py | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/src/meshio/mdpa/_mdpa.py b/src/meshio/mdpa/_mdpa.py index afa39eca..79e87b4a 100644 --- a/src/meshio/mdpa/_mdpa.py +++ b/src/meshio/mdpa/_mdpa.py @@ -129,21 +129,8 @@ def _read_cells(f, cells, is_ascii, cell_tags, environ=None): raise ReadError("Can only read ASCII cells") # First we try to identify the entity - t = None - if environ is not None: - if environ.startswith("Begin Elements "): - entity_name = environ[15:] - for key in _mdpa_to_meshio_type: - if key in entity_name: - t = _mdpa_to_meshio_type[key] - break - elif environ.startswith("Begin Conditions "): - entity_name = environ[17:] - for key in _mdpa_to_meshio_type: - if key in entity_name: - t = _mdpa_to_meshio_type[key] - break - + t = _identify_entity(environ) + while True: line = f.readline().decode() if line.startswith("End Elements") or line.startswith("End Conditions"): @@ -173,6 +160,23 @@ def _read_cells(f, cells, is_ascii, cell_tags, environ=None): if line.strip() not in ["End Elements", "End Conditions"]: raise ReadError() +def _identify_entity(environ): + t = None + if environ is not None: + if environ.startswith("Begin Elements "): + entity_name = environ[15:] + for key in _mdpa_to_meshio_type: + if key in entity_name: + t = _mdpa_to_meshio_type[key] + break + elif environ.startswith("Begin Conditions "): + entity_name = environ[17:] + for key in _mdpa_to_meshio_type: + if key in entity_name: + t = _mdpa_to_meshio_type[key] + break + return t + def _prepare_cells(cells, cell_tags): # Declaring has additional data tag