From 7ba80da8331d0cb322501d58066ed0d89d2dfeea Mon Sep 17 00:00:00 2001 From: Jer-Pha <131234246+Jer-Pha@users.noreply.github.com> Date: Sat, 8 Jun 2024 03:59:06 -0700 Subject: [PATCH 1/2] CI Test --- qs2csv/urls.py | 1 + 1 file changed, 1 insertion(+) diff --git a/qs2csv/urls.py b/qs2csv/urls.py index a6312d4..15b5fb8 100644 --- a/qs2csv/urls.py +++ b/qs2csv/urls.py @@ -20,3 +20,4 @@ urlpatterns = [ path("admin/", admin.site.urls), ] +# CI Test From 08454611d10234f2dde68e24c8c2f7712788100a Mon Sep 17 00:00:00 2001 From: Jer-Pha <131234246+Jer-Pha@users.noreply.github.com> Date: Sat, 8 Jun 2024 15:27:47 -0700 Subject: [PATCH 2/2] Major update to v0.3.0 --- README.md | 25 ++- pyproject.toml | 8 +- qs2csv/src/qs2csv/__init__.py | 2 +- qs2csv/src/qs2csv/qs2csv.py | 316 +++++++++++++++++++++++----------- qs2csv/tests/tests.py | 93 +++++++--- qs2csv/urls.py | 1 - tox.ini | 1 - 7 files changed, 306 insertions(+), 140 deletions(-) diff --git a/README.md b/README.md index f5f605a..70b14e3 100644 --- a/README.md +++ b/README.md @@ -30,8 +30,8 @@ Note: this will install [pandas](https://pandas.pydata.org/), which is used with views.py -```shell -from django_qs2csv import qs_to_csv +```python +from qs2csv import qs_to_csv from .models import SampleModel @@ -52,9 +52,9 @@ def export_csv(request): ### Return type -`qs_to_csv` returns a `django.http.HttpResponse` with the `Content-Type` and `Content-Disposition` headers. Additional headers can be added to the response before returning: +All functions return a `django.http.HttpResponse` with the `Content-Type` and `Content-Disposition` headers. Additional headers can be added to the response before returning: -```shell +```python ... response = qs_to_csv(my_queryset) @@ -65,20 +65,27 @@ response["Another-Header"] = "This is another header for the HttpResponse." ### Parameters -`qs : QuerySet` - **Required**. The QuerySet to be exported as a CSV file. This can be passed as QuerySet\[object], QuerySet\[dict] (values()), or QuerySet\[list\[tuple]] (values_list()). See the note in the [Limitations](#limitations) about QuerySet evaluation. +#### Universal -`header : bool` - Include a header row with field names. **Default: False** +`qs : QuerySet` - **Required**. The QuerySet to be exported as a CSV file. This can be passed as QuerySet\[object], QuerySet\[dict] (values()), or QuerySet\[list\[tuple]] (values_list()). See the note in the [Limitations](#limitations) about QuerySet evaluation. `filename : str` - The name of the exported CSV file. You do not need to include .csv, it will be added once the filename is evaluated. File names can not end in a period, include the symbols (< > : " / \\ | ? *), or be longer than 251 characters (255 w/ ".csv"). **Default: "export"** -`only : list[str]` - List the field names that you would like to include in the exported file. An empty list will include all fields, other than those in `defer`. Field names listed in both `only` and `defer` will not be included. See the note in the [Limitations](#limitations) section for details how this works with a QuerySet that calls only() / defer(). **Default: []** +`only : list[str]` - List the field names that you would like to include in the exported file. An empty list will include all fields, other than those in `defer`. Field names listed in both `only` and `defer` will not be included. See the note in the [Limitations](#limitations) section for details how this works with a QuerySet that calls only(), defer(), values(), or values_list(). 
**Default: []**

`defer : list[str]` - List the field names that you do not want to include in the exported file. An empty list will include all fields, or just those mentioned in `only`. Field names listed in both `only` and `defer` will not be included. See the note in the [Limitations](#limitations) section for details on how this works with a QuerySet that calls only(), defer(), values(), or values_list(). **Default: []**

-`defer : list[str]` - List the field names that you do not want to include in the exported file. An empty list will include all fields, or just those mentioned in `only`. Field names listed in both `only` and `defer` will not be included. See the note in the [Limitations](#limitations) section for details how this works with a QuerySet that calls only() / defer(). **Default: []**
+`header : bool` - Include a header row with field names. **Default: False**

`verbose : bool` - Determine if the header row uses the fields' `verbose_name` or just the field names. This only applies when `header=True`. **Default: True**

+#### qs_to_csv() and qs_to_csv_pd()
+
`values : bool` - Only enable this if your QuerySet was already evaluated (no longer lazy) and called values(). You must ensure your fields are properly selected in the original QuerySet, because this will skip applying the `only` and `defer` parameters. **Default: False**

+#### qs_to_csv_rel_str()
+`values : bool` - Only enable this if the QuerySet is passed to the function after calling values() or values_list(). This will convert the QuerySet back to a list of model objects, instead of a list of dicts/lists. See the note in [Limitations](#limitations) for an _IMPORTANT WARNING_ about performance. **Default: False**
+
### Limitations

If the QuerySet was already evaluated before being passed to `qs_to_csv` then it will be re-evaluated by the function. Depending on the size of the QuerySet, complexity of the query and the database setup, this may add a noticeable delay. It is recommended to monitor the impact of database queries using `django.db.connection.queries` or [django-debug-toolbar](https://django-debug-toolbar.readthedocs.io/en/latest/index.html) during development. If the QuerySet must be evaluated before the function is called, it would be most efficient to use values() with the QuerySet (if possible) then pass `values=True` to `qs_to_csv`.

@@ -89,6 +96,8 @@ If your QuerySet uses only() / defer() then you must include those same fields i

`ManyToManyField` is not supported.

+Passing ``values=True`` to ``qs_to_csv_rel_str()`` will create a new query, checking for primary keys (PKs) that are in a list of all PKs from your original QuerySet. **This will add significant time if your QuerySet is large and may not work at all**, depending on the size of your QuerySet and your database's capabilities. It is recommended to avoid this by not using values() or values_list() when calling this function. It would be more efficient to create a brand new QuerySet than to use this. Note: if you make this change, ensure `values` is False, or the issue will remain.
+
## License

This project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details. 
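To illustrate the new `qs_to_csv_rel_str()` documented above, here is a minimal view sketch. `SampleModel`, its `name`/`foreign_key` fields, and the view name are placeholders carried over from the earlier README example, not part of the package:

```python
from qs2csv import qs_to_csv_rel_str

from .models import SampleModel  # placeholder model with a ForeignKey


def export_related_csv(request):
    # Keep the QuerySet lazy and skip values()/values_list() so `values`
    # can stay False (see the performance warning under Limitations).
    qs = SampleModel.objects.select_related("foreign_key")

    response = qs_to_csv_rel_str(
        qs,
        filename="related_export",  # ".csv" is appended automatically
        only=["name", "foreign_key"],  # related column exports as str(), not the PK
        header=True,
    )
    response["Another-Header"] = "Additional headers can still be added."
    return response
```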
diff --git a/pyproject.toml b/pyproject.toml index 77fd981..1c3671d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,11 +4,11 @@ build-backend = "setuptools.build_meta" [tool.setuptools.packages.find] where = ["qs2csv\\src"] -include = ["django_qs2csv"] +include = ["qs2csv"] [project] name = "django-qs2csv" -version = "0.2.3" +version = "0.3.0" dependencies = [ "django>=3.2", 'importlib-metadata; python_version<"3.10"', @@ -19,7 +19,7 @@ authors = [ ] readme = "README.md" requires-python = ">=3.9" -keywords = ["csv", "export", "pandas", "httpresponse", "http response", "django"] +keywords = ["django", "csv", "export", "httpresponse", "http response", "pandas"] classifiers = [ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", @@ -53,7 +53,7 @@ classifiers = [ pd = ["pandas>=1.5"] [project.urls] -Repository = "https://github.com/Jer-Pha/django-qs2csv" +Homepage = "https://github.com/Jer-Pha/django-qs2csv" Issues = "https://github.com/Jer-Pha/django-qs2csv/issues" [tool.black] diff --git a/qs2csv/src/qs2csv/__init__.py b/qs2csv/src/qs2csv/__init__.py index 80f613f..6dbff93 100644 --- a/qs2csv/src/qs2csv/__init__.py +++ b/qs2csv/src/qs2csv/__init__.py @@ -1 +1 @@ -from .qs2csv import qs_to_csv, qs_to_csv_pd +from .qs2csv import qs_to_csv, qs_to_csv_pd, qs_to_csv_rel_str diff --git a/qs2csv/src/qs2csv/qs2csv.py b/qs2csv/src/qs2csv/qs2csv.py index 364c133..a31f2a8 100644 --- a/qs2csv/src/qs2csv/qs2csv.py +++ b/qs2csv/src/qs2csv/qs2csv.py @@ -4,23 +4,12 @@ from django.db.models import QuerySet -def error_handler( - qs: Union[QuerySet[object], QuerySet[Dict[Any, Any]], QuerySet[List[Any]]], - values: bool, - filename: str, -) -> None: - """Checks for errors/warnings in `queryset_to_csv`.""" - # Ensure `values` is only being used with a QuerySet with .values() - if values and not issubclass(qs[0].__class__, dict): - raise TypeError( - "`values=True` only works with a QuerySet that utilizes .values()." - " QuerySets for model objects or values_list() are not compatible." - ) - - # Ensure `filename` is properly formatted - filename = filename.strip() - if filename != "export": +def validate_filename(filename: str) -> str: + """Ensures `filename` is properly formatted.""" + # Only process `filename` if it is not the default value. + if filename != "export.csv": SYMBOLS = ("<", ">", ":", '"', "/", "\\", "|", "?", "*") + filename = filename.strip() if len(filename) > 251 or not filename: raise ValueError( @@ -35,34 +24,35 @@ def error_handler( elif filename[-1] == ".": raise ValueError(f"`filename` can not end in a period.") - # Check if the QuerySet has been evaluated: - if qs._result_cache is not None: - from warnings import warn + if filename[-4:] != ".csv": + filename += ".csv" - warning = ( - "The QuerySet was already evaluated before being passed to this" - " function. This will result in another database hit converting" - " the QuerySet to a list of dicts by using values()." - ) - if qs and not values and issubclass(qs[0].__class__, dict): - warning += ( - "\nTo avoid this, pass `values=True` as a parameter. This will" - " use the QuerySet as-is. Note: `values` overrides the `only`" - " and `defer` params, which means fields must be filtered" - " before the QuerySet is passed to this function.\nThis can" - " only be done if you are using values()." 
- ) - warn(warning, ResourceWarning) + return filename -def get_fields( +def create_response(filename: str) -> HttpResponse: + """Creates HttpResponse with necessary headers.""" + return HttpResponse( + headers={ + "Content-Type": "text/csv", + "Content-Disposition": ( + f"attachment; filename={validate_filename(filename)}" + ), + }, + ) + + +def set_fields( fields: List[object], only: List[str], defer: List[str], - verbose: bool, -) -> List[List[str]]: - """Determines which fields to include in the response.""" - # Remove ManyToManyField (unsupported) +) -> List[object]: + """Determines which fields to include in the response. + + Removes ManyToManyField (unsupported) and applies `only` and + `defer` filters then returns a list of field objects. + + """ if only: # `defer` overrides `only` only = [f for f in only if f not in defer] @@ -74,58 +64,76 @@ def get_fields( else: fields = [f for f in fields if not f.many_to_many] - # Convert fields to field name strings - return ( - ( - [f.name for f in fields], - [f.verbose_name for f in fields], - ) - if verbose - else ([f.name for f in fields],) - ) + return fields -def build_response( - qs: Union[QuerySet[object], QuerySet[Dict[Any, Any]], QuerySet[List[Any]]], +def get_fields( + fields: List[object], + header: bool, + verbose: bool, +) -> Tuple[List[str], Optional[List[str]]]: + """Gets header then converts fields to a list of strings.""" + if header and verbose: + headers = [f.verbose_name for f in fields] + elif header: + headers = [f.name for f in fields] + else: + headers = None + + return [f.name for f in fields], headers + + +def qs_to_csv_core( + qs: Union[ + QuerySet[object], + QuerySet[Dict[Any, Any]], + QuerySet[List[Any]], + ], filename: str, - only: List[str], - defer: List[str], + only: List[Optional[str]], + defer: List[Optional[str]], +) -> Tuple[HttpResponse, List[object]]: + """Core functionality of all package functions.""" + fields = set_fields(qs.model._meta.local_fields, only, defer) + return create_response(filename), fields + + +def qs_to_values( + qs: Union[ + QuerySet[object], + QuerySet[Dict[Any, Any]], + QuerySet[List[Any]], + ], + filename: str, + only: List[Optional[str]], + defer: List[Optional[str]], header: bool, verbose: bool, values: bool, - pd: bool, + pd: bool = False, ) -> Tuple[ - HttpResponse, QuerySet[Dict[Any, Any]], + HttpResponse, List[str], - Optional[List[object]], + Optional[List[str]], ]: - # Check for errors - error_handler(qs, values, filename) - - # Specify the fields for values() and the header row - fields = get_fields(qs.model._meta.local_fields, only, defer, verbose) - head = [] if not header else fields[-1] - fields = fields[0] + """Converts QuerySet to a list of dicts using values().""" + response, fields = qs_to_csv_core(qs, filename, only, defer) + fields, headers = get_fields(fields, header, verbose) - # Convert QuerySet to list of dicts if not values: qs = qs.values(*fields) + elif not issubclass(qs[0].__class__, dict): + raise TypeError( + "``values=True`` only works with a QuerySet that utilizes" + " .values(). QuerySets for model objects or values_list()" + " are not compatible." 
+ ) - # Check if the filename already includes the correct file type - if filename[-4:] != ".csv": - filename += ".csv" - - # Create the response - response = HttpResponse( - headers={ - "Content-Type": "text/csv", - "Content-Disposition": f"attachment; filename={filename}", - }, + return ( + (qs, response, headers, fields) if not pd else (qs, response, headers) ) - return (response, qs, head, fields) if not pd else (response, qs, head) - def qs_to_csv( qs: Union[QuerySet[object], QuerySet[Dict[Any, Any]], QuerySet[List[Any]]], @@ -146,19 +154,19 @@ def qs_to_csv( ---------- qs The QuerySet that will be converted to a CSV file. - header : default=False - Add/remove a header row of field names in the response. filename : default="export.csv" The file name for the exported file. Does not need .csv suffix. only : default=[] List of specific fields to include in the response. defer : default=[] List of specific fields not to include in the response + header : default=False + Add/remove a header row of field names in the response. Returns ------- HttpResponse - Includes the Content-Type and Content-Disposition headers. + Includes the "Content-Type" and "Content-Disposition" headers. Other Parameters ---------- @@ -167,23 +175,17 @@ def qs_to_csv( values : default=False Use the QuerySet as-is, must use values(). See ``Notes``. + See Also + -------- + qs_to_csv_pd() + qs_to_csv_rel_str() Raises ------ ValueError If `filename` is not formatted correctly. TypeError - If `values=True` and the QuerySet did not call values(). - - Warns - ----- - ResourceWarning - If the QuerySet will be evaluated more than once. - - See Also - -------- - error_handler : Checks for errors/warnings in this function. - get_fields : Determines which fields to include in the response. + If ``values=True`` and the QuerySet did not call values(). Notes ----- @@ -192,10 +194,10 @@ def qs_to_csv( ManyToManyField is not supported. - `headers` includes Content-Type and Content-Disposition. To add - headers, set the new header as an index key and assign a value to - it, the same as a dictionary. These headers can also be deleted - (not recommended). + The returned HttpResponse includes headers Content-Type and + Content-Disposition. To add headers, set the new header as an index + key and assign a value to it, the same as a dictionary. These + headers can also be deleted (not recommended). If the QuerySet was already evaluated before being passed to the function then it will be re-evaluated. Depending on the size of the @@ -204,16 +206,17 @@ def qs_to_csv( django.db.connection.queries or django-debug-toolbar during development. If the QuerySet must be evaluated before the function is called, it would be most efficient to use values() with the - QuerySet (if possible) then pass `values=True`. + QuerySet (if possible) then pass ``values=True``. If your QuerySet uses only() / defer() then you must include those same fields in the `only` / `defer` parameters when calling the function. The function transforms all QuerySets into a list of dicts w/ values(), which is incompatible with only() and defer(). - """ + This also applies if you specify fields in values() or values_list(). 
- response, qs, headers, fields = build_response( - qs, filename, only, defer, header, verbose, values, False + """ + qs, response, headers, fields = qs_to_values( + qs, filename, only, defer, header, verbose, values ) # Build csv file @@ -238,8 +241,7 @@ def qs_to_csv_pd( verbose: bool = True, values: bool = False, ) -> HttpResponse: - """ - This is a copy of qs_to_csv() that uses pandas. + """This is a copy of qs_to_csv() that uses the pandas library. This function is identical to qs_to_csv() except that it uses pandas.DataFrame().to_csv() instead of csv.DictWriter(). @@ -251,9 +253,10 @@ def qs_to_csv_pd( See Also -------- qs_to_csv + """ - response, qs, headers = build_response( - qs, filename, only, defer, header, verbose, values, True + qs, response, headers = qs_to_values( + qs, filename, only, defer, header, verbose, values, pd=True ) # Build csv file @@ -269,3 +272,116 @@ def qs_to_csv_pd( DataFrame(qs).to_csv(response, header=header, index=False) return response + + +def qs_to_csv_rel_str( + qs: Union[QuerySet[object], QuerySet[Dict[Any, Any]], QuerySet[List[Any]]], + filename: str = "export.csv", + only: List[str] = [], + defer: List[str] = [], + header: bool = False, + verbose: bool = True, + values: bool = False, +) -> HttpResponse: + """This is a copy of qs_to_csv() that prints __string__ for related + fields ForeignKey and OneToOneField instead of their primary keys. + + This function should not be used if the model has neither a + ForeignKey nor OneToOneField. ManyToManyField is not supported. + + Other Parameters + ---------- + values : default=False + Set as True if the QuerySet is passed after calling values() or + values_list(). This will convert the QuerySet back to a list of + model objects, instead of a list of dicts/lists. See ``Notes`` + for an important warning about performance. + + See Also + -------- + qs_to_csv + + Raises + ------ + ValueError + If the values()/values_list() QuerySet is too large to convert. + TypeError + If values=False and the QuerySet called values()/values_list(). + + Notes + ----- + Passing ``values=True`` to this function will create a new query, + checking for primary keys (PKs) that are in a list of all PKs from + your original QuerySet. **This will add significant time if your + QuerySet is large and will potentially not work**, depending on the + size of your QuerySet and your database's capabilities. + + It is recommended to avoid this by not using values() or + values_list() when calling this function. Note: if you make this + change, ensure `values` is False. 
+ + """ + response, fields = qs_to_csv_core(qs, filename, only, defer) + + if values: + # See `Notes` in docstring for critical performance warning + model = qs.model + pk = model._meta.pk.name + qs = list(qs.values()) + if qs: + related_fields = [ + f.name for f in fields if f.many_to_one or f.one_to_one + ] + fields, headers = get_fields(fields, header, verbose) + qs = ( + model.objects.select_related(*related_fields) + .only(*fields) + .filter(pk__in=(d[pk] for d in qs)) + ) + else: + fields, headers = get_fields(fields, header, verbose) + + # Build csv file + from csv import writer + from django.db import reset_queries + + csv_writer = writer(response) + if header: + csv_writer.writerow(headers) + + try: + for obj in qs: + row = [] + + for field in fields: + data = getattr(obj, field) + if callable(data): # pragma: no cover + data = data() + data = ( + str(data, encoding="utf-8") + if isinstance(data, bytes) + else str(data) + ) + row.append(data) + reset_queries() + + csv_writer.writerow(row) + except Exception as e: + msg = str(e) + if not values and "object has no attribute" in msg: + raise TypeError( + msg + + " - The QuerySet was passed with values() or values_list()" + " without specifying values=True." + ) + elif values and "too many SQL variables" in msg: # pragma: no cover + raise ValueError( + msg + " - The original QuerySet was too large to convert from" + " values()/values_list() to a QuerySet of model objects." + " This can be resolved by decreasing the size of your" + " QuerySet or by not calling values/values_list() before" + " calling this function then setting values=False." + ) + raise e # pragma: no cover + + return response diff --git a/qs2csv/tests/tests.py b/qs2csv/tests/tests.py index 2b3ed97..bcdc492 100644 --- a/qs2csv/tests/tests.py +++ b/qs2csv/tests/tests.py @@ -4,14 +4,14 @@ from django.test import TestCase from ..models import ForeignKeyModel, TestModel -from ..src.qs2csv import qs_to_csv, qs_to_csv_pd +from ..src.qs2csv import qs_to_csv, qs_to_csv_pd, qs_to_csv_rel_str class AllFunctionsTest(TestCase): - """Tests all package functions""" + """Tests all package functions.""" def setUp(self): - """Sets up test data""" + """Sets up test data.""" self.fkm = ForeignKeyModel.objects.create() self.afm = TestModel.objects.create( foreign_key=self.fkm, @@ -28,7 +28,7 @@ def test_model_str(self): self.assertEqual(str(self.afm), f"AF Model #{self.afm.pk}") def test_qs_to_csv(self): - """Tests a standard export with default parameters. + """Tests qs_to_csv() with default parameters. 
filename = "export.csv" only = [] @@ -49,7 +49,7 @@ def test_qs_to_csv(self): self.assertIn("1 day, 0:00:00", header_row) def test_qs_to_csv_pd(self): - """Tests a standard pandas export with default parameters.""" + """Tests qs_to_csv_pd() with default parameters.""" response = qs_to_csv_pd(self.qs) self.assertEqual(response.status_code, 200) self.assertIn("Content-Type", response.headers) @@ -63,15 +63,15 @@ def test_qs_to_csv_pd(self): # self.assertEqual(header_row, ",".join(self.fields)) - def test_error_handler(self): - """Tests error_handler() when ``values = True``.""" + def test_qs_to_values(self): + """Tests qs_to_values() when ``values = True``.""" with self.assertRaises(TypeError): qs_to_csv(self.qs, values=True) with self.assertRaises(TypeError): - qs_to_csv_pd(self.qs, values=True) + qs_to_csv_pd(self.qs.values_list(), values=True) def test_filename_format(self): - """Tests file formatting errors.""" + """Tests filename formatting errors.""" f1 = ("a") * 252 with self.assertRaises(ValueError): qs_to_csv(self.qs, filename=f1) @@ -96,19 +96,8 @@ def test_filename_format(self): header_row = body_rows.pop(0) self.assertIn("Big Auto", header_row) - def test_evaluated_warning(self): - """Tests pre-evaluated QuerySet warnings.""" - qs = self.qs.values() - len(self.qs) - with self.assertWarns(ResourceWarning): - qs_to_csv(self.qs) - - len(qs) - with self.assertWarns(ResourceWarning): - qs_to_csv_pd(qs) - def test_only_param(self): - """Tests a standard export with only and defer parameters.""" + """Tests qs_to_csv() with only and defer parameters.""" only = [ "char_field", "boolean_field", @@ -132,7 +121,7 @@ def test_only_param(self): self.assertEqual(len(body_rows), 1) def test_only_param_pd(self): - """Tests a pandas export with only and verbose parameters.""" + """Tests qs_to_csv_pd() with only param and verbose header.""" only = [ "char_field", "boolean_field", @@ -149,7 +138,7 @@ def test_only_param_pd(self): self.assertEqual(len(body_rows[0]), 4) def test_defer_param(self): - """Tests a pandas export with only and verbose parameters.""" + """Tests qs_to_csv() with defer parameter.""" defer = ["decimal_field", "generic_ip_field"] response = qs_to_csv(self.qs.values_list(), defer=defer) content = response.content.decode("utf-8") @@ -158,10 +147,64 @@ def test_defer_param(self): self.assertEqual(len(body_rows[-1]), 14) def test_defer_param_pd(self): - """Tests a pandas export with defer parameter.""" + """Tests qs_to_csv_pd() with defer parameter.""" defer = ["duration_field", "many_to_many_field", "date_field"] - response = qs_to_csv(self.qs.values(), defer=defer) + response = qs_to_csv_pd(self.qs.values(), defer=defer) content = response.content.decode("utf-8") cvs_reader = reader(StringIO(content)) body_rows = list(cvs_reader) self.assertEqual(len(body_rows[0]), 14) + + def test_qs_to_csv_rel_str(self): + """Tests qs_to_csv_rel_str() with multiple params.""" + only = [] + defer = ["char_field"] + response = qs_to_csv_rel_str( + self.qs, + header=True, + filename="tests.py", + only=only, + defer=defer, + verbose=False, + ) + self.assertEqual(response.status_code, 200) + content = response.content.decode("utf-8") + cvs_reader = reader(StringIO(content)) + body_rows = list(cvs_reader) + header_row = body_rows.pop(0) + self.assertIn("date_field", header_row) + self.assertNotIn("many_to_many_field", header_row) + self.assertIn("1 day, 0:00:00", body_rows[0]) + + def test_qs_to_csv_rel_str_val(self): + """Tests qs_to_csv_rel_str() with values = True.""" + response = 
qs_to_csv_rel_str(self.qs.values_list(), values=True) + self.assertEqual(response.status_code, 200) + content = response.content.decode("utf-8") + cvs_reader = reader(StringIO(content)) + body_rows = list(cvs_reader) + self.assertEqual(len(body_rows), 1) + self.assertEqual(len(body_rows[0]), 16) + + response = qs_to_csv_rel_str( + self.qs, + values=True, + only=["char_field"], + header=True, + verbose=False, + ) + self.assertEqual(response.status_code, 200) + content2 = response.content.decode("utf-8") + cvs_reader2 = reader(StringIO(content2)) + body_rows2 = list(cvs_reader2) + header_row2 = body_rows2.pop(0) + self.assertEqual(len(header_row2), 1) + self.assertEqual(header_row2[-1], "char_field") + self.assertEqual(len(body_rows2[-1]), 1) + + def test_qs_to_csv_rel_str_err(self): + """Tests qs_to_csv_rel_str() for raised errors.""" + with self.assertRaises(TypeError): + qs_to_csv_rel_str(self.qs.values()) + with self.assertRaises(TypeError): + qs_to_csv_rel_str(self.qs.values_list()) diff --git a/qs2csv/urls.py b/qs2csv/urls.py index 15b5fb8..a6312d4 100644 --- a/qs2csv/urls.py +++ b/qs2csv/urls.py @@ -20,4 +20,3 @@ urlpatterns = [ path("admin/", admin.site.urls), ] -# CI Test diff --git a/tox.ini b/tox.ini index 43b438f..57e6361 100644 --- a/tox.ini +++ b/tox.ini @@ -15,5 +15,4 @@ description = run linters skip_install = true deps = -r{toxinidir}/requirements.txt commands = - pip install --upgrade pip pre-commit run --all-files
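For anyone reviewing this patch locally, the exported response can be inspected the same way the updated test suite does it, with `csv.reader` over the decoded content. A minimal sketch; `MyModel` and the helper name are placeholders:

```python
from csv import reader
from io import StringIO

from qs2csv import qs_to_csv


def rows_from_response(response):
    """Decode an HttpResponse produced by qs2csv into a list of CSV rows."""
    content = response.content.decode("utf-8")
    return list(reader(StringIO(content)))


# Example usage inside a Django TestCase method (MyModel is a placeholder):
# response = qs_to_csv(MyModel.objects.all(), header=True)
# header_row, *body_rows = rows_from_response(response)
```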