Commit ea6c251

Trim line length in the documentation/examples for the new theme
- the new nvidia-theme recommends a line length of 80 characters for code snippets, not the 100 used in DALI
Signed-off-by: Janusz Lisiecki <jlisiecki@nvidia.com>
1 parent 708af60 commit ea6c251

132 files changed: +17951 -1424 lines changed


.github/workflows/lint.yml

+4-2
@@ -7,7 +7,8 @@ on:
 # TODO(klecki): Deduplicate this list of directories with `lint.cmake` file
 env:
   PYTHON_SECURITY_LINT_PATHS: "./tools ./dali/python ./dali_tf_plugin"
-  PYTHON_LINT_PATHS: "./dali ./docs ./internal_tools ./qa"
+  PYTHON_LINT_DOCS_PATHS: "./docs"
+  PYTHON_LINT_PATHS: "./dali ./internal_tools ./qa"
   AUTOGRAPH_LINT_PATHS: "./dali/python/nvidia/dali/_autograph ./dali/test/python/autograph/"

 jobs:
@@ -20,7 +21,8 @@ jobs:
         python-version: '3.10'
     - run: pip install flake8 bandit "black[jupyter]"==24.4.2
     - run: black --check --verbose ${{ env.PYTHON_SECURITY_LINT_PATHS }} ${{ env.PYTHON_LINT_PATHS }} ${{ env.AUTOGRAPH_LINT_PATHS }}
-    - run: flake8 --config=.flake8 ${{ env.PYTHON_SECURITY_LINT_PATHS }} ${{ env.PYTHON_LINT_PATHS }}
+    - run: black --check --verbose ${{ env.PYTHON_LINT_DOCS_PATHS }}
+    - run: flake8 --config=.flake8 ${{ env.PYTHON_SECURITY_LINT_PATHS }} ${{ env.PYTHON_LINT_PATHS }} ${{ env.PYTHON_LINT_DOCS_PATHS }}
     - run: flake8 --config=.flake8.ag ${{ env.AUTOGRAPH_LINT_PATHS }}
     - run: bandit --config bandit.yml -r ${{ env.PYTHON_SECURITY_LINT_PATHS }}
   cpp:

cmake/lint.cmake

+7-3
@@ -25,10 +25,12 @@ set(PYTHON_SECURITY_LINT_PATHS
   ${PROJECT_SOURCE_DIR}/dali_tf_plugin
 )

+set (PYTHON_LINT_DOCS_PATHS
+  ${PROJECT_SOURCE_DIR}/docs
+)
 set(PYTHON_LINT_PATHS
   ${PYTHON_SECURITY_LINT_PATHS}
   ${PROJECT_SOURCE_DIR}/dali
-  ${PROJECT_SOURCE_DIR}/docs
   ${PROJECT_SOURCE_DIR}/qa
   ${PROJECT_SOURCE_DIR}/internal_tools
 )
@@ -40,7 +42,9 @@ set(AUTOGRAPH_LINT_PATHS

 add_custom_target(lint-python-black
   COMMAND
-    black --check --config ${PROJECT_SOURCE_DIR}/pyproject.toml ${PYTHON_LINT_PATHS} ${AUTOGRAPH_LINT_PATHS}
+    black --check ${PYTHON_LINT_PATHS} ${AUTOGRAPH_LINT_PATHS}
+  COMMAND
+    black --check ${PYTHON_LINT_DOCS_PATHS}
   COMMENT
     "Performing black Python formatting check"
 )
@@ -55,7 +59,7 @@ add_custom_target(lint-python-bandit

 add_custom_target(lint-python-flake
   COMMAND
-    flake8 --config=${PROJECT_SOURCE_DIR}/.flake8 ${PYTHON_LINT_PATHS}
+    flake8 --config=${PROJECT_SOURCE_DIR}/.flake8 ${PYTHON_LINT_PATHS} ${PYTHON_LINT_DOCS_PATHS} ${PYTHON_LINT_DOCS_PATHS}
   COMMAND
     flake8 --config=${PROJECT_SOURCE_DIR}/.flake8.ag ${AUTOGRAPH_LINT_PATHS}
   COMMENT

dali/operators/decoder/inflate/inflate.cc

+4-4
@@ -40,11 +40,11 @@ concatenating compressed frames from the corresponding sequences.::

     @pipeline_def
     def inflate_sequence_pipeline():
-        compressed_seq, uncompressed_hwc_shape, compressed_chunk_sizes = fn.external_source(...)
+        compres_seq, uncompres_hwc_shape, compres_chunk_sizes = fn.external_source(...)
         sequences = fn.experimental.inflate(
-            compressed_seq.gpu(),
-            chunk_sizes=compressed_chunk_sizes, # refers to sizes in ``compressed_seq``
-            shape=uncompressed_hwc_shape,
+            compres_seq.gpu(),
+            chunk_sizes=compres_chunk_sizes, # refers to sizes in ``compres_seq``
+            shape=uncompres_hwc_shape,
             layout="HWC",
             sequence_axis_name="F")
         return sequences

dali/operators/generic/erase/erase.cc

+10-4
@@ -77,8 +77,11 @@ A fill value is provided for all the channels. The coordinates can be transforme
 multiplying by the input shape.
 What gives::

-    output[y, x, c] = 100 if 0.15 * 300 <= x < (0.3 + 0.15) * 300 and 0.15 * 300 <= y < (0.3 + 0.15) * 300
-    output[y, x, c] = input[y, x, c] otherwise
+    if 0.15 * 300 <= x < (0.3 + 0.15) * 300 and
+       0.15 * 300 <= y < (0.3 + 0.15) * 300:
+        output[y, x, c] = 100
+    else:
+        output[y, x, c] = input[y, x, c] otherwise

 **Example 4:**
 ``anchor`` = (0.15, 0.15), ``shape`` = (20, 30), ``normalized_anchor`` = True, ``normalized_shape`` = False
@@ -90,8 +93,11 @@ coordinates. Since no axis_names is provided, the anchor and shape must contain
 except "C" (channels).
 What gives::

-    output[y, x, c] = 0 if 0.15 * 300 <= x < (0.15 * 300) + 20 and (0.15 * 300) <= y < (0.15 * 300) + 30
-    output[y, x, c] = input[y, x, c] otherwise
+    if 0.15 * 300 <= x < (0.15 * 300) + 20 and
+       (0.15 * 300) <= y < (0.15 * 300) + 30:
+        output[y, x, c] = 0
+    else:
+        output[y, x, c] = input[y, x, c] otherwise

 )code")
 .NumInput(1)
 .NumOutput(1)

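The region arithmetic in Example 3 is easy to check with plain NumPy; the sketch below assumes the 300x300x3 input and fill value 100 from the example and is not the DALI operator itself::

    import numpy as np

    # anchor=(0.15, 0.15) and shape=(0.3, 0.3), both normalized,
    # applied to a 300x300x3 input.
    h, w = 300, 300
    y0, x0 = int(0.15 * h), int(0.15 * w)                    # region start: (45, 45)
    y1, x1 = int((0.15 + 0.3) * h), int((0.15 + 0.3) * w)    # region end: (135, 135)

    image = np.zeros((h, w, 3), dtype=np.uint8)
    output = image.copy()
    output[y0:y1, x0:x1, :] = 100   # erased region receives the fill value
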
dali/operators/image/color/brightness_contrast.cc

+2-1
@@ -79,7 +79,8 @@ DALI_SCHEMA(BrightnessContrast)

 The brightness and contrast are adjusted based on the following formula::

-    out = brightness_shift * output_range + brightness * (contrast_center + contrast * (in - contrast_center))
+    out = brightness_shift * output_range + brightness *
+          (contrast_center + contrast * (in - contrast_center))

 Where the output_range is 1 for float outputs or the maximum positive value for integral types.

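Read as NumPy-style reference code, the wrapped formula becomes the sketch below; it assumes float input and output (so output_range is 1) and uses illustrative parameter values, and is not the operator's actual implementation::

    import numpy as np

    def brightness_contrast_ref(inp, brightness=1.0, brightness_shift=0.0,
                                contrast=1.0, contrast_center=0.5):
        # Float output, so output_range == 1; contrast_center here is
        # an illustrative default, not necessarily the operator's.
        output_range = 1.0
        return (brightness_shift * output_range
                + brightness * (contrast_center
                                + contrast * (inp - contrast_center)))

    img = np.random.rand(4, 4, 3).astype(np.float32)
    out = brightness_contrast_ref(img, brightness=1.2, contrast=1.5)
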
dali/operators/input/video_input_cpu.cc

+9-5
@@ -60,21 +60,25 @@ will be partial and the last sequence in this batch will be determined using
 -------------------------------------------------------------------


-User decided that there shall be 5 frames per sequence and the last_sequence_policy='partial':
+User decided that there shall be 5 frames per sequence and
+the last_sequence_policy='partial':
 -------------------------------------------------------------------
 [ ][ ][ ][ ][ ][ ][ ][ ][ ][ ][ ][ ][ ][]
 -------------------------------------------------------------------
-Since there are not enough frames, the last sequence comprises 2 frames.
+Since there are not enough frames, the last sequence comprises 2 frames.


-The Pipeline has max_batch_size=3, therefore the operator will return 5 batches of sequences.
-First 4 batches comprise 3 sequences and the last batch is partial and comprises 2 sequences.
+The Pipeline has max_batch_size=3, therefore the operator will return
+5 batches of sequences.
+First 4 batches comprise 3 sequences and the last batch is partial and
+comprises 2 sequences.
 --------------- --------------- --------------- --------------- -------
 [ ][ ][ ] [ ][ ][ ] [ ][ ][ ] [ ][ ][ ] [ ][]
 --------------- --------------- --------------- --------------- -------


-With the last_sequence_policy='pad', the last sequence of the last batch will be padded with 0:
+With the last_sequence_policy='pad', the last sequence of the last batch
+will be padded with 0:
 --------------- --------------- --------------- --------------- -------000
 [ ][ ][ ] [ ][ ][ ] [ ][ ][ ] [ ][ ][ ] [ ][ ]
 --------------- --------------- --------------- --------------- -------000

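The sequence and batch counts described above follow from simple arithmetic; the sketch below reproduces them in plain Python for a 67-frame video, which is an assumption read off the picture (13 full sequences plus 2 leftover frames), not a value taken from the operator::

    import math

    frames, frames_per_sequence, max_batch_size = 67, 5, 3

    num_sequences = math.ceil(frames / frames_per_sequence)                      # 14
    last_sequence_frames = frames - (num_sequences - 1) * frames_per_sequence    # 2
    num_batches = math.ceil(num_sequences / max_batch_size)                      # 5
    last_batch_sequences = num_sequences - (num_batches - 1) * max_batch_size    # 2
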
dali/operators/reader/coco_reader_op.cc

+2-1
@@ -41,7 +41,8 @@ images and annotation JSON files.

 This readers produces the following outputs::

-    images, bounding_boxes, labels, ((polygons, vertices) | (pixelwise_masks)), (image_ids)
+    images, bounding_boxes, labels, ((polygons, vertices) | (pixelwise_masks)),
+    (image_ids)

 * **images**
   Each sample contains image data with layout ``HWC`` (height, width, channels).

dali/operators/reader/nemo_asr_reader_op.cc

+16-3
@@ -38,9 +38,22 @@ NVIDIA NeMo compatible manifest.

 Example manifest file::

-    {"audio_filepath": "path/to/audio1.wav", "duration": 3.45, "text": "this is a nemo tutorial"}
-    {"audio_filepath": "path/to/audio1.wav", "offset": 3.45, "duration": 1.45, "text": "same audio file but using offset"}
-    {"audio_filepath": "path/to/audio2.wav", "duration": 3.45, "text": "third transcript in this example"}
+    {
+        "audio_filepath": "path/to/audio1.wav",
+        "duration": 3.45,
+        "text": "this is a nemo tutorial"
+    }
+    {
+        "audio_filepath": "path/to/audio1.wav",
+        "offset": 3.45,
+        "duration": 1.45,
+        "text": "same audio file but using offset"
+    }
+    {
+        "audio_filepath": "path/to/audio2.wav",
+        "duration": 3.45,
+        "text": "third transcript in this example"
+    }

 .. note::
     Only ``audio_filepath`` is field mandatory. If ``duration`` is not specified, the whole audio file will be used. A missing ``text`` field

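As the removed single-line entries show, the manifest is newline-delimited JSON: each entry occupies one line in the actual file, even though the new example wraps it for readability. A small sketch of producing such a file with the standard json module (file name and entries are illustrative)::

    import json

    entries = [
        {"audio_filepath": "path/to/audio1.wav", "duration": 3.45,
         "text": "this is a nemo tutorial"},
        {"audio_filepath": "path/to/audio2.wav", "duration": 3.45,
         "text": "third transcript in this example"},
    ]
    with open("manifest.json", "w") as f:
        for entry in entries:
            f.write(json.dumps(entry) + "\n")   # one JSON object per line
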
dali/operators/segmentation/select_masks.cc

+2-1
@@ -40,7 +40,8 @@ masks are present.
 Let us assume the following input mask, where symbolic coordinates are used for a clearer example::

     polygons = [[0, 0, 3], [1, 3, 7], [2, 7, 10]]
-    vertices = [[x0, y0], [x1, y1], [x2, y2], [x3, y3], [x4, y4], [x5, y5], [x6, y6], [x7, y7], [x8, y8], [x9, y9]]
+    vertices = [[x0, y0], [x1, y1], [x2, y2], [x3, y3], [x4, y4], [x5, y5],
+                [x6, y6], [x7, y7], [x8, y8], [x9, y9]]

 Example 1: Selecting a single mask with id ``1``, maintaining the original id::

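Each ``polygons`` entry above reads as ``[mask_id, start_vertex, end_vertex]``, so selecting mask ``1`` keeps vertices 3 through 6. A plain-Python sketch of that selection with stand-in coordinates (not the operator itself)::

    polygons = [[0, 0, 3], [1, 3, 7], [2, 7, 10]]
    # Stand-ins for the symbolic [xi, yi] pairs above.
    vertices = [[float(i), float(i) + 0.5] for i in range(10)]

    mask_ids = [1]
    selected_polygons = [p for p in polygons if p[0] in mask_ids]
    selected_vertices = [v for _, start, end in selected_polygons
                         for v in vertices[start:end]]
    # selected_vertices holds the pairs labelled [x3, y3] .. [x6, y6] above
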
dali/python/nvidia/dali/plugin/mxnet.py

+2-1
@@ -481,7 +481,8 @@ class DALIClassificationIterator(DALIGenericIterator):

     .. code-block:: python

-       DALIClassificationIterator(pipelines, reader_name, data_name, label_name, data_layout)
+       DALIClassificationIterator(pipelines, reader_name, data_name, label_name,
+                                  data_layout)

     is equivalent to calling

docs/advanced_topics_sharding.rst

+4-2
@@ -62,12 +62,14 @@ Shard calculation

 Here is the formula to calculate the shard size for a shard ID::

-    floor((id + 1) * dataset_size / num_shards) - floor(id * dataset_size / num_shards)
+    floor((id + 1) * dataset_size / num_shards) -
+    floor(id * dataset_size / num_shards)

 When the pipeline advances through the epochs and the reader moves to the next shard, the formula
 needs to be extended to reflect this change::

-    floor(((id + epoch_num) % num_shards + 1) * dataset_size / num_shards) - floor(((id + epoch_num) % num_shards) * dataset_size / num_shards)
+    floor(((id + epoch_num) % num_shards + 1) * dataset_size / num_shards) -
+    floor(((id + epoch_num) % num_shards) * dataset_size / num_shards)

 When the second formula is used, providing a size value once at the beginning of the training works
 only when the ``stick_to_shard`` reader option is enabled and prevents DALI from rotating shards.

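Both formulas are easy to sanity-check in plain Python; the sketch below uses the epoch-aware version (with epoch_num=0 it reduces to the first formula) and the illustrative values dataset_size=10, num_shards=3::

    import math

    def shard_size(shard_id, epoch_num, dataset_size, num_shards):
        # Shard index after the reader rotates through epochs.
        sid = (shard_id + epoch_num) % num_shards
        return (math.floor((sid + 1) * dataset_size / num_shards)
                - math.floor(sid * dataset_size / num_shards))

    print([shard_size(i, 0, 10, 3) for i in range(3)])  # [3, 3, 4]
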
docs/auto_aug/auto_augment.rst

+2-1
@@ -60,7 +60,8 @@ The best way is to wrap your policy creation into a function::

     def my_custom_policy() -> Policy:
         """
-        Creates a simple AutoAugment policy with 3 sub-policies using custom magnitude ranges.
+        Creates a simple AutoAugment policy with 3 sub-policies using custom
+        magnitude ranges.
         """

         shear_x = augmentations.shear_x.augmentation((0, 0.5), True)

docs/autodoc_submodules.py

+21-7
@@ -45,7 +45,8 @@
 exclude_fn_members = {}

 installation_page_url = (
-    "https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html"
+    "https://docs.nvidia.com/deeplearning/dali/user-guide/"
+    "docs/installation.html"
 )

 mod_aditional_doc = {
@@ -87,11 +88,15 @@ def get_functions(module):
     or hidden members. No nested modules would be reported."""
     result = []
     # Take all public members of given module
-    public_members = list(filter(lambda x: not str(x).startswith("_"), dir(module)))
+    public_members = list(
+        filter(lambda x: not str(x).startswith("_"), dir(module))
+    )
     for member_name in public_members:
         member = getattr(module, member_name)
         # Just user-defined functions
-        if inspect.isfunction(member) and not member.__module__.endswith("hidden"):
+        if inspect.isfunction(member) and not member.__module__.endswith(
+            "hidden"
+        ):
             result.append(member_name)
     return result

@@ -157,7 +162,9 @@ def single_module_file(module, funs_in_module, references):
     result += "\n"

     result += f"The following table lists all operations available in ``{module}`` module:\n"
-    result += operations_table.operations_table_str(get_schema_names(module, funs_in_module))
+    result += operations_table.operations_table_str(
+        get_schema_names(module, funs_in_module)
+    )
     result += "\n\n"

     result += ".. toctree::\n :hidden:\n\n"
@@ -185,15 +192,22 @@ def fn_autodoc(out_filename, generated_path, references):
     # the rest is within the same directory, so there is no need for that
     all_modules_str += f" {generated_path / module}\n"

-    single_module_str = single_module_file(module, funs_in_module, references)
+    single_module_str = single_module_file(
+        module, funs_in_module, references
+    )
     with open(generated_path / (module + ".rst"), "w") as module_file:
         module_file.write(single_module_str)

     for fun in funs_in_module:
         full_name = f"{module}.{fun}"
-        if module in exclude_fn_members and fun in exclude_fn_members[module]:
+        if (
+            module in exclude_fn_members
+            and fun in exclude_fn_members[module]
+        ):
             continue
-        with open(generated_path / (full_name + ".rst"), "w") as function_file:
+        with open(
+            generated_path / (full_name + ".rst"), "w"
+        ) as function_file:
             single_file_str = single_fun_file(full_name, references)
             function_file.write(single_file_str)
