From 858733b31aea3c158564c6b92487f7faa97372c1 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Fri, 2 Aug 2024 12:39:14 +0200 Subject: [PATCH 01/27] Update pytest* versions Signed-off-by: Beat Buesser --- requirements_test.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements_test.txt b/requirements_test.txt index 6dc21179c1..cd77d65f21 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -55,11 +55,11 @@ jax[cpu]==0.4.26 # lingvo==0.6.4 # other -pytest~=7.4.3 -pytest-flake8~=1.1.1 -flake8~=4.0.1 +pytest~=8.3.2 +pytest-flake8~=1.2.2 +flake8~=7.1.0 pytest-mock~=3.14.0 -pytest-cov~=4.1.0 +pytest-cov~=5.0.0 requests~=2.31.0 ultralytics==8.0.217 ipython==8.25.0 From 8690ffed417acf46e4e8d96448dbfaef54b1cc28 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Fri, 2 Aug 2024 14:03:16 +0200 Subject: [PATCH 02/27] Replace flake8 with ruff Signed-off-by: Beat Buesser --- .github/workflows/ci-style-checks.yml | 11 ++++++----- pyproject.toml | 2 ++ requirements_test.txt | 2 -- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci-style-checks.yml b/.github/workflows/ci-style-checks.yml index 548403342c..b3227e3929 100644 --- a/.github/workflows/ci-style-checks.yml +++ b/.github/workflows/ci-style-checks.yml @@ -39,16 +39,14 @@ jobs: - name: Install Dependencies run: | python -m pip install --upgrade pip setuptools wheel - pip install -q pylint==2.12.2 mypy==1.7.1 pycodestyle==2.8.0 black==21.12b0 + pip install -q pylint==2.12.2 mypy==1.7.1 pycodestyle==2.8.0 black==21.12b0 ruff==0.5.5 pip install -q -r <(sed '/^numpy/d;/^pluggy/d;/^tensorflow/d;/^keras/d' requirements_test.txt) pip install numpy==1.22.4 - pip install pluggy==0.13.1 pip install tensorflow==2.13.1 pip install keras==2.13.1 pip install types-six pip install types-PyYAML pip install types-setuptools - pip install click==8.0.2 pip list - name: pycodestyle run: pycodestyle --ignore=C0330,C0415,E203,E231,W503 --max-line-length=120 art @@ -58,9 +56,12 @@ jobs: - name: mypy if: ${{ always() }} run: mypy art - - name: pytest-flake8 + - name: ruff if: ${{ always() }} - run: pytest --flake8 -v -m flake8 --ignore=contrib + run: | + ruff check art/ + ruff check tests/ + ruff check examples/ - name: black if: ${{ always() }} run: | diff --git a/pyproject.toml b/pyproject.toml index f3a8b00b1b..fbbc9f0d02 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,2 +1,4 @@ [tool.black] line-length=120 +[tool.ruff] +exclude = [".venv", "contrib"] diff --git a/requirements_test.txt b/requirements_test.txt index cd77d65f21..e9e2491c3c 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -56,8 +56,6 @@ jax[cpu]==0.4.26 # other pytest~=8.3.2 -pytest-flake8~=1.2.2 -flake8~=7.1.0 pytest-mock~=3.14.0 pytest-cov~=5.0.0 requests~=2.31.0 From 6b410ce84844a932b65b756d96f65307c3573356 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Fri, 2 Aug 2024 14:21:37 +0200 Subject: [PATCH 03/27] Update mypy, black, pycodestyle, and pylint Signed-off-by: Beat Buesser --- .github/workflows/ci-style-checks.yml | 7 +- art/__init__.py | 1 + art/attacks/__init__.py | 1 + art/attacks/evasion/__init__.py | 1 + .../adversarial_patch_numpy.py | 2 +- .../adversarial_patch_pytorch.py | 2 +- .../adversarial_patch_tensorflow.py | 2 +- art/attacks/evasion/boundary.py | 2 +- art/attacks/evasion/brendel_bethge.py | 86 +++++++++---------- art/attacks/evasion/hop_skip_jump.py | 2 +- .../imperceptible_asr_pytorch.py | 8 +- art/attacks/evasion/lowprofool.py | 2 +- art/attacks/evasion/newtonfool.py | 2 +- 
.../over_the_air_flickering_pytorch.py | 6 +- art/attacks/evasion/pe_malware_attack.py | 5 +- art/attacks/evasion/simba.py | 6 +- art/attacks/evasion/square_attack.py | 30 +++---- art/attacks/evasion/wasserstein.py | 6 +- art/attacks/evasion/zoo.py | 2 +- art/attacks/extraction/__init__.py | 1 + art/attacks/inference/__init__.py | 1 + .../inference/attribute_inference/__init__.py | 1 + .../inference/attribute_inference/baseline.py | 2 +- .../attribute_inference/black_box.py | 2 +- .../true_label_baseline.py | 2 +- .../membership_inference/__init__.py | 1 + .../membership_inference/black_box.py | 4 +- .../inference/model_inversion/__init__.py | 1 + .../inference/reconstruction/__init__.py | 1 + art/attacks/poisoning/__init__.py | 1 + .../backdoor_attack_dgm_trail.py | 2 +- .../hidden_trigger_backdoor_keras.py | 2 +- .../hidden_trigger_backdoor_pytorch.py | 2 +- .../poisoning/perturbations/__init__.py | 1 + art/defences/__init__.py | 1 + art/defences/detector/__init__.py | 1 + art/defences/detector/evasion/__init__.py | 1 + .../evasion/subsetscanning/scanningops.py | 12 ++- art/defences/detector/poison/__init__.py | 1 + .../detector/poison/activation_defence.py | 12 ++- art/defences/postprocessor/__init__.py | 1 + art/defences/preprocessor/__init__.py | 1 + .../preprocessor/feature_squeezing.py | 2 +- art/defences/preprocessor/mp3_compression.py | 4 +- art/defences/trainer/__init__.py | 1 + .../adversarial_trainer_oaat_pytorch.py | 24 ++---- art/defences/transformer/__init__.py | 1 + art/defences/transformer/evasion/__init__.py | 1 + .../transformer/poisoning/__init__.py | 1 + art/estimators/__init__.py | 1 + art/estimators/certification/__init__.py | 1 + .../certification/deep_z/__init__.py | 1 + .../derandomized_smoothing/__init__.py | 1 + .../ablators/__init__.py | 1 + .../certification/interval/__init__.py | 1 + .../certification/object_seeker/__init__.py | 1 + .../object_seeker/object_seeker.py | 2 +- .../randomized_smoothing/__init__.py | 1 + art/estimators/classification/__init__.py | 1 + art/estimators/classification/keras.py | 4 +- art/estimators/encoding/__init__.py | 1 + art/estimators/gan/__init__.py | 1 + art/estimators/generation/__init__.py | 1 + art/estimators/object_detection/__init__.py | 1 + .../tensorflow_faster_rcnn.py | 18 ++-- art/estimators/object_tracking/__init__.py | 1 + .../object_tracking/pytorch_goturn.py | 4 +- art/estimators/poison_mitigation/__init__.py | 1 + .../neural_cleanse/__init__.py | 1 + .../neural_cleanse/neural_cleanse.py | 2 +- .../poison_mitigation/strip/__init__.py | 1 + art/estimators/regression/__init__.py | 1 + art/estimators/speech_recognition/__init__.py | 1 + art/evaluations/security_curve/__init__.py | 1 + art/experimental/__init__.py | 1 + art/experimental/estimators/__init__.py | 1 + .../estimators/classification/__init__.py | 1 + art/metrics/__init__.py | 1 + art/metrics/privacy/__init__.py | 1 + art/optimizers.py | 6 +- art/preprocessing/__init__.py | 1 + art/preprocessing/audio/__init__.py | 1 + .../__init__.py | 1 + art/preprocessing/image/__init__.py | 1 + .../standardisation_mean_std/__init__.py | 1 + art/summary_writer.py | 2 +- art/utils.py | 2 +- art/visualization.py | 2 +- examples/adversarial_training_FBF.py | 1 + .../adversarial_training_data_augmentation.py | 1 + examples/backdoor_attack_dgm_red.py | 1 + examples/backdoor_attack_dgm_trail.py | 1 + examples/get_started_keras.py | 1 + examples/get_started_lightgbm.py | 1 + examples/get_started_mxnet.py | 1 + examples/get_started_pytorch.py | 1 + 
examples/get_started_scikit_learn.py | 1 + examples/get_started_tensorflow.py | 1 + examples/get_started_tensorflow_v2.py | 1 + examples/get_started_xgboost.py | 1 + examples/inverse_gan_author_utils.py | 2 +- pyproject.toml | 1 + requirements_test.txt | 12 ++- tests/attacks/evasion/test_lowprofool.py | 2 +- .../attacks/evasion/test_pe_malware_attack.py | 22 ++--- .../classification/test_scikitlearn.py | 16 +--- tests/test_utils.py | 2 +- 107 files changed, 237 insertions(+), 165 deletions(-) diff --git a/.github/workflows/ci-style-checks.yml b/.github/workflows/ci-style-checks.yml index b3227e3929..8119eaa915 100644 --- a/.github/workflows/ci-style-checks.yml +++ b/.github/workflows/ci-style-checks.yml @@ -39,14 +39,11 @@ jobs: - name: Install Dependencies run: | python -m pip install --upgrade pip setuptools wheel - pip install -q pylint==2.12.2 mypy==1.7.1 pycodestyle==2.8.0 black==21.12b0 ruff==0.5.5 - pip install -q -r <(sed '/^numpy/d;/^pluggy/d;/^tensorflow/d;/^keras/d' requirements_test.txt) + pip install -q pylint==3.2.6 mypy==1.11.1 pycodestyle==2.12.0 black==24.4.2 ruff==0.5.5 + pip install -q -r <(sed '/^numpy/d;/^tensorflow/d;/^keras/d' requirements_test.txt) pip install numpy==1.22.4 pip install tensorflow==2.13.1 pip install keras==2.13.1 - pip install types-six - pip install types-PyYAML - pip install types-setuptools pip list - name: pycodestyle run: pycodestyle --ignore=C0330,C0415,E203,E231,W503 --max-line-length=120 art diff --git a/art/__init__.py b/art/__init__.py index 33090f7666..497fff6eff 100644 --- a/art/__init__.py +++ b/art/__init__.py @@ -1,6 +1,7 @@ """ The Adversarial Robustness Toolbox (ART). """ + import logging.config # Project Imports diff --git a/art/attacks/__init__.py b/art/attacks/__init__.py index 35aab43da8..cbb23778c4 100644 --- a/art/attacks/__init__.py +++ b/art/attacks/__init__.py @@ -1,6 +1,7 @@ """ Module providing adversarial attacks under a common interface. """ + from art.attacks.attack import Attack, EvasionAttack, PoisoningAttack, PoisoningAttackBlackBox, PoisoningAttackWhiteBox from art.attacks.attack import PoisoningAttackGenerator, PoisoningAttackTransformer, PoisoningAttackObjectDetector from art.attacks.attack import ExtractionAttack, InferenceAttack, AttributeInferenceAttack diff --git a/art/attacks/evasion/__init__.py b/art/attacks/evasion/__init__.py index 35c9e61df4..63e62c3e56 100644 --- a/art/attacks/evasion/__init__.py +++ b/art/attacks/evasion/__init__.py @@ -1,6 +1,7 @@ """ Module providing evasion attacks under a common interface. 
""" + # pylint: disable=C0413 import importlib diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py index f6af29d8ce..0346df9db4 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py @@ -325,7 +325,7 @@ def _get_circular_patch_mask(self, sharpness: int = 40) -> np.ndarray: x = np.linspace(-1, 1, diameter) y = np.linspace(-1, 1, diameter) x_grid, y_grid = np.meshgrid(x, y, sparse=True) - z_grid = (x_grid ** 2 + y_grid ** 2) ** sharpness + z_grid = (x_grid**2 + y_grid**2) ** sharpness mask = 1 - np.clip(z_grid, -1, 1) diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py index a9a3f0bf6c..c9aa9bc1b5 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py @@ -269,7 +269,7 @@ def _get_circular_patch_mask(self, nb_samples: int, sharpness: int = 40) -> "tor x = np.linspace(-1, 1, diameter) y = np.linspace(-1, 1, diameter) x_grid, y_grid = np.meshgrid(x, y, sparse=True) - z_grid = (x_grid ** 2 + y_grid ** 2) ** sharpness + z_grid = (x_grid**2 + y_grid**2) ** sharpness image_mask: Union[int, np.ndarray[Any, np.dtype[Any]]] = 1 - np.clip(z_grid, -1, 1) elif self.patch_type == "square": image_mask = np.ones((diameter, diameter)) diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py index ab71a74f47..3a613d46ef 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py @@ -240,7 +240,7 @@ def _get_circular_patch_mask(self, nb_samples: int, sharpness: int = 40) -> "tf. 
x = np.linspace(-1, 1, diameter) y = np.linspace(-1, 1, diameter) x_grid, y_grid = np.meshgrid(x, y, sparse=True) - z_grid = (x_grid ** 2 + y_grid ** 2) ** sharpness + z_grid = (x_grid**2 + y_grid**2) ** sharpness image_mask = 1 - np.clip(z_grid, -1, 1) image_mask = np.expand_dims(image_mask, axis=2) diff --git a/art/attacks/evasion/boundary.py b/art/attacks/evasion/boundary.py index 2738747ba4..d0416ec3f0 100644 --- a/art/attacks/evasion/boundary.py +++ b/art/attacks/evasion/boundary.py @@ -360,7 +360,7 @@ def _orthogonal_perturb(self, delta: float, current_sample: np.ndarray, original perturb_flat -= np.dot(perturb_flat, direction_flat.T) * direction_flat perturb = perturb_flat.reshape(self.estimator.input_shape) - hypotenuse = np.sqrt(1 + delta ** 2) + hypotenuse = np.sqrt(1 + delta**2) perturb = ((1 - hypotenuse) * (current_sample - original_sample) + perturb) / hypotenuse return perturb diff --git a/art/attacks/evasion/brendel_bethge.py b/art/attacks/evasion/brendel_bethge.py index 16c795cac1..a19c218f60 100644 --- a/art/attacks/evasion/brendel_bethge.py +++ b/art/attacks/evasion/brendel_bethge.py @@ -607,8 +607,8 @@ def _cubicmin(self, a, fa, fpa, b, fb, c, fc): if (db == 0) or (dc == 0) or (b == c): return None denom = (db * dc) ** 2 * (db - dc) - A = dc ** 2 * (fb - fa - C * db) - db ** 2 * (fc - fa - C * dc) - B = -(dc ** 3) * (fb - fa - C * db) + db ** 3 * (fc - fa - C * dc) + A = dc**2 * (fb - fa - C * db) - db**2 * (fc - fa - C * dc) + B = -(dc**3) * (fb - fa - C * db) + db**3 * (fc - fa - C * dc) A /= denom B /= denom @@ -742,7 +742,7 @@ def _minimum_norm_to_boundary(self, x, b, _ell, _u, c, bnorm): """ N = x.shape[0] - lambda_lower = 2 * c / (bnorm ** 2 + EPS) + lambda_lower = 2 * c / (bnorm**2 + EPS) lambda_upper = np.sign(c) * np.inf # optimal initial point (if box-constraints are neglected) _lambda = lambda_lower k = 0 @@ -761,12 +761,12 @@ def _minimum_norm_to_boundary(self, x, b, _ell, _u, c, bnorm): max_step = _u - x[n] delta_step = min(max_step, lam_step) _c += b[n] * delta_step - norm += delta_step ** 2 + norm += delta_step**2 else: max_step = _ell - x[n] delta_step = max(max_step, lam_step) _c += b[n] * delta_step - norm += delta_step ** 2 + norm += delta_step**2 else: for n in range(N): lam_step = _lambda * b[n] / 2 @@ -774,12 +774,12 @@ def _minimum_norm_to_boundary(self, x, b, _ell, _u, c, bnorm): max_step = _ell - x[n] delta_step = max(max_step, lam_step) _c += b[n] * delta_step - norm += delta_step ** 2 + norm += delta_step**2 else: max_step = _u - x[n] delta_step = min(max_step, lam_step) _c += b[n] * delta_step - norm += delta_step ** 2 + norm += delta_step**2 # adjust lambda if np.abs(_c) < np.abs(c): @@ -819,8 +819,8 @@ def optimize_boundary_s_t_trustregion_fun_and_jac(self, params, x0, x, b, min_, _mu = params[0] t = 1 / (2 * _mu + EPS) - g = -_mu * r ** 2 - grad_mu = -(r ** 2) + g = -_mu * r**2 + grad_mu = -(r**2) for n in range(N): d = -s * b[n] * t @@ -830,9 +830,9 @@ def optimize_boundary_s_t_trustregion_fun_and_jac(self, params, x0, x, b, min_, elif d > max_ - x[n]: d = max_ - x[n] else: - grad_mu += (b[n] + 2 * _mu * d) * (b[n] / (2 * _mu ** 2 + EPS)) + grad_mu += (b[n] + 2 * _mu * d) * (b[n] / (2 * _mu**2 + EPS)) - grad_mu += d ** 2 + grad_mu += d**2 g += (b[n] + _mu * d) * d return -g, -np.array([grad_mu]) @@ -1033,15 +1033,15 @@ def fun_and_jac(self, params, x0, x, b, min_, max_, c, r): distance += (d - dx) ** 2 b_dot_d += bn * d - d_norm += d ** 2 + d_norm += d**2 - g += (dx - d) ** 2 + mu * d ** 2 + lam * bn * d + g += (dx - d) ** 2 + mu * d**2 + 
lam * bn * d d_g_d_lam += bn * d - d_g_d_mu += d ** 2 + d_g_d_mu += d**2 - g += -mu * r ** 2 - lam * c + g += -mu * r**2 - lam * c d_g_d_lam -= c - d_g_d_mu -= r ** 2 + d_g_d_mu -= r**2 return -g, -np.array([d_g_d_lam, d_g_d_mu]) @@ -1100,9 +1100,9 @@ def fun_and_jac(self, params, x0, x, b, min_, max_, c, r): d_g_d_lam -= prefac * bn * t d_g_d_mu -= prefac * 2 * d * t - g += np.abs(dx - d) + mu * d ** 2 + lam * bn * d + g += np.abs(dx - d) + mu * d**2 + lam * bn * d d_g_d_lam += bn * d - d_g_d_mu += d ** 2 + d_g_d_mu += d**2 else: # mu == 0 for n in range(N): dx = x0[n] - x[n] @@ -1114,13 +1114,13 @@ def fun_and_jac(self, params, x0, x, b, min_, max_, c, r): else: d = min_ - x[n] - g += np.abs(dx - d) + mu * d ** 2 + lam * bn * d + g += np.abs(dx - d) + mu * d**2 + lam * bn * d d_g_d_lam += bn * d - d_g_d_mu += d ** 2 + d_g_d_mu += d**2 - g += -mu * r ** 2 - lam * c + g += -mu * r**2 - lam * c d_g_d_lam -= c - d_g_d_mu -= r ** 2 + d_g_d_mu -= r**2 return -g, -np.array([d_g_d_lam, d_g_d_mu]) @@ -1154,7 +1154,7 @@ def _get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True): delta[n] = d b_dot_d += b[n] * d - norm_d += d ** 2 + norm_d += d**2 distance += np.abs(d - dx) else: # mu == 0 for n in range(N): @@ -1169,7 +1169,7 @@ def _get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True): delta[n] = d b_dot_d += b[n] * d - norm_d += d ** 2 + norm_d += d**2 distance += np.abs(d - dx) if touchup: @@ -1185,7 +1185,7 @@ def _get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True): old_d = delta[n] new_d = old_d + dc / b[n] - if x[n] + new_d <= max_ and x[n] + new_d >= min_ and norm_d - old_d ** 2 + new_d ** 2 <= r ** 2: + if x[n] + new_d <= max_ and x[n] + new_d >= min_ and norm_d - old_d**2 + new_d**2 <= r**2: # conditions (a) and (b) are fulfilled if k == 0: min_distance = distance - np.abs(old_d - dx) + np.abs(new_d - dx) @@ -1232,7 +1232,7 @@ def binary_search(self, q0, bounds, x0, x, b, min_, max_, c, r, etol=1e-6, maxit func_calls = 0 bnorm = np.linalg.norm(b) - lambda0 = 2 * c / bnorm ** 2 + lambda0 = 2 * c / bnorm**2 k = 0 @@ -1349,10 +1349,10 @@ def fun(self, epsilon, x0, x, b, ell, u, c, r, lambda0=None): _active_bnorm += b[n] ** 2 _c += b[n] * delta_step - norm += delta_step ** 2 + norm += delta_step**2 if 0.9999 * np.abs(c) - EPS < np.abs(_c) < 1.0001 * np.abs(c) + EPS: - if norm > r ** 2: + if norm > r**2: return -np.inf, k, _lambda else: return -epsilon, k, _lambda @@ -1534,7 +1534,7 @@ def _nelder_mead_algorithm( ργ = ρ * γ ρχ = ρ * χ - σ_n = σ ** n + σ_n = σ**n f_val = np.empty(n + 1, dtype=np.float64) for i in range(n + 1): @@ -1766,7 +1766,7 @@ def fun(self, params, x0, x, b, min_, max_, c, r): lam, mu = params N = x0.shape[0] - g = -mu * r ** 2 - lam * c + g = -mu * r**2 - lam * c if mu > 0: t = 1 / (2 * mu) @@ -1775,7 +1775,7 @@ def fun(self, params, x0, x, b, min_, max_, c, r): dx = x0[n] - x[n] bn = b[n] - case1 = lam * bn * dx + mu * dx ** 2 + case1 = lam * bn * dx + mu * dx**2 optd = -lam * bn * t if optd < min_ - x[n]: @@ -1783,12 +1783,12 @@ def fun(self, params, x0, x, b, min_, max_, c, r): elif optd > max_ - x[n]: optd = max_ - x[n] - case2 = 1 + lam * bn * optd + mu * optd ** 2 + case2 = 1 + lam * bn * optd + mu * optd**2 if case1 <= case2: - g += mu * dx ** 2 + lam * bn * dx + g += mu * dx**2 + lam * bn * dx else: - g += 1 + mu * optd ** 2 + lam * bn * optd + g += 1 + mu * optd**2 + lam * bn * optd else: # arg min_delta ||delta - dx||_0 + lam * b^T delta # case delta[n] = dx[n]: lam * b[n] * dx[n] @@ -1800,7 +1800,7 @@ 
def fun(self, params, x0, x, b, min_, max_, c, r): case2 = 1 + lam * bn * (min_ - x[n]) case3 = 1 + lam * bn * (max_ - x[n]) if case1 <= case2 and case1 <= case3: - g += mu * dx ** 2 + lam * bn * dx + g += mu * dx**2 + lam * bn * dx elif case2 < case3: g += 1 + mu * (min_ - x[n]) ** 2 + lam * bn * (min_ - x[n]) else: @@ -1849,7 +1849,7 @@ def __get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True): bn = b[n] t = 1 / (2 * mu) - case1 = lam * bn * dx + mu * dx ** 2 + case1 = lam * bn * dx + mu * dx**2 optd = -lam * bn * t if optd < min_ - x[n]: @@ -1857,7 +1857,7 @@ def __get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True): elif optd > max_ - x[n]: optd = max_ - x[n] - case2 = 1 + lam * bn * optd + mu * optd ** 2 + case2 = 1 + lam * bn * optd + mu * optd**2 if case1 <= case2: d = dx @@ -1867,7 +1867,7 @@ def __get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True): delta[n] = d b_dot_d += bn * d - norm_d += d ** 2 + norm_d += d**2 else: # mu == 0 for n in range(N): dx = x0[n] - x[n] @@ -1885,7 +1885,7 @@ def __get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True): distance += 1 delta[n] = d - norm_d += d ** 2 + norm_d += d**2 b_dot_d += bn * d if touchup: @@ -1904,22 +1904,22 @@ def __get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True): old_d = delta[n] new_d = old_d + dc / b[n] - if x[n] + new_d <= max_ and x[n] + new_d >= min_ and norm_d - old_d ** 2 + new_d ** 2 <= r ** 2: + if x[n] + new_d <= max_ and x[n] + new_d >= min_ and norm_d - old_d**2 + new_d**2 <= r**2: # conditions (a) and (b) are fulfilled if k == 0: min_distance = distance - (np.abs(old_d - dx) > 1e-10) + (np.abs(new_d - dx) > 1e-10) min_distance_idx = n - min_norm = norm_d - old_d ** 2 + new_d ** 2 + min_norm = norm_d - old_d**2 + new_d**2 k += 1 else: new_distance = distance - (np.abs(old_d - dx) > 1e-10) + (np.abs(new_d - dx) > 1e-10) if ( min_distance > new_distance or min_distance == new_distance - and min_norm > norm_d - old_d ** 2 + new_d ** 2 + and min_norm > norm_d - old_d**2 + new_d**2 ): min_distance = new_distance - min_norm = norm_d - old_d ** 2 + new_d ** 2 + min_norm = norm_d - old_d**2 + new_d**2 min_distance_idx = n if k > 0: diff --git a/art/attacks/evasion/hop_skip_jump.py b/art/attacks/evasion/hop_skip_jump.py index 383f366a87..0e3ae7433f 100644 --- a/art/attacks/evasion/hop_skip_jump.py +++ b/art/attacks/evasion/hop_skip_jump.py @@ -597,7 +597,7 @@ def _compute_update( # Normalize random noise to fit into the range of input data rnd_noise = rnd_noise / np.sqrt( np.sum( - rnd_noise ** 2, + rnd_noise**2, axis=tuple(range(len(rnd_noise_shape)))[1:], keepdims=True, ) diff --git a/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py b/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py index cdb03fbbe2..c7211125e9 100644 --- a/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py +++ b/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py @@ -324,9 +324,9 @@ class only supports targeted attack. 
# Reset delta with new result local_batch_shape = successful_adv_input_1st_stage.shape self.global_optimal_delta.data = torch.zeros(self.batch_size, self.global_max_length).type(torch.float64) - self.global_optimal_delta.data[ - : local_batch_shape[0], : local_batch_shape[1] - ] = successful_perturbation_1st_stage + self.global_optimal_delta.data[: local_batch_shape[0], : local_batch_shape[1]] = ( + successful_perturbation_1st_stage + ) # Second stage of attack successful_adv_input_2nd_stage = self._attack_2nd_stage( @@ -767,7 +767,7 @@ def _psd_transform(self, delta: "torch.Tensor", original_max_psd: np.ndarray) -> # Compute the psd matrix psd = (8.0 / 3.0) * transformed_delta / self.win_length - psd = psd ** 2 + psd = psd**2 psd = ( torch.pow(torch.tensor(10.0).type(torch.float64), torch.tensor(9.6).type(torch.float64)).to( self.estimator.device diff --git a/art/attacks/evasion/lowprofool.py b/art/attacks/evasion/lowprofool.py index c1b298e0bb..0c38290009 100644 --- a/art/attacks/evasion/lowprofool.py +++ b/art/attacks/evasion/lowprofool.py @@ -217,7 +217,7 @@ def __calculate_feature_importances(self, x: np.ndarray, y: np.ndarray) -> None: # Apply a simple Pearson correlation calculation. pearson_correlations = [pearsonr(x[:, col], y)[0] for col in range(x.shape[1])] absolutes = np.abs(np.array(pearson_correlations)) - self.importance_vec = absolutes / np.power(np.sum(absolutes ** 2), 0.5) + self.importance_vec = absolutes / np.power(np.sum(absolutes**2), 0.5) elif callable(self.importance): # pragma: no cover # Apply a custom function to call on the provided data. diff --git a/art/attacks/evasion/newtonfool.py b/art/attacks/evasion/newtonfool.py index 3f1d5be517..4384824b7f 100644 --- a/art/attacks/evasion/newtonfool.py +++ b/art/attacks/evasion/newtonfool.py @@ -163,7 +163,7 @@ def _compute_pert(theta: np.ndarray, grads: np.ndarray, norm_grad: np.ndarray) - tol = 10e-8 nom = -theta.reshape((-1,) + (1,) * (len(grads.shape) - 1)) * grads - denom = norm_grad ** 2 + denom = norm_grad**2 denom[denom < tol] = tol result = nom / denom.reshape((-1,) + (1,) * (len(grads.shape) - 1)) diff --git a/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py b/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py index d61f8bb4ac..3690440b7e 100644 --- a/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py +++ b/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py @@ -153,7 +153,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n x_adv = x.copy().astype(ART_NUMPY_DTYPE) # Compute perturbation with batching - for (batch_id, batch_all) in enumerate( + for batch_id, batch_all in enumerate( tqdm(data_loader, desc="OverTheAirFlickeringPyTorch - Batches", leave=False, disable=not self.verbose) ): (batch, batch_labels) = batch_all[0], batch_all[1] @@ -265,7 +265,7 @@ def _get_loss_gradients(self, x: "torch.Tensor", y: "torch.Tensor", perturbation l_1 = torch.zeros(1).to(self.estimator.device) l_m = (label_prob - max_non_label_prob) * (1 - 2 * int(self.targeted)) + self.loss_margin - l_2 = (l_m ** 2) / self.loss_margin + l_2 = (l_m**2) / self.loss_margin l_3 = l_m adversarial_loss = torch.max(l_1, torch.min(l_2, l_3)[0])[0] @@ -273,7 +273,7 @@ def _get_loss_gradients(self, x: "torch.Tensor", y: "torch.Tensor", perturbation # calculate regularization terms # thickness - loss term perturbation_i = perturbation[[i]] + eps - norm_reg = torch.mean(perturbation_i ** 2) + 1e-12 + 
norm_reg = torch.mean(perturbation_i**2) + 1e-12 perturbation_roll_right = torch.roll(perturbation_i, 1, dims=1) perturbation_roll_left = torch.roll(perturbation_i, -1, dims=1) diff --git a/art/attacks/evasion/pe_malware_attack.py b/art/attacks/evasion/pe_malware_attack.py index 2ee43e2ee3..8e47cd4075 100644 --- a/art/attacks/evasion/pe_malware_attack.py +++ b/art/attacks/evasion/pe_malware_attack.py @@ -583,9 +583,8 @@ def generate( # pylint: disable=W0221 @staticmethod def process_file( - filepath: str, padding_char: int = 256, maxlen: int = 2 ** 20 + filepath: str, padding_char: int = 256, maxlen: int = 2**20 ) -> Tuple[np.ndarray, int]: # pragma: no cover - """ Go from raw file to numpy array. @@ -654,7 +653,7 @@ def insert_section( datapoint: Union[List[int], str], sample_size: Optional[int] = None, padding_char: int = 256, - maxlen: int = 2 ** 20, + maxlen: int = 2**20, bytes_to_assign: Optional[int] = None, verbose: bool = False, ) -> Union[ diff --git a/art/attacks/evasion/simba.py b/art/attacks/evasion/simba.py index 01916060ee..bab36accc9 100644 --- a/art/attacks/evasion/simba.py +++ b/art/attacks/evasion/simba.py @@ -415,9 +415,9 @@ def _block_idct(self, x, block_size=8, masked=False, ratio=0.5): submat = x[:, :, (i * block_size) : ((i + 1) * block_size), (j * block_size) : ((j + 1) * block_size)] if masked: submat = submat * mask - var_z[ - :, :, (i * block_size) : ((i + 1) * block_size), (j * block_size) : ((j + 1) * block_size) - ] = idct(idct(submat, axis=3, norm="ortho"), axis=2, norm="ortho") + var_z[:, :, (i * block_size) : ((i + 1) * block_size), (j * block_size) : ((j + 1) * block_size)] = ( + idct(idct(submat, axis=3, norm="ortho"), axis=2, norm="ortho") + ) if self.estimator.channels_first: return var_z diff --git a/art/attacks/evasion/square_attack.py b/art/attacks/evasion/square_attack.py index 2a6b2b6694..4a86a2fc0d 100644 --- a/art/attacks/evasion/square_attack.py +++ b/art/attacks/evasion/square_attack.py @@ -237,13 +237,13 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n delta_new = np.zeros(self.estimator.input_shape) if self.estimator.channels_first: - delta_new[ - :, height_mid : height_mid + height_tile, width_start : width_start + height_tile - ] = np.random.choice([-2 * self.eps, 2 * self.eps], size=[channels, 1, 1]) + delta_new[:, height_mid : height_mid + height_tile, width_start : width_start + height_tile] = ( + np.random.choice([-2 * self.eps, 2 * self.eps], size=[channels, 1, 1]) + ) else: - delta_new[ - height_mid : height_mid + height_tile, width_start : width_start + height_tile, : - ] = np.random.choice([-2 * self.eps, 2 * self.eps], size=[1, 1, channels]) + delta_new[height_mid : height_mid + height_tile, width_start : width_start + height_tile, :] = ( + np.random.choice([-2 * self.eps, 2 * self.eps], size=[1, 1, channels]) + ) x_robust_new = x_robust + delta_new @@ -281,12 +281,12 @@ def _get_perturbation(height): x_c -= 1 y_c -= 1 - gaussian_perturbation /= np.sqrt(np.sum(gaussian_perturbation ** 2)) + gaussian_perturbation /= np.sqrt(np.sum(gaussian_perturbation**2)) delta[: height // 2] = gaussian_perturbation delta[height // 2 : height // 2 + gaussian_perturbation.shape[0]] = -gaussian_perturbation - delta /= np.sqrt(np.sum(delta ** 2)) + delta /= np.sqrt(np.sum(delta**2)) if random.random() > 0.5: delta = np.transpose(delta) @@ -325,7 +325,7 @@ def _get_perturbation(height): height_start += height_tile x_robust_new = np.clip( - x_robust + delta_init / np.sqrt(np.sum(delta_init ** 2, axis=(1, 2, 3), 
keepdims=True)) * self.eps, + x_robust + delta_init / np.sqrt(np.sum(delta_init**2, axis=(1, 2, 3), keepdims=True)) * self.eps, self.estimator.clip_values[0], self.estimator.clip_values[1], ) @@ -455,12 +455,12 @@ def _get_perturbation(height): :, height_start : height_start + height_tile, width_start : width_start + height_tile, : ] / (np.maximum(1e-9, w_1_norm)) - diff_norm = (self.eps * np.ones(delta_new.shape)) ** 2 - norms_x_robust ** 2 + diff_norm = (self.eps * np.ones(delta_new.shape)) ** 2 - norms_x_robust**2 diff_norm[diff_norm < 0.0] = 0.0 if self.estimator.channels_first: - delta_new /= np.sqrt(np.sum(delta_new ** 2, axis=(2, 3), keepdims=True)) * np.sqrt( - diff_norm / channels + w_norm ** 2 + delta_new /= np.sqrt(np.sum(delta_new**2, axis=(2, 3), keepdims=True)) * np.sqrt( + diff_norm / channels + w_norm**2 ) delta_x_robust_init[ :, @@ -472,8 +472,8 @@ def _get_perturbation(height): :, :, height_start : height_start + height_tile, width_start : width_start + height_tile ] = delta_new else: - delta_new /= np.sqrt(np.sum(delta_new ** 2, axis=(1, 2), keepdims=True)) * np.sqrt( - diff_norm / channels + w_norm ** 2 + delta_new /= np.sqrt(np.sum(delta_new**2, axis=(1, 2), keepdims=True)) * np.sqrt( + diff_norm / channels + w_norm**2 ) delta_x_robust_init[ :, @@ -489,7 +489,7 @@ def _get_perturbation(height): x_init + self.eps * delta_x_robust_init - / np.sqrt(np.sum(delta_x_robust_init ** 2, axis=(1, 2, 3), keepdims=True)), + / np.sqrt(np.sum(delta_x_robust_init**2, axis=(1, 2, 3), keepdims=True)), self.estimator.clip_values[0], self.estimator.clip_values[1], ) diff --git a/art/attacks/evasion/wasserstein.py b/art/attacks/evasion/wasserstein.py index 9b8e913c12..aa83a2dcac 100644 --- a/art/attacks/evasion/wasserstein.py +++ b/art/attacks/evasion/wasserstein.py @@ -40,7 +40,7 @@ logger = logging.getLogger(__name__) -EPS_LOG = 10 ** -10 +EPS_LOG = 10**-10 class Wasserstein(EvasionAttack): @@ -599,7 +599,7 @@ def _unfold(x: np.ndarray, kernel_size: int, padding: int) -> np.ndarray: # Do unfolding res_dim_0 = x.shape[0] - res_dim_1 = x.shape[1] * kernel_size ** 2 + res_dim_1 = x.shape[1] * kernel_size**2 res_dim_2 = (shape[0] - kernel_size + 1) * (shape[1] - kernel_size + 1) result = np.zeros((res_dim_0, res_dim_1, res_dim_2)) @@ -634,7 +634,7 @@ def _local_transport(self, var_k: np.ndarray, x: np.ndarray, kernel_size: int) - # Compute local transport unfold_x = self._unfold(x=x, kernel_size=kernel_size, padding=kernel_size // 2) unfold_x = unfold_x.swapaxes(-1, -2) - unfold_x = unfold_x.reshape(*unfold_x.shape[:-1], num_channels, kernel_size ** 2) + unfold_x = unfold_x.reshape(*unfold_x.shape[:-1], num_channels, kernel_size**2) unfold_x = unfold_x.swapaxes(-2, -3) tmp_k = var_k.reshape(var_k.shape[0], num_channels, -1) diff --git a/art/attacks/evasion/zoo.py b/art/attacks/evasion/zoo.py index 3c48df2d33..62b5f3e38a 100644 --- a/art/attacks/evasion/zoo.py +++ b/art/attacks/evasion/zoo.py @@ -553,7 +553,7 @@ def _optimizer_adam_coordinate( # ADAM update mean[index] = beta1 * mean[index] + (1 - beta1) * grads - var[index] = beta2 * var[index] + (1 - beta2) * grads ** 2 + var[index] = beta2 * var[index] + (1 - beta2) * grads**2 corr = (np.sqrt(1 - np.power(beta2, adam_epochs[index]))) / (1 - np.power(beta1, adam_epochs[index])) orig_shape = current_noise.shape diff --git a/art/attacks/extraction/__init__.py b/art/attacks/extraction/__init__.py index e0fd6f8ce2..358724f7f9 100644 --- a/art/attacks/extraction/__init__.py +++ b/art/attacks/extraction/__init__.py @@ -1,6 +1,7 @@ """ Module 
providing extraction attacks under a common interface. """ + from art.attacks.extraction.functionally_equivalent_extraction import FunctionallyEquivalentExtraction from art.attacks.extraction.copycat_cnn import CopycatCNN from art.attacks.extraction.knockoff_nets import KnockoffNets diff --git a/art/attacks/inference/__init__.py b/art/attacks/inference/__init__.py index b0ea556f5e..99f97b8feb 100644 --- a/art/attacks/inference/__init__.py +++ b/art/attacks/inference/__init__.py @@ -1,6 +1,7 @@ """ Module providing inference attacks. """ + from art.attacks.inference import attribute_inference from art.attacks.inference import membership_inference from art.attacks.inference import model_inversion diff --git a/art/attacks/inference/attribute_inference/__init__.py b/art/attacks/inference/attribute_inference/__init__.py index 87402c388a..f2b0912d1c 100644 --- a/art/attacks/inference/attribute_inference/__init__.py +++ b/art/attacks/inference/attribute_inference/__init__.py @@ -1,6 +1,7 @@ """ Module providing attribute inference attacks. """ + from art.attacks.inference.attribute_inference.black_box import AttributeInferenceBlackBox from art.attacks.inference.attribute_inference.baseline import AttributeInferenceBaseline from art.attacks.inference.attribute_inference.true_label_baseline import AttributeInferenceBaselineTrueLabel diff --git a/art/attacks/inference/attribute_inference/baseline.py b/art/attacks/inference/attribute_inference/baseline.py index 5ecd6cda30..bf137d499e 100644 --- a/art/attacks/inference/attribute_inference/baseline.py +++ b/art/attacks/inference/attribute_inference/baseline.py @@ -295,7 +295,7 @@ def forward(self, x): self.attack_model.train() # type: ignore for _ in range(self.epochs): - for (input1, targets) in train_loader: + for input1, targets in train_loader: input1, targets = to_cuda(input1), to_cuda(targets) _, targets = torch.autograd.Variable(input1), torch.autograd.Variable(targets) diff --git a/art/attacks/inference/attribute_inference/black_box.py b/art/attacks/inference/attribute_inference/black_box.py index 32e58332f4..edce93d74d 100644 --- a/art/attacks/inference/attribute_inference/black_box.py +++ b/art/attacks/inference/attribute_inference/black_box.py @@ -357,7 +357,7 @@ def forward(self, x): self.attack_model.train() # type: ignore for _ in range(self.epochs): - for (input1, targets) in train_loader: + for input1, targets in train_loader: input1, targets = to_cuda(input1), to_cuda(targets) _, targets = torch.autograd.Variable(input1), torch.autograd.Variable(targets) diff --git a/art/attacks/inference/attribute_inference/true_label_baseline.py b/art/attacks/inference/attribute_inference/true_label_baseline.py index 2fab59cfde..9430e4dfb4 100644 --- a/art/attacks/inference/attribute_inference/true_label_baseline.py +++ b/art/attacks/inference/attribute_inference/true_label_baseline.py @@ -323,7 +323,7 @@ def forward(self, x): self.attack_model.train() # type: ignore for _ in range(self.epochs): - for (input1, targets) in train_loader: + for input1, targets in train_loader: input1, targets = to_cuda(input1), to_cuda(targets) _, targets = torch.autograd.Variable(input1), torch.autograd.Variable(targets) diff --git a/art/attacks/inference/membership_inference/__init__.py b/art/attacks/inference/membership_inference/__init__.py index 08d7385a5d..21e1415cbd 100644 --- a/art/attacks/inference/membership_inference/__init__.py +++ b/art/attacks/inference/membership_inference/__init__.py @@ -1,6 +1,7 @@ """ Module providing membership inference attacks. 
""" + from art.attacks.inference.membership_inference.black_box import MembershipInferenceBlackBox from art.attacks.inference.membership_inference.black_box_rule_based import MembershipInferenceBlackBoxRuleBased from art.attacks.inference.membership_inference.label_only_gap_attack import LabelOnlyGapAttack diff --git a/art/attacks/inference/membership_inference/black_box.py b/art/attacks/inference/membership_inference/black_box.py index a142d86d16..1704346c44 100644 --- a/art/attacks/inference/membership_inference/black_box.py +++ b/art/attacks/inference/membership_inference/black_box.py @@ -346,7 +346,7 @@ def forward(self, x_1, label): self.attack_model.train() # type: ignore for _ in range(self.epochs): - for (input1, input2, targets) in train_loader: + for input1, input2, targets in train_loader: input1, input2, targets = to_cuda(input1), to_cuda(input2), to_cuda(targets) _, input2 = torch.autograd.Variable(input1), torch.autograd.Variable(input2) targets = torch.autograd.Variable(targets) @@ -403,7 +403,7 @@ def forward(self, x_1): self.attack_model.train() # type: ignore for _ in range(self.epochs): - for (input1, targets) in train_loader: + for input1, targets in train_loader: input1, targets = to_cuda(input1), to_cuda(targets) input1 = torch.autograd.Variable(input1) targets = torch.autograd.Variable(targets) diff --git a/art/attacks/inference/model_inversion/__init__.py b/art/attacks/inference/model_inversion/__init__.py index 73fc935f8a..d1f3e6c1c1 100644 --- a/art/attacks/inference/model_inversion/__init__.py +++ b/art/attacks/inference/model_inversion/__init__.py @@ -1,4 +1,5 @@ """ Module providing model inversion attacks. """ + from art.attacks.inference.model_inversion.mi_face import MIFace diff --git a/art/attacks/inference/reconstruction/__init__.py b/art/attacks/inference/reconstruction/__init__.py index b818ab0a75..03d8e39095 100644 --- a/art/attacks/inference/reconstruction/__init__.py +++ b/art/attacks/inference/reconstruction/__init__.py @@ -1,4 +1,5 @@ """ Module providing model inversion attacks. """ + from art.attacks.inference.reconstruction.white_box import DatabaseReconstruction diff --git a/art/attacks/poisoning/__init__.py b/art/attacks/poisoning/__init__.py index 147d86f87e..fa62ad125a 100644 --- a/art/attacks/poisoning/__init__.py +++ b/art/attacks/poisoning/__init__.py @@ -1,6 +1,7 @@ """ Module providing poisoning attacks under a common interface. 
""" + from art.attacks.poisoning.backdoor_attack_dgm.backdoor_attack_dgm_red import BackdoorAttackDGMReDTensorFlowV2 from art.attacks.poisoning.backdoor_attack_dgm.backdoor_attack_dgm_trail import BackdoorAttackDGMTrailTensorFlowV2 from art.attacks.poisoning.backdoor_attack import PoisoningAttackBackdoor diff --git a/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py b/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py index 1deeea7e7c..b7f9edb9ac 100644 --- a/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py +++ b/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py @@ -96,7 +96,7 @@ def poison_estimator( max_iter=100, lambda_p=0.1, verbose=-1, - **kwargs + **kwargs, # ): ) -> "GENERATOR_TYPE": """ diff --git a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py index 8b2925cf5d..cd91c54e88 100644 --- a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py +++ b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py @@ -221,7 +221,7 @@ def poison( # pylint: disable=W0221 decay_exp = 0 else: decay_exp = max(max_index) + 1 - learning_rate = self.learning_rate * (self.decay_coeff ** decay_exp) + learning_rate = self.learning_rate * (self.decay_coeff**decay_exp) # Compute distance between features and match samples feat2 = self.estimator.get_activations(poison_samples, self.feature_layer) diff --git a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py index 8d34b1713c..e5466f5510 100644 --- a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py +++ b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py @@ -230,7 +230,7 @@ def poison( # pylint: disable=W0221 decay_exp = 0 else: decay_exp = max(max_index) + 1 - learning_rate = self.learning_rate * (self.decay_coeff ** decay_exp) + learning_rate = self.learning_rate * (self.decay_coeff**decay_exp) # Compute the feature representation of the current poisons and # identify the closest trigger sample for each poison diff --git a/art/attacks/poisoning/perturbations/__init__.py b/art/attacks/poisoning/perturbations/__init__.py index 0e608d5680..77fd7fcb6f 100644 --- a/art/attacks/poisoning/perturbations/__init__.py +++ b/art/attacks/poisoning/perturbations/__init__.py @@ -1,6 +1,7 @@ """ Module providing perturbation functions under a common interface """ + from art.attacks.poisoning.perturbations.image_perturbations import ( add_pattern_bd, add_single_bd, diff --git a/art/defences/__init__.py b/art/defences/__init__.py index 65d8b10fa1..59dce476a9 100644 --- a/art/defences/__init__.py +++ b/art/defences/__init__.py @@ -1,6 +1,7 @@ """ Module implementing multiple types of defences against adversarial attacks. """ + from art.defences import detector from art.defences import postprocessor from art.defences import preprocessor diff --git a/art/defences/detector/__init__.py b/art/defences/detector/__init__.py index f2f7937aae..242e822caa 100644 --- a/art/defences/detector/__init__.py +++ b/art/defences/detector/__init__.py @@ -1,5 +1,6 @@ """ Module implementing detector-based defences against adversarial attacks. 
""" + from art.defences.detector import evasion from art.defences.detector import poison diff --git a/art/defences/detector/evasion/__init__.py b/art/defences/detector/evasion/__init__.py index 7a53323f5f..26112a2afe 100644 --- a/art/defences/detector/evasion/__init__.py +++ b/art/defences/detector/evasion/__init__.py @@ -1,6 +1,7 @@ """ Module implementing detector-based defences against evasion attacks. """ + from art.defences.detector.evasion.evasion_detector import EvasionDetector from art.defences.detector.evasion.binary_input_detector import BinaryInputDetector from art.defences.detector.evasion.binary_activation_detector import BinaryActivationDetector diff --git a/art/defences/detector/evasion/subsetscanning/scanningops.py b/art/defences/detector/evasion/subsetscanning/scanningops.py index 503866ed96..e8d770ba85 100644 --- a/art/defences/detector/evasion/subsetscanning/scanningops.py +++ b/art/defences/detector/evasion/subsetscanning/scanningops.py @@ -165,12 +165,20 @@ def single_restart( if image_to_node: # passed pvalues are only those belonging to fixed images, update nodes in return # only sending sub of images - (score_from_optimization, sub_of_nodes, optimal_alpha,) = ScanningOps.optimize_in_single_dimension( + ( + score_from_optimization, + sub_of_nodes, + optimal_alpha, + ) = ScanningOps.optimize_in_single_dimension( pvalues[sub_of_images, :, :], a_max, image_to_node, score_function ) else: # passed pvalues are only those belonging to fixed nodes, update images in return # only sending sub of nodes - (score_from_optimization, sub_of_images, optimal_alpha,) = ScanningOps.optimize_in_single_dimension( + ( + score_from_optimization, + sub_of_images, + optimal_alpha, + ) = ScanningOps.optimize_in_single_dimension( pvalues[:, sub_of_nodes, :], a_max, image_to_node, score_function ) diff --git a/art/defences/detector/poison/__init__.py b/art/defences/detector/poison/__init__.py index 228957b2c1..506eb8b31b 100644 --- a/art/defences/detector/poison/__init__.py +++ b/art/defences/detector/poison/__init__.py @@ -1,6 +1,7 @@ """ Module implementing detector-based defences against poisoning attacks. 
""" + from art.defences.detector.poison.poison_filtering_defence import PoisonFilteringDefence from art.defences.detector.poison.ground_truth_evaluator import GroundTruthEvaluator from art.defences.detector.poison.activation_defence import ActivationDefence diff --git a/art/defences/detector/poison/activation_defence.py b/art/defences/detector/poison/activation_defence.py index 45b09d0e4d..cf99dd30c2 100644 --- a/art/defences/detector/poison/activation_defence.py +++ b/art/defences/detector/poison/activation_defence.py @@ -333,12 +333,20 @@ def analyze_clusters(self, **kwargs) -> Tuple[Dict[str, Any], np.ndarray]: report, ) = analyzer.analyze_by_relative_size(self.clusters_by_class) elif self.cluster_analysis == "distance": - (self.assigned_clean_by_class, self.poisonous_clusters, report,) = analyzer.analyze_by_distance( + ( + self.assigned_clean_by_class, + self.poisonous_clusters, + report, + ) = analyzer.analyze_by_distance( self.clusters_by_class, separated_activations=self.red_activations_by_class, ) elif self.cluster_analysis == "silhouette-scores": - (self.assigned_clean_by_class, self.poisonous_clusters, report,) = analyzer.analyze_by_silhouette_score( + ( + self.assigned_clean_by_class, + self.poisonous_clusters, + report, + ) = analyzer.analyze_by_silhouette_score( self.clusters_by_class, reduced_activations_by_class=self.red_activations_by_class, ) diff --git a/art/defences/postprocessor/__init__.py b/art/defences/postprocessor/__init__.py index dec23dca74..1a4e04ea7f 100644 --- a/art/defences/postprocessor/__init__.py +++ b/art/defences/postprocessor/__init__.py @@ -1,6 +1,7 @@ """ Module implementing postprocessing defences against adversarial attacks. """ + from art.defences.postprocessor.class_labels import ClassLabels from art.defences.postprocessor.gaussian_noise import GaussianNoise from art.defences.postprocessor.high_confidence import HighConfidence diff --git a/art/defences/preprocessor/__init__.py b/art/defences/preprocessor/__init__.py index 474a19d2f9..27ce0b9e4b 100644 --- a/art/defences/preprocessor/__init__.py +++ b/art/defences/preprocessor/__init__.py @@ -1,6 +1,7 @@ """ Module implementing preprocessing defences against adversarial attacks. """ + from art.defences.preprocessor.cutmix.cutmix import CutMix from art.defences.preprocessor.cutmix.cutmix_pytorch import CutMixPyTorch from art.defences.preprocessor.cutmix.cutmix_tensorflow import CutMixTensorFlowV2 diff --git a/art/defences/preprocessor/feature_squeezing.py b/art/defences/preprocessor/feature_squeezing.py index 557cab9ac2..49b2d01394 100644 --- a/art/defences/preprocessor/feature_squeezing.py +++ b/art/defences/preprocessor/feature_squeezing.py @@ -82,7 +82,7 @@ def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.nd x_normalized = x - self.clip_values[0] x_normalized = x_normalized / (self.clip_values[1] - self.clip_values[0]) - max_value = np.rint(2 ** self.bit_depth - 1) + max_value = np.rint(2**self.bit_depth - 1) res = np.rint(x_normalized * max_value) / max_value res = res * (self.clip_values[1] - self.clip_values[0]) diff --git a/art/defences/preprocessor/mp3_compression.py b/art/defences/preprocessor/mp3_compression.py index e3947ab7e6..7cb2139321 100644 --- a/art/defences/preprocessor/mp3_compression.py +++ b/art/defences/preprocessor/mp3_compression.py @@ -92,7 +92,7 @@ def wav_to_mp3(x, sample_rate): elif x_dtype != np.int16 and normalized: # x is not of type np.int16 and seems to be normalized. Therefore undoing normalization and # casting to np.int16. 
- x = (x * 2 ** 15).astype(np.int16) + x = (x * 2**15).astype(np.int16) tmp_wav, tmp_mp3 = BytesIO(), BytesIO() write(tmp_wav, sample_rate, x) @@ -112,7 +112,7 @@ def wav_to_mp3(x, sample_rate): if normalized: # x was normalized. Therefore normalizing x_mp3. - x_mp3 = x_mp3 * 2 ** -15 + x_mp3 = x_mp3 * 2**-15 return x_mp3.astype(x_dtype) x_orig_type = x.dtype diff --git a/art/defences/trainer/__init__.py b/art/defences/trainer/__init__.py index d7585869a4..02ea930869 100644 --- a/art/defences/trainer/__init__.py +++ b/art/defences/trainer/__init__.py @@ -1,6 +1,7 @@ """ Module implementing train-based defences against adversarial attacks. """ + from art.defences.trainer.trainer import Trainer from art.defences.trainer.adversarial_trainer import AdversarialTrainer from art.defences.trainer.certified_adversarial_trainer_pytorch import AdversarialTrainerCertifiedPytorch diff --git a/art/defences/trainer/adversarial_trainer_oaat_pytorch.py b/art/defences/trainer/adversarial_trainer_oaat_pytorch.py index a83d787881..c012d73e89 100644 --- a/art/defences/trainer/adversarial_trainer_oaat_pytorch.py +++ b/art/defences/trainer/adversarial_trainer_oaat_pytorch.py @@ -809,7 +809,7 @@ def normalize_concatenate_activations( temp_activation = activations_dict[name] size_temp_activation = list(temp_activation.size()) norm_factor_layer = size_temp_activation[2] * size_temp_activation[3] - norm_temp_activation = torch.sqrt(torch.sum(temp_activation ** 2, dim=1, keepdim=True)) + EPS + norm_temp_activation = torch.sqrt(torch.sum(temp_activation**2, dim=1, keepdim=True)) + EPS temp_activation_n_channel = temp_activation / norm_temp_activation temp_activation_n_layer_channel = temp_activation_n_channel / np.sqrt(norm_factor_layer) temp_activation_n_layer_channel_flat = temp_activation_n_layer_channel.view(size_temp_activation[0], -1) @@ -1177,13 +1177,10 @@ def _projection( "The parameter `eps` of type `np.ndarray` is not supported to use with norm 2." ) - values_tmp = ( - values_tmp - * torch.min( - torch.tensor([1.0], dtype=torch.float32).to(self._classifier.device), - eps / (torch.norm(values_tmp, p=2, dim=1) + EPS), - ).unsqueeze_(-1) - ) + values_tmp = values_tmp * torch.min( + torch.tensor([1.0], dtype=torch.float32).to(self._classifier.device), + eps / (torch.norm(values_tmp, p=2, dim=1) + EPS), + ).unsqueeze_(-1) elif norm_p == 1: if isinstance(eps, np.ndarray): @@ -1191,13 +1188,10 @@ def _projection( "The parameter `eps` of type `np.ndarray` is not supported to use with norm 1." ) - values_tmp = ( - values_tmp - * torch.min( - torch.tensor([1.0], dtype=torch.float32).to(self._classifier.device), - eps / (torch.norm(values_tmp, p=1, dim=1) + EPS), - ).unsqueeze_(-1) - ) + values_tmp = values_tmp * torch.min( + torch.tensor([1.0], dtype=torch.float32).to(self._classifier.device), + eps / (torch.norm(values_tmp, p=1, dim=1) + EPS), + ).unsqueeze_(-1) elif norm_p in [np.inf, "inf"]: if isinstance(eps, np.ndarray): diff --git a/art/defences/transformer/__init__.py b/art/defences/transformer/__init__.py index ddb41880b4..de17a51ee0 100644 --- a/art/defences/transformer/__init__.py +++ b/art/defences/transformer/__init__.py @@ -1,6 +1,7 @@ """ Module implementing transformer-based defences against adversarial attacks. 
""" + from art.defences.transformer.transformer import Transformer from art.defences.transformer import evasion diff --git a/art/defences/transformer/evasion/__init__.py b/art/defences/transformer/evasion/__init__.py index d2a6d92a7c..a6a9803777 100644 --- a/art/defences/transformer/evasion/__init__.py +++ b/art/defences/transformer/evasion/__init__.py @@ -1,4 +1,5 @@ """ Module implementing transformer-based defences against evasion attacks. """ + from art.defences.transformer.evasion.defensive_distillation import DefensiveDistillation diff --git a/art/defences/transformer/poisoning/__init__.py b/art/defences/transformer/poisoning/__init__.py index 6657651db6..db950e1824 100644 --- a/art/defences/transformer/poisoning/__init__.py +++ b/art/defences/transformer/poisoning/__init__.py @@ -1,5 +1,6 @@ """ Module implementing transformer-based defences against poisoning attacks. """ + from art.defences.transformer.poisoning.neural_cleanse import NeuralCleanse from art.defences.transformer.poisoning.strip import STRIP diff --git a/art/estimators/__init__.py b/art/estimators/__init__.py index f9a7d41928..462bb32651 100644 --- a/art/estimators/__init__.py +++ b/art/estimators/__init__.py @@ -1,6 +1,7 @@ """ This module contains the Estimator API. """ + from art.estimators.estimator import ( BaseEstimator, LossGradientsMixin, diff --git a/art/estimators/certification/__init__.py b/art/estimators/certification/__init__.py index 83a69eb514..906504ecb5 100644 --- a/art/estimators/certification/__init__.py +++ b/art/estimators/certification/__init__.py @@ -1,6 +1,7 @@ """ This module contains certified classifiers. """ + import importlib from art.estimators.certification.randomized_smoothing.randomized_smoothing import RandomizedSmoothingMixin from art.estimators.certification.randomized_smoothing.numpy import NumpyRandomizedSmoothing diff --git a/art/estimators/certification/deep_z/__init__.py b/art/estimators/certification/deep_z/__init__.py index 4b8e0d9a5f..b1975650d9 100644 --- a/art/estimators/certification/deep_z/__init__.py +++ b/art/estimators/certification/deep_z/__init__.py @@ -1,6 +1,7 @@ """ DeepZ based certification estimators. """ + from art.estimators.certification.deep_z.deep_z import ZonoDenseLayer from art.estimators.certification.deep_z.deep_z import ZonoBounds from art.estimators.certification.deep_z.deep_z import ZonoConv diff --git a/art/estimators/certification/derandomized_smoothing/__init__.py b/art/estimators/certification/derandomized_smoothing/__init__.py index 69753f4f39..3c4ee109e9 100644 --- a/art/estimators/certification/derandomized_smoothing/__init__.py +++ b/art/estimators/certification/derandomized_smoothing/__init__.py @@ -1,5 +1,6 @@ """ DeRandomized smoothing estimators. """ + from art.estimators.certification.derandomized_smoothing.pytorch import PyTorchDeRandomizedSmoothing from art.estimators.certification.derandomized_smoothing.tensorflow import TensorFlowV2DeRandomizedSmoothing diff --git a/art/estimators/certification/derandomized_smoothing/ablators/__init__.py b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py index 23715d4aba..78c15801d3 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/__init__.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/__init__.py @@ -1,6 +1,7 @@ """ This module contains the ablators for the certified smoothing approaches. 
""" + import importlib from art.estimators.certification.derandomized_smoothing.ablators.tensorflow import ColumnAblator, BlockAblator diff --git a/art/estimators/certification/interval/__init__.py b/art/estimators/certification/interval/__init__.py index 7ceacef674..0d2f496fcb 100644 --- a/art/estimators/certification/interval/__init__.py +++ b/art/estimators/certification/interval/__init__.py @@ -1,6 +1,7 @@ """ Interval based certification estimators. """ + from art.estimators.certification.interval.interval import PyTorchIntervalDense from art.estimators.certification.interval.interval import PyTorchIntervalConv2D from art.estimators.certification.interval.interval import PyTorchIntervalReLU diff --git a/art/estimators/certification/object_seeker/__init__.py b/art/estimators/certification/object_seeker/__init__.py index 99b8175584..5275c88295 100644 --- a/art/estimators/certification/object_seeker/__init__.py +++ b/art/estimators/certification/object_seeker/__init__.py @@ -1,5 +1,6 @@ """ ObjectSeeker estimators. """ + from art.estimators.certification.object_seeker.object_seeker import ObjectSeekerMixin from art.estimators.certification.object_seeker.pytorch import PyTorchObjectSeeker diff --git a/art/estimators/certification/object_seeker/object_seeker.py b/art/estimators/certification/object_seeker/object_seeker.py index d810717581..0f63588793 100644 --- a/art/estimators/certification/object_seeker/object_seeker.py +++ b/art/estimators/certification/object_seeker/object_seeker.py @@ -138,7 +138,7 @@ def _prune_boxes( keep_indices = [] for idx, (masked_box, masked_label) in enumerate(zip(masked_boxes, masked_labels)): keep = True - for (base_box, base_label) in zip(base_boxes, base_labels): + for base_box, base_label in zip(base_boxes, base_labels): if masked_label == base_label: ioa = intersection_over_area(masked_box, base_box) if ioa >= self.prune_threshold: diff --git a/art/estimators/certification/randomized_smoothing/__init__.py b/art/estimators/certification/randomized_smoothing/__init__.py index 2faa24dc34..3b189912cb 100644 --- a/art/estimators/certification/randomized_smoothing/__init__.py +++ b/art/estimators/certification/randomized_smoothing/__init__.py @@ -1,6 +1,7 @@ """ Randomized smoothing estimators. """ + from art.estimators.certification.randomized_smoothing.randomized_smoothing import RandomizedSmoothingMixin from art.estimators.certification.randomized_smoothing.numpy import NumpyRandomizedSmoothing diff --git a/art/estimators/classification/__init__.py b/art/estimators/classification/__init__.py index 1af1909d35..280f205e4e 100644 --- a/art/estimators/classification/__init__.py +++ b/art/estimators/classification/__init__.py @@ -2,6 +2,7 @@ Classifier API for applying all attacks. Use the :class:`.Classifier` wrapper to be able to apply an attack to a preexisting model. 
""" + from art.estimators.classification.classifier import ( ClassifierMixin, ClassGradientsMixin, diff --git a/art/estimators/classification/keras.py b/art/estimators/classification/keras.py index 6f6f7e47c8..c322a51358 100644 --- a/art/estimators/classification/keras.py +++ b/art/estimators/classification/keras.py @@ -271,7 +271,9 @@ def _initialize_params( raise ValueError("Loss function not recognised.") # Define the loss using the loss function - if "__name__" in dir(loss_function,) and loss_function.__name__ in [ + if "__name__" in dir( + loss_function, + ) and loss_function.__name__ in [ "categorical_crossentropy", "sparse_categorical_crossentropy", "binary_crossentropy", diff --git a/art/estimators/encoding/__init__.py b/art/estimators/encoding/__init__.py index 9e40f8ae70..d6808eadab 100644 --- a/art/estimators/encoding/__init__.py +++ b/art/estimators/encoding/__init__.py @@ -1,6 +1,7 @@ """ Encoder API. """ + from art.estimators.encoding.encoder import EncoderMixin from art.estimators.encoding.tensorflow import TensorFlowEncoder diff --git a/art/estimators/gan/__init__.py b/art/estimators/gan/__init__.py index 33ee4f13cc..b8ec6a426e 100644 --- a/art/estimators/gan/__init__.py +++ b/art/estimators/gan/__init__.py @@ -1,4 +1,5 @@ """ GAN Estimator API. """ + from art.estimators.gan.tensorflow import TensorFlowV2GAN diff --git a/art/estimators/generation/__init__.py b/art/estimators/generation/__init__.py index 86dd5ed0a9..1bd68b0b49 100644 --- a/art/estimators/generation/__init__.py +++ b/art/estimators/generation/__init__.py @@ -1,6 +1,7 @@ """ Generator API. """ + from art.estimators.generation.generator import GeneratorMixin from art.estimators.generation.tensorflow import TensorFlowGenerator diff --git a/art/estimators/object_detection/__init__.py b/art/estimators/object_detection/__init__.py index 37a3097e14..ec0bec81e0 100644 --- a/art/estimators/object_detection/__init__.py +++ b/art/estimators/object_detection/__init__.py @@ -1,6 +1,7 @@ """ Module containing estimators for object detection. 
""" + from art.estimators.object_detection.object_detector import ObjectDetectorMixin from art.estimators.object_detection.pytorch_object_detector import PyTorchObjectDetector diff --git a/art/estimators/object_detection/tensorflow_faster_rcnn.py b/art/estimators/object_detection/tensorflow_faster_rcnn.py index f7e09e9b41..56544bd2e1 100644 --- a/art/estimators/object_detection/tensorflow_faster_rcnn.py +++ b/art/estimators/object_detection/tensorflow_faster_rcnn.py @@ -374,13 +374,13 @@ def loss_gradient( # pylint: disable=W0221 # Create feed_dict feed_dict = {self.images: x_preprocessed} - for (placeholder, value) in zip(self._groundtruth_boxes_list, y): + for placeholder, value in zip(self._groundtruth_boxes_list, y): feed_dict[placeholder] = value["boxes"] - for (placeholder, value) in zip(self._groundtruth_classes_list, y): + for placeholder, value in zip(self._groundtruth_classes_list, y): feed_dict[placeholder] = value["labels"] - for (placeholder, value) in zip(self._groundtruth_weights_list, y): + for placeholder, value in zip(self._groundtruth_weights_list, y): feed_dict[placeholder] = [1.0] * len(value["labels"]) # Compute gradients @@ -532,13 +532,13 @@ def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: # Create feed_dict feed_dict = {self.images: x_preprocessed} - for (placeholder, value) in zip(self._groundtruth_boxes_list, y): + for placeholder, value in zip(self._groundtruth_boxes_list, y): feed_dict[placeholder] = value["boxes"] - for (placeholder, value) in zip(self._groundtruth_classes_list, y): + for placeholder, value in zip(self._groundtruth_classes_list, y): feed_dict[placeholder] = value["labels"] - for (placeholder, value) in zip(self._groundtruth_weights_list, y): + for placeholder, value in zip(self._groundtruth_weights_list, y): feed_dict[placeholder] = value["scores"] loss_values = self._sess.run(self._loss_total, feed_dict=feed_dict) @@ -561,13 +561,13 @@ def compute_losses(self, x: np.ndarray, y: np.ndarray) -> Dict[str, np.ndarray]: # Create feed_dict feed_dict = {self.images: x_preprocessed} - for (placeholder, value) in zip(self._groundtruth_boxes_list, y): + for placeholder, value in zip(self._groundtruth_boxes_list, y): feed_dict[placeholder] = value["boxes"] - for (placeholder, value) in zip(self._groundtruth_classes_list, y): + for placeholder, value in zip(self._groundtruth_classes_list, y): feed_dict[placeholder] = value["labels"] - for (placeholder, value) in zip(self._groundtruth_weights_list, y): + for placeholder, value in zip(self._groundtruth_weights_list, y): feed_dict[placeholder] = value["scores"] # Get the losses graph diff --git a/art/estimators/object_tracking/__init__.py b/art/estimators/object_tracking/__init__.py index b84b05efba..49c0365095 100644 --- a/art/estimators/object_tracking/__init__.py +++ b/art/estimators/object_tracking/__init__.py @@ -1,6 +1,7 @@ """ Module containing estimators for object tracking. 
""" + from art.estimators.object_tracking.object_tracker import ObjectTrackerMixin from art.estimators.object_tracking.pytorch_goturn import PyTorchGoturn diff --git a/art/estimators/object_tracking/pytorch_goturn.py b/art/estimators/object_tracking/pytorch_goturn.py index 2cdb6db300..dd85f78e56 100644 --- a/art/estimators/object_tracking/pytorch_goturn.py +++ b/art/estimators/object_tracking/pytorch_goturn.py @@ -500,9 +500,7 @@ def edge_spacing_y_f(bbox_tight: "torch.Tensor") -> "torch.Tensor": return torch.maximum(torch.tensor(0.0).to(self.device), (output_height / 2) - bbox_center_y) - def crop_pad_image( - bbox_tight: "torch.Tensor", image: "torch.Tensor" - ) -> Tuple[ + def crop_pad_image(bbox_tight: "torch.Tensor", image: "torch.Tensor") -> Tuple[ "torch.Tensor", Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor"], "torch.Tensor", diff --git a/art/estimators/poison_mitigation/__init__.py b/art/estimators/poison_mitigation/__init__.py index d59d395924..63b4fad2c6 100644 --- a/art/estimators/poison_mitigation/__init__.py +++ b/art/estimators/poison_mitigation/__init__.py @@ -1,6 +1,7 @@ """ This module implements all poison mitigation models in ART. """ + from art.estimators.poison_mitigation import neural_cleanse from art.estimators.poison_mitigation.strip import strip from art.estimators.poison_mitigation.neural_cleanse.keras import KerasNeuralCleanse diff --git a/art/estimators/poison_mitigation/neural_cleanse/__init__.py b/art/estimators/poison_mitigation/neural_cleanse/__init__.py index 997b9cf45c..9d57af73ae 100644 --- a/art/estimators/poison_mitigation/neural_cleanse/__init__.py +++ b/art/estimators/poison_mitigation/neural_cleanse/__init__.py @@ -1,5 +1,6 @@ """ Neural cleanse estimators. """ + from art.estimators.poison_mitigation.neural_cleanse.neural_cleanse import NeuralCleanseMixin from art.estimators.poison_mitigation.neural_cleanse.keras import KerasNeuralCleanse diff --git a/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py b/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py index 62c6f82c08..97203cc019 100644 --- a/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py +++ b/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py @@ -84,7 +84,7 @@ def __init__( self.early_stop_threshold = early_stop_threshold self.early_stop_patience = early_stop_patience self.cost_multiplier_up = cost_multiplier - self.cost_multiplier_down = cost_multiplier ** 1.5 + self.cost_multiplier_down = cost_multiplier**1.5 self.batch_size = batch_size self.top_indices: List[int] = [] self.activation_threshold = 0 diff --git a/art/estimators/poison_mitigation/strip/__init__.py b/art/estimators/poison_mitigation/strip/__init__.py index 4ebf7d55a8..0f729468f5 100644 --- a/art/estimators/poison_mitigation/strip/__init__.py +++ b/art/estimators/poison_mitigation/strip/__init__.py @@ -1,4 +1,5 @@ """ STRIP estimators. """ + from art.estimators.poison_mitigation.strip.strip import STRIPMixin diff --git a/art/estimators/regression/__init__.py b/art/estimators/regression/__init__.py index 085d5e3059..d053aee31c 100644 --- a/art/estimators/regression/__init__.py +++ b/art/estimators/regression/__init__.py @@ -1,6 +1,7 @@ """ This module implements all regressors in ART. 
""" + from art.estimators.regression.regressor import RegressorMixin, Regressor from art.estimators.regression.scikitlearn import ScikitlearnRegressor diff --git a/art/estimators/speech_recognition/__init__.py b/art/estimators/speech_recognition/__init__.py index 6e6e5eeed0..0da3ffb48d 100644 --- a/art/estimators/speech_recognition/__init__.py +++ b/art/estimators/speech_recognition/__init__.py @@ -1,6 +1,7 @@ """ Module containing estimators for speech recognition. """ + from art.estimators.speech_recognition.speech_recognizer import SpeechRecognizerMixin from art.estimators.speech_recognition.pytorch_deep_speech import PyTorchDeepSpeech diff --git a/art/evaluations/security_curve/__init__.py b/art/evaluations/security_curve/__init__.py index f6fc6f1bad..de4006374d 100644 --- a/art/evaluations/security_curve/__init__.py +++ b/art/evaluations/security_curve/__init__.py @@ -1,4 +1,5 @@ """ This module implements the evaluation of Security Curves. """ + from art.evaluations.security_curve.security_curve import SecurityCurve diff --git a/art/experimental/__init__.py b/art/experimental/__init__.py index 3f4753efd9..d3ee1b7f53 100644 --- a/art/experimental/__init__.py +++ b/art/experimental/__init__.py @@ -1,4 +1,5 @@ """ This module contains the experimental Estimator API. """ + from art.experimental.estimators.jax import JaxEstimator diff --git a/art/experimental/estimators/__init__.py b/art/experimental/estimators/__init__.py index 7016108edb..68a0629dc7 100644 --- a/art/experimental/estimators/__init__.py +++ b/art/experimental/estimators/__init__.py @@ -1,4 +1,5 @@ """ Experimental Estimator API """ + from art.experimental.estimators.jax import JaxEstimator diff --git a/art/experimental/estimators/classification/__init__.py b/art/experimental/estimators/classification/__init__.py index 7529ddd0d8..e04625c199 100644 --- a/art/experimental/estimators/classification/__init__.py +++ b/art/experimental/estimators/classification/__init__.py @@ -1,4 +1,5 @@ """ Experimental classifiers. """ + from art.experimental.estimators.classification.jax import JaxClassifier diff --git a/art/metrics/__init__.py b/art/metrics/__init__.py index 459bd47953..6432a851ae 100644 --- a/art/metrics/__init__.py +++ b/art/metrics/__init__.py @@ -1,6 +1,7 @@ """ Module providing metrics and verifications. """ + from art.metrics.metrics import adversarial_accuracy from art.metrics.metrics import empirical_robustness from art.metrics.metrics import loss_sensitivity diff --git a/art/metrics/privacy/__init__.py b/art/metrics/privacy/__init__.py index 34c66b1c31..4fb02f2385 100644 --- a/art/metrics/privacy/__init__.py +++ b/art/metrics/privacy/__init__.py @@ -1,5 +1,6 @@ """ Module providing metrics and verifications. 
""" + from art.metrics.privacy.membership_leakage import PDTP, SHAPr, ComparisonType from art.metrics.privacy.worst_case_mia_score import get_roc_for_fpr, get_roc_for_multi_fprs diff --git a/art/optimizers.py b/art/optimizers.py index 0384e4d44b..6a8d20aa30 100644 --- a/art/optimizers.py +++ b/art/optimizers.py @@ -47,11 +47,11 @@ def update(self, niter: int, x: np.ndarray, delta_x: np.ndarray) -> np.ndarray: self.m_dx = self.beta_1 * self.m_dx + (1 - self.beta_1) * delta_x # rms - self.v_dx = self.beta_2 * self.v_dx + (1 - self.beta_2) * (delta_x ** 2) + self.v_dx = self.beta_2 * self.v_dx + (1 - self.beta_2) * (delta_x**2) # bias - m_dw_corr = self.m_dx / (1 - self.beta_1 ** niter) - v_dw_corr = self.v_dx / (1 - self.beta_2 ** niter) + m_dw_corr = self.m_dx / (1 - self.beta_1**niter) + v_dw_corr = self.v_dx / (1 - self.beta_2**niter) # update x = x - self.alpha * (m_dw_corr / (np.sqrt(v_dw_corr) + self.epsilon)) diff --git a/art/preprocessing/__init__.py b/art/preprocessing/__init__.py index 11da54dc1e..30dd410c4c 100644 --- a/art/preprocessing/__init__.py +++ b/art/preprocessing/__init__.py @@ -1,6 +1,7 @@ """ Module for preprocessing operations. """ + from art.preprocessing.preprocessing import Preprocessor from art.preprocessing.preprocessing import PreprocessorPyTorch from art.preprocessing.preprocessing import PreprocessorTensorFlowV2 diff --git a/art/preprocessing/audio/__init__.py b/art/preprocessing/audio/__init__.py index 4b6f22bd82..20b1422dc1 100644 --- a/art/preprocessing/audio/__init__.py +++ b/art/preprocessing/audio/__init__.py @@ -1,5 +1,6 @@ """ This module contains audio preprocessing tools. """ + from art.preprocessing.audio.l_filter.numpy import LFilter from art.preprocessing.audio.l_filter.pytorch import LFilterPyTorch diff --git a/art/preprocessing/expectation_over_transformation/__init__.py b/art/preprocessing/expectation_over_transformation/__init__.py index 592ce632b6..5a5c30344f 100644 --- a/art/preprocessing/expectation_over_transformation/__init__.py +++ b/art/preprocessing/expectation_over_transformation/__init__.py @@ -1,6 +1,7 @@ """ Module providing expectation over transformations. """ + from art.preprocessing.expectation_over_transformation.image_center_crop.pytorch import EoTImageCenterCropPyTorch from art.preprocessing.expectation_over_transformation.image_rotation.tensorflow import EoTImageRotationTensorFlow from art.preprocessing.expectation_over_transformation.image_rotation.pytorch import EoTImageRotationPyTorch diff --git a/art/preprocessing/image/__init__.py b/art/preprocessing/image/__init__.py index 1d2b3e0f61..97b171bc01 100644 --- a/art/preprocessing/image/__init__.py +++ b/art/preprocessing/image/__init__.py @@ -1,6 +1,7 @@ """ This module contains image preprocessing tools. """ + from art.preprocessing.image.image_resize.numpy import ImageResize from art.preprocessing.image.image_resize.pytorch import ImageResizePyTorch from art.preprocessing.image.image_resize.tensorflow import ImageResizeTensorFlowV2 diff --git a/art/preprocessing/standardisation_mean_std/__init__.py b/art/preprocessing/standardisation_mean_std/__init__.py index 7b8593b1c5..ae547848a6 100644 --- a/art/preprocessing/standardisation_mean_std/__init__.py +++ b/art/preprocessing/standardisation_mean_std/__init__.py @@ -1,6 +1,7 @@ """ This module contains tool for input standardisation with mean and standard deviation. 
""" + from art.preprocessing.standardisation_mean_std.numpy import StandardisationMeanStd from art.preprocessing.standardisation_mean_std.pytorch import StandardisationMeanStdPyTorch from art.preprocessing.standardisation_mean_std.tensorflow import StandardisationMeanStdTensorFlow diff --git a/art/summary_writer.py b/art/summary_writer.py index ab216d78d1..e996aef81a 100644 --- a/art/summary_writer.py +++ b/art/summary_writer.py @@ -272,7 +272,7 @@ def update( np.square((self.losses[str(batch_id)][i_step] - self.losses[str(batch_id)][-1]) / delta_loss) + ((delta_step - i_step) / delta_step) ** 2 ) - cos_beta = -(side_b ** 2 - (side_a ** 2 + side_c ** 2)) / (2 * side_a * side_c) + cos_beta = -(side_b**2 - (side_a**2 + side_c**2)) / (2 * side_a * side_c) i_2_step = 1 - np.abs(cos_beta) self.i_2 = np.minimum(self.i_2, i_2_step) diff --git a/art/utils.py b/art/utils.py index bcd6d00f49..6c0261cccc 100644 --- a/art/utils.py +++ b/art/utils.py @@ -613,7 +613,7 @@ def random_sphere( ) a_tmp = np.random.randn(nb_points, nb_dims) - s_2 = np.sum(a_tmp ** 2, axis=1) + s_2 = np.sum(a_tmp**2, axis=1) base = gammainc(nb_dims / 2.0, s_2 / 2.0) ** (1 / nb_dims) * radius / np.sqrt(s_2) res = a_tmp * (np.tile(base, (nb_dims, 1))).T diff --git a/art/visualization.py b/art/visualization.py index f33fa2bfd9..7cae41fb30 100644 --- a/art/visualization.py +++ b/art/visualization.py @@ -56,7 +56,7 @@ def create_sprite(images: np.ndarray) -> np.ndarray: images = convert_to_rgb(images) n = int(np.ceil(np.sqrt(images.shape[0]))) - padding = ((0, n ** 2 - images.shape[0]), (0, 0), (0, 0)) + ((0, 0),) * (images.ndim - 3) + padding = ((0, n**2 - images.shape[0]), (0, 0), (0, 0)) + ((0, 0),) * (images.ndim - 3) images = np.pad(images, padding, mode="constant", constant_values=0) # Tile the individual thumbnails into an image diff --git a/examples/adversarial_training_FBF.py b/examples/adversarial_training_FBF.py index 6afdc578bd..e3e38f113f 100644 --- a/examples/adversarial_training_FBF.py +++ b/examples/adversarial_training_FBF.py @@ -1,6 +1,7 @@ """ This is an example of how to use ART for adversarial training of a model with Fast is better than free protocol """ + import math from PIL import Image diff --git a/examples/adversarial_training_data_augmentation.py b/examples/adversarial_training_data_augmentation.py index 622e6b428f..ad6703c576 100644 --- a/examples/adversarial_training_data_augmentation.py +++ b/examples/adversarial_training_data_augmentation.py @@ -1,6 +1,7 @@ """ This is an example of how to use ART and Keras to perform adversarial training using data generators for CIFAR10 """ + import tensorflow as tf tf.compat.v1.disable_eager_execution() diff --git a/examples/backdoor_attack_dgm_red.py b/examples/backdoor_attack_dgm_red.py index 16366f849e..1fed5b128d 100644 --- a/examples/backdoor_attack_dgm_red.py +++ b/examples/backdoor_attack_dgm_red.py @@ -4,6 +4,7 @@ Please refer to the original paper (https://arxiv.org/abs/2108.01644) for further information. """ + from art.attacks.poisoning.backdoor_attack_dgm.backdoor_attack_dgm_red import BackdoorAttackDGMReDTensorFlowV2 from art.estimators.generation.tensorflow import TensorFlowV2Generator diff --git a/examples/backdoor_attack_dgm_trail.py b/examples/backdoor_attack_dgm_trail.py index db43c87851..093d21ea6d 100644 --- a/examples/backdoor_attack_dgm_trail.py +++ b/examples/backdoor_attack_dgm_trail.py @@ -4,6 +4,7 @@ Please refer to the original paper (https://arxiv.org/abs/2108.01644) for further information. 
""" + import numpy as np import tensorflow as tf diff --git a/examples/get_started_keras.py b/examples/get_started_keras.py index 29d64b046c..0e12e68eb1 100644 --- a/examples/get_started_keras.py +++ b/examples/get_started_keras.py @@ -4,6 +4,7 @@ it would also be possible to provide a pretrained model to the ART classifier. The parameters are chosen for reduced computational requirements of the script and not optimised for accuracy. """ + import tensorflow as tf tf.compat.v1.disable_eager_execution() diff --git a/examples/get_started_lightgbm.py b/examples/get_started_lightgbm.py index 3c5a9612c3..fa542052a2 100644 --- a/examples/get_started_lightgbm.py +++ b/examples/get_started_lightgbm.py @@ -4,6 +4,7 @@ the model, it would also be possible to provide a pretrained model to the ART classifier. The parameters are chosen for reduced computational requirements of the script and not optimised for accuracy. """ + import lightgbm as lgb import numpy as np diff --git a/examples/get_started_mxnet.py b/examples/get_started_mxnet.py index 34f5892327..a7fa0b3f86 100644 --- a/examples/get_started_mxnet.py +++ b/examples/get_started_mxnet.py @@ -4,6 +4,7 @@ it would also be possible to provide a pretrained model to the ART classifier. The parameters are chosen for reduced computational requirements of the script and not optimised for accuracy. """ + import mxnet from mxnet.gluon.nn import Conv2D, MaxPool2D, Flatten, Dense import numpy as np diff --git a/examples/get_started_pytorch.py b/examples/get_started_pytorch.py index ef461abd9b..9d9967799d 100644 --- a/examples/get_started_pytorch.py +++ b/examples/get_started_pytorch.py @@ -4,6 +4,7 @@ it would also be possible to provide a pretrained model to the ART classifier. The parameters are chosen for reduced computational requirements of the script and not optimised for accuracy. """ + import torch.nn as nn import torch.nn.functional as F import torch.optim as optim diff --git a/examples/get_started_scikit_learn.py b/examples/get_started_scikit_learn.py index 5ca87e89af..488a234597 100644 --- a/examples/get_started_scikit_learn.py +++ b/examples/get_started_scikit_learn.py @@ -4,6 +4,7 @@ the model, it would also be possible to provide a pretrained model to the ART classifier. The parameters are chosen for reduced computational requirements of the script and not optimised for accuracy. """ + from sklearn.svm import SVC import numpy as np diff --git a/examples/get_started_tensorflow.py b/examples/get_started_tensorflow.py index fd2088f48b..b031eee144 100644 --- a/examples/get_started_tensorflow.py +++ b/examples/get_started_tensorflow.py @@ -4,6 +4,7 @@ the model, it would also be possible to provide a pretrained model to the ART classifier. The parameters are chosen for reduced computational requirements of the script and not optimised for accuracy. """ + import tensorflow.compat.v1 as tf import numpy as np diff --git a/examples/get_started_tensorflow_v2.py b/examples/get_started_tensorflow_v2.py index e531809bc8..98473f1183 100644 --- a/examples/get_started_tensorflow_v2.py +++ b/examples/get_started_tensorflow_v2.py @@ -4,6 +4,7 @@ the model, it would also be possible to provide a pretrained model to the ART classifier. The parameters are chosen for reduced computational requirements of the script and not optimised for accuracy. 
""" + import numpy as np from art.attacks.evasion import FastGradientMethod diff --git a/examples/get_started_xgboost.py b/examples/get_started_xgboost.py index 4409487618..54c3bb0fa8 100644 --- a/examples/get_started_xgboost.py +++ b/examples/get_started_xgboost.py @@ -4,6 +4,7 @@ ART classifier. The parameters are chosen for reduced computational requirements of the script and not optimised for accuracy. """ + import xgboost as xgb import numpy as np diff --git a/examples/inverse_gan_author_utils.py b/examples/inverse_gan_author_utils.py index 1ff0310bb3..7924caac8a 100644 --- a/examples/inverse_gan_author_utils.py +++ b/examples/inverse_gan_author_utils.py @@ -1862,7 +1862,7 @@ def __call__(self, inputs, labels, is_training=True): def _l2normalize(v, eps=1e-12): - return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps) + return v / (tf.reduce_sum(v**2) ** 0.5 + eps) def spectral_norm(w, num_iters=1, update_collection=None): diff --git a/pyproject.toml b/pyproject.toml index fbbc9f0d02..23a4d6ef57 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,3 +2,4 @@ line-length=120 [tool.ruff] exclude = [".venv", "contrib"] +ignore = ["F401"] diff --git a/requirements_test.txt b/requirements_test.txt index e9e2491c3c..6e79d9130a 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -54,10 +54,20 @@ jax[cpu]==0.4.26 # tensorflow-gpu==2.1.0 # lingvo==0.6.4 -# other +# tests and style checking pytest~=8.3.2 pytest-mock~=3.14.0 pytest-cov~=5.0.0 +pylint==3.2.6 +mypy==1.11.1 +pycodestyle==2.12.0 +black==24.4.2 +ruff==0.5.5 +types-six==1.16.21.20240513 +types-PyYAML==6.0.12.20240724 +types-setuptools==71.1.0.20240726 + +# other requests~=2.31.0 ultralytics==8.0.217 ipython==8.25.0 diff --git a/tests/attacks/evasion/test_lowprofool.py b/tests/attacks/evasion/test_lowprofool.py index 18365887d8..2bc9c1777d 100644 --- a/tests/attacks/evasion/test_lowprofool.py +++ b/tests/attacks/evasion/test_lowprofool.py @@ -330,7 +330,7 @@ def test_fit_importances(iris_dataset): def pearson_correlations(x, y): correlations = [pearsonr(x[:, col], y)[0] for col in range(x.shape[1])] absolutes = np.abs(np.array(correlations)) - result = absolutes / np.power(np.sum(absolutes ** 2), 0.5) + result = absolutes / np.power(np.sum(absolutes**2), 0.5) return result # Setup classifier diff --git a/tests/attacks/evasion/test_pe_malware_attack.py b/tests/attacks/evasion/test_pe_malware_attack.py index c4fcf71e1e..57f4c7f1a5 100644 --- a/tests/attacks/evasion/test_pe_malware_attack.py +++ b/tests/attacks/evasion/test_pe_malware_attack.py @@ -18,7 +18,7 @@ def fix_get_synthetic_data(): """ # generate dummy data padding_char = 256 - maxlen = 2 ** 20 + maxlen = 2**20 # batch of 5 datapoints synthetic_data = np.ones((5, maxlen), dtype=np.uint16) * padding_char @@ -92,7 +92,7 @@ def get_prediction_model(param_dic): output = tf.keras.layers.Dense(1, name="output_layer")(dense) return tf.keras.Model(inputs=inp, outputs=output) - param_dic = {"maxlen": 2 ** 20, "input_dim": 257, "embedding_size": 8} + param_dic = {"maxlen": 2**20, "input_dim": 257, "embedding_size": 8} prediction_model = get_prediction_model(param_dic) model_weights = np.random.normal(loc=0, scale=1.0, size=(257, 8)) @@ -113,7 +113,7 @@ def test_no_perturbation(art_warning, fix_get_synthetic_data, fix_make_dummy_mod Assert that with 0 perturbation the data is unmodified """ try: - param_dic = {"maxlen": 2 ** 20, "input_dim": 257, "embedding_size": 8} + param_dic = {"maxlen": 2**20, "input_dim": 257, "embedding_size": 8} # First check: with no perturbation the malware 
of sufficient size, and benign files, should be unperturbed attack = MalwareGDTensorFlow( classifier=fix_make_dummy_model[0], embedding_weights=fix_make_dummy_model[1], l_0=0, param_dic=param_dic @@ -151,7 +151,7 @@ def test_append_attack(art_warning, fix_get_synthetic_data, fix_make_dummy_model Check append attack wih a given l0 budget """ try: - param_dic = {"maxlen": 2 ** 20, "input_dim": 257, "embedding_size": 8} + param_dic = {"maxlen": 2**20, "input_dim": 257, "embedding_size": 8} l0_budget = 1250 attack = MalwareGDTensorFlow( classifier=fix_make_dummy_model[0], @@ -218,7 +218,7 @@ def generate_synthetic_slack_regions(size): batch_of_slack_sizes.append(size_of_slack) return batch_of_slack_starts, batch_of_slack_sizes - param_dic = {"maxlen": 2 ** 20, "input_dim": 257, "embedding_size": 8} + param_dic = {"maxlen": 2**20, "input_dim": 257, "embedding_size": 8} # First check: with no perturbation the malware of sufficient size, and benign files, should be unperturbed attack = MalwareGDTensorFlow( classifier=fix_make_dummy_model[0], embedding_weights=fix_make_dummy_model[1], l_0=0, param_dic=param_dic @@ -299,9 +299,9 @@ def test_large_append(art_warning, fix_get_synthetic_data, fix_make_dummy_model) """ # Fourth check append large perturbation try: - param_dic = {"maxlen": 2 ** 20, "input_dim": 257, "embedding_size": 8} + param_dic = {"maxlen": 2**20, "input_dim": 257, "embedding_size": 8} # First check: with no perturbation the malware of sufficient size, and benign files, should be unperturbed - l0_budget = int(((2 ** 20) * 0.2)) + l0_budget = int(((2**20) * 0.2)) attack = MalwareGDTensorFlow( classifier=fix_make_dummy_model[0], embedding_weights=fix_make_dummy_model[1], @@ -340,7 +340,7 @@ def test_dos_header_attack(art_warning, fix_get_synthetic_data, fix_make_dummy_m """ # 5th check: DOS header attack try: - param_dic = {"maxlen": 2 ** 20, "input_dim": 257, "embedding_size": 8} + param_dic = {"maxlen": 2**20, "input_dim": 257, "embedding_size": 8} # First check: with no perturbation the malware of sufficient size, and benign files, should be unperturbed l0_budget = 290 attack = MalwareGDTensorFlow( @@ -424,7 +424,7 @@ def generate_synthetic_slack_regions(size): # 6th check: Do not automatically append extra perturbation l0_budget = 1250 - param_dic = {"maxlen": 2 ** 20, "input_dim": 257, "embedding_size": 8} + param_dic = {"maxlen": 2**20, "input_dim": 257, "embedding_size": 8} attack = MalwareGDTensorFlow( classifier=fix_make_dummy_model[0], embedding_weights=fix_make_dummy_model[1], @@ -493,7 +493,7 @@ def test_do_not_check_for_valid(art_warning, fix_get_synthetic_data, fix_make_du """ try: l0_budget = 1250 - param_dic = {"maxlen": 2 ** 20, "input_dim": 257, "embedding_size": 8} + param_dic = {"maxlen": 2**20, "input_dim": 257, "embedding_size": 8} attack = MalwareGDTensorFlow( classifier=fix_make_dummy_model[0], embedding_weights=fix_make_dummy_model[1], @@ -573,7 +573,7 @@ def test_check_params(art_warning, image_dl_estimator_for_attack): @pytest.mark.framework_agnostic def test_classifier_type_check_fail(art_warning, fix_make_dummy_model): try: - param_dic = {"maxlen": 2 ** 20, "input_dim": 257, "embedding_size": 8} + param_dic = {"maxlen": 2**20, "input_dim": 257, "embedding_size": 8} backend_test_classifier_type_check_fail( MalwareGDTensorFlow, [BaseEstimator, NeuralNetworkMixin, ClassifierMixin], diff --git a/tests/estimators/classification/test_scikitlearn.py b/tests/estimators/classification/test_scikitlearn.py index 14fccaf1c5..56ecf30e57 100644 --- 
a/tests/estimators/classification/test_scikitlearn.py +++ b/tests/estimators/classification/test_scikitlearn.py @@ -357,12 +357,8 @@ def setUpClass(cls): super().setUpClass() binary_class_index = np.argmax(cls.y_train_iris, axis=1) < 2 - x_train_binary = cls.x_train_iris[ - binary_class_index, - ] - y_train_binary = cls.y_train_iris[ - binary_class_index, - ][:, [0, 1]] + x_train_binary = cls.x_train_iris[binary_class_index,] + y_train_binary = cls.y_train_iris[binary_class_index,][:, [0, 1]] cls.sklearn_model = LogisticRegression( verbose=0, C=1, solver="newton-cg", dual=False, fit_intercept=True, multi_class="ovr" @@ -384,12 +380,8 @@ def test_class_gradient(self): def test_loss_gradient(self): binary_class_index = np.argmax(self.y_test_iris, axis=1) < 2 - x_test_binary = self.x_test_iris[ - binary_class_index, - ] - y_test_binary = self.y_test_iris[ - binary_class_index, - ][:, [0, 1]] + x_test_binary = self.x_test_iris[binary_class_index,] + y_test_binary = self.y_test_iris[binary_class_index,][:, [0, 1]] grad_predicted = self.classifier.loss_gradient(x_test_binary[0:1], y_test_binary[0:1]) grad_expected = np.asarray([[-0.37703343, 0.31890249, -1.18813638, -0.46208951]]) diff --git a/tests/test_utils.py b/tests/test_utils.py index a1a295ae8d..307ce456ba 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -140,7 +140,7 @@ def test_projection_norm(self): x_proj = projection(rand_sign * x, 3.14159, 2) self.assertEqual(x.shape, x_proj.shape) - self.assertTrue(np.allclose(np.sqrt(np.sum(x_proj ** 2, axis=t)), 3.14159, atol=10e-8)) + self.assertTrue(np.allclose(np.sqrt(np.sum(x_proj**2, axis=t)), 3.14159, atol=10e-8)) x_proj = projection(rand_sign * x, 0.314159, np.inf) self.assertEqual(x.shape, x_proj.shape) From 3ad1538af50d11fb1ae0599116c578cf6428e7a5 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Fri, 2 Aug 2024 14:45:36 +0200 Subject: [PATCH 04/27] Update pylintrc and fix ruff alert Signed-off-by: Beat Buesser --- .pylintrc | 763 +++++++++---------- art/defences/preprocessor/mp3_compression.py | 2 +- pyproject.toml | 2 +- 3 files changed, 372 insertions(+), 395 deletions(-) diff --git a/.pylintrc b/.pylintrc index c38f949e2a..c7504055b6 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,40 +1,44 @@ -[MASTER] +[MAIN] -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code. -extension-pkg-whitelist= +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= -# Add files or directories to the blacklist. They should be base names, not +# Files or directories to be skipped. They should be base names, not # paths. ignore=CVS -# Add files or directories matching the regex patterns to the blacklist. The -# regex matches against base names, not paths. -ignore-patterns= +# Add files or directories matching the regex patterns to the ignore-list. The +# regex matches against paths and can be in Posix or Windows format. +ignore-paths= -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the -# number of processors available to use. -jobs=1 +# Files or directories matching the regex patterns are skipped. The regex +# matches against base names, not paths. +ignore-patterns=^\.# -# Control the amount of potential inferred values when inferring a single -# object. 
This can help the performance when dealing with large functions or -# complex, nested conditions. -limit-inference-results=100 +# Pickle collected data for later comparisons. +persistent=yes # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins= + pylint.extensions.check_elif, + pylint.extensions.bad_builtin, + pylint.extensions.docparams, + pylint.extensions.for_any_all, + pylint.extensions.set_membership, + pylint.extensions.code_style, + pylint.extensions.overlapping_exceptions, + pylint.extensions.typing, + pylint.extensions.redefined_variable_type, + pylint.extensions.comparison_placement, + pylint.extensions.broad_try_clause, + pylint.extensions.dict_init_mutate, + pylint.extensions.consider_refactoring_into_while_condition, -# Pickle collected data for later comparisons. -persistent=yes - -# Specify a configuration file. -#rcfile= +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 # When enabled, pylint would attempt to guess common misconfiguration and emit # user-friendly hints instead of false-positive error messages. @@ -44,193 +48,119 @@ suggestion-mode=yes # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-allow-list= + +# Minimum supported python version +py-version = 3.9.0 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# Specify a score threshold under which the program will exit with error. +fail-under=10.0 + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint in +# a server-like mode. +clear-cache-post-run=no + [MESSAGES CONTROL] # Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. -confidence= +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +# confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + use-symbolic-message-instead, + useless-suppression, # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration -# file where it should appear only once). You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. 
For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use "--disable=all --enable=classes -# --disable=W". -disable=fixme, - misplaced-comparison-constant, - no-member, - duplicate-code, - unnecessary-pass, - useless-super-delegation, - too-few-public-methods, - too-many-instance-attributes, - too-many-locals, - too-many-arguments, - too-many-attributes, - too-many-branches, - too-many-statements, - attribute-defined-outside-init, - print-statement, - parameter-unpacking, - unpacking-in-except, - old-raise-syntax, - backtick, - long-suffix, - old-ne-operator, - old-octal-literal, - import-star-module-level, - non-ascii-bytes-literal, - raw-checker-failed, - bad-inline-option, - locally-disabled, - file-ignored, - suppressed-message, - useless-suppression, - deprecated-pragma, - use-symbolic-message-instead, - apply-builtin, - basestring-builtin, - buffer-builtin, - cmp-builtin, - coerce-builtin, - execfile-builtin, - file-builtin, - long-builtin, - raw_input-builtin, - reduce-builtin, - standarderror-builtin, - unicode-builtin, - xrange-builtin, - coerce-method, - delslice-method, - getslice-method, - setslice-method, - no-absolute-import, - old-division, - dict-iter-method, - dict-view-method, - next-method-called, - metaclass-assignment, - indexing-exception, - raising-string, - reload-builtin, - oct-method, - hex-method, - nonzero-method, - cmp-method, - input-builtin, - round-builtin, - intern-builtin, - unichr-builtin, - map-builtin-not-iterating, - zip-builtin-not-iterating, - range-builtin-not-iterating, - filter-builtin-not-iterating, - using-cmp-argument, - eq-without-hash, - div-method, - idiv-method, - rdiv-method, - exception-message-attribute, - invalid-str-codec, - sys-max-int, - bad-python3-import, - deprecated-string-function, - deprecated-str-translate-call, - deprecated-itertools-function, - deprecated-types-field, - next-method-defined, - dict-items-not-iterating, - dict-keys-not-iterating, - dict-values-not-iterating, - deprecated-operator-function, - deprecated-urllib-function, - xreadlines-attribute, - deprecated-sys-function, - exception-escape, - comprehension-escape - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -enable=c-extension-no-member +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" + +disable= + attribute-defined-outside-init, + invalid-name, + missing-docstring, + protected-access, + too-few-public-methods, + # handled by black + format, + # We anticipate #3512 where it will become optional + fixme, + consider-using-assignment-expr, [REPORTS] -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. 
See doc for all details. -#msg-template= - -# Set the output format. Available formats are text, parseable, colorized, json -# and msvs (visual studio). You can also give a reporter class, e.g. +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg # mypackage.mymodule.MyReporterClass. output-format=text -# Tells whether to display a full report or only the messages. +# Tells whether to display a full report or only the messages reports=no -# Activate the evaluation score. -score=yes - - -[REFACTORING] +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables 'fatal', 'error', 'warning', 'refactor', 'convention' +# and 'info', which contain the number of messages in each category, as +# well as 'statement', which is the total number of statements analyzed. This +# score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) -# Maximum number of nested blocks for function / method body -max-nested-blocks=7 +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= -# Complete name of functions that never returns. When checking for -# inconsistent-return-statements if a never returning function is called then -# it will be considered as an explicit return statement and no message will be -# printed. -never-returning-functions=sys.exit +# Activate the evaluation score. +score=yes [LOGGING] -# Format style used to check logging format string. `old` means using % -# formatting, while `new` is for `{}` formatting. -logging-format-style=old - # Logging modules to check that the string format arguments are in logging -# function parameter format. +# function parameter format logging-modules=logging +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old -[SPELLING] - -# Limits count of emitted suggestions for spelling mistakes. -max-spelling-suggestions=4 - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package.. -spelling-dict= -# List of comma separated words that should not be checked. -spelling-ignore-words= +[MISCELLANEOUS] -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no +# Regular expression of note tags to take in consideration. +#notes-rgx= [SIMILARITIES] +# Minimum lines number of a similarity. +min-similarity-lines=6 + # Ignore comments when computing similarities. ignore-comments=yes @@ -238,338 +168,385 @@ ignore-comments=yes ignore-docstrings=yes # Ignore imports when computing similarities. -ignore-imports=no +ignore-imports=yes -# Minimum lines number of a similarity. -min-similarity-lines=4 +# Signatures are removed from the similarity computation +ignore-signatures=yes -[TYPECHECK] +[VARIABLES] -# List of decorators that produce context managers, such as -# contextlib.contextmanager. 
Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager +# Tells whether we should check for unused import in __init__ files. +init-import=no -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb -# Tells whether to warn about missing members when the owner of the attribute -# is inferred to be None. -ignore-none=yes +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes -# This flag controls whether pylint should warn about no-member and similar -# checks whenever an opaque object is returned when inferring. The inference -# can return multiple potential results while evaluating a Python object, but -# some branches might not be evaluated, which results in partial inference. In -# that case, it might be useful to still emit no-member and other checks for -# the rest of the inferred objects. -ignore-on-opaque-inference=yes +# List of names allowed to shadow builtins +allowed-redefined-builtins= -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules= -# Show a hint with possible names when a member name was not found. The aspect -# of finding the hint is based on edit distance. -missing-member-hint=yes +[FORMAT] -# The minimum edit distance a name should have in order to be considered a -# similar match for a missing member name. -missing-member-hint-distance=1 +# Maximum number of characters on a single line. +max-line-length=100 -# The total number of similar names that should be taken in consideration when -# showing a hint for a missing member. -missing-member-max-choices=1 +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no -[VARIABLES] +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid defining new builtins when possible. 
-additional-builtins= +# Maximum number of lines in a module +max-module-lines=2000 -# Tells whether unused global variables should be treated as a violation. -allow-global-unused-variables=yes +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_, - _cb +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 -# A regular expression matching the name of dummy variables (i.e. expected to -# not be used). -dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= -# Argument names that match this expression will be ignored. Default to name -# with leading underscore. -ignored-argument-names=_.*|^ignored_|^unused_ -# Tells whether we should check for unused import in __init__ files. -init-import=no +[BASIC] -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= -[MISCELLANEOUS] +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata -# List of note tags to take in consideration, separated by a comma. -notes=FIXME, - XXX, - TODO +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= -[BASIC] +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no -# Naming style matching correct argument names. -argument-naming-style=snake_case +# Naming style matching correct function names. +function-naming-style=snake_case -# Regular expression matching correct argument names. Overrides argument- -# naming-style. -#argument-rgx= +# Regular expression matching correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ # Naming style matching correct attribute names. attr-naming-style=snake_case -# Regular expression matching correct attribute names. Overrides attr-naming- -# style. -#attr-rgx= +# Regular expression matching correct attribute names +attr-rgx=[a-z_][a-z0-9_]{2,}$ -# Good variable names which should always be accepted, separated by a comma -# i,j,k = typical indices -# n,m = typical numbers -# ex = for exceptions and errors -# x,y= typical data -# _ = placeholder name -good-names=i,j,k,n,m,ex,x,y,_,logger,op - -# Bad variable names which should always be refused, separated by a comma. -bad-names=foo, - bar, - baz, - toto, - tutu, - tata +# Naming style matching correct argument names. 
+argument-naming-style=snake_case + +# Regular expression matching correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming style matching correct class attribute names. class-attribute-naming-style=any -# Regular expression matching correct class attribute names. Overrides class- -# attribute-naming-style. -#class-attribute-rgx= +# Regular expression matching correct class attribute names +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. +#class-const-rgx= + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ # Naming style matching correct class names. class-naming-style=PascalCase -# Regular expression matching correct class names. Overrides class-naming- -# style. -#class-rgx= +# Regular expression matching correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ -# Naming style matching correct constant names. -const-naming-style=UPPER_CASE -# Regular expression matching correct constant names. Overrides const-naming- -# style. -#const-rgx= +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names +method-rgx=[a-z_][a-z0-9_]{2,}$ + +# Regular expression matching correct type variable names +#typevar-rgx= + +# Regular expression which should only match function or class names that do +# not require a docstring. Use ^(?!__init__$)_ to also check __init__. +no-docstring-rgx=__.*__ # Minimum line length for functions/classes that require docstrings, shorter # ones are exempt. docstring-min-length=-1 -# Naming style matching correct function names. -function-naming-style=snake_case +# List of decorators that define properties, such as abc.abstractproperty. +property-classes=abc.abstractproperty -# Regular expression matching correct function names. Overrides function- -# naming-style. -#function-rgx= -# Include a hint for the correct naming format with invalid-name. -include-naming-hint=no +[TYPECHECK] -# Naming style matching correct inline iteration names. -inlinevar-naming-style=any +# Regex pattern to define which classes are considered mixins if ignore-mixin- +# members is set to 'yes' +mixin-class-rgx=.*MixIn -# Regular expression matching correct inline iteration names. Overrides -# inlinevar-naming-style. -#inlinevar-rgx= +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well +# as Unix pattern matching. +ignored-modules= -# Naming style matching correct method names. -method-naming-style=snake_case +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=SQLObject, optparse.Values, thread._local, _thread._local -# Regular expression matching correct method names. Overrides method-naming- -# style. 
-#method-rgx= +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members=REQUEST,acl_users,aq_parent,argparse.Namespace -# Naming style matching correct module names. -module-naming-style=snake_case +# List of decorators that create context managers from functions, such as +# contextlib.contextmanager. +contextmanager-decorators=contextlib.contextmanager -# Regular expression matching correct module names. Overrides module-naming- -# style. -#module-rgx= +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. -# These decorators are taken in consideration only for invalid-name. -property-classes=abc.abstractproperty +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 -# Naming style matching correct variable names. -variable-naming-style=snake_case +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 -# Regular expression matching correct variable names. Overrides variable- -# naming-style. -#variable-rgx= +[SPELLING] +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= -[FORMAT] +# List of comma separated words that should not be checked. +spelling-ignore-words= -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= +# List of comma separated words that should be considered directives if they +# appear and the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:,pragma:,# noinspection -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file=.pyenchant_pylint_custom_dict.txt -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). 
-indent-string=' ' +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=2 -# Maximum number of characters on a single line. -max-line-length=120 -# Maximum number of lines in a module. -max-module-lines=1000 +[DESIGN] -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma, - dict-separator +# Maximum number of arguments for function / method +max-args = 9 -# Allow the body of a class to be on the same line as the declaration if body -# contains single statement. -single-line-class-stmt=no +# Maximum number of locals for function / method body +max-locals = 19 -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no +# Maximum number of return / yield for function / method body +max-returns=11 + +# Maximum number of branch for function / method body +max-branches = 20 +# Maximum number of statements in function / method body +max-statements = 50 + +# Maximum number of attributes for a class (see R0902). +max-attributes=11 + +# Maximum number of statements in a try-block +max-try-statements = 7 + +# Maximum number of positional arguments (see R0917). +max-positional-arguments = 12 + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp,__post_init__ + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no [IMPORTS] +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + # Allow wildcard imports from modules that define __all__. allow-wildcard-with-all=no +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + # Analyse import fallback blocks. This can be used to support both Python 2 and # 3 compatible code, which means that the block might have code that exists # only in one or another interpreter, leading to false positives when analysed. analyse-fallback-blocks=no -# Deprecated modules which should not be used, separated by a comma. -deprecated-modules=optparse,tkinter.tix - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled). -ext-import-graph= +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,TERMIOS,Bastion,rexec # Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled). +# given file (report RP0402 must not be disabled) import-graph= +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + # Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled). +# not be disabled) int-import-graph= # Force import order to recognize a module as part of the standard # compatibility libraries. 
-known-standard-library= +known-standard-library=_string # Force import order to recognize a module as part of a third party library. known-third-party=enchant +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= -[DESIGN] -# Maximum number of arguments for function / method. -max-args=5 +[EXCEPTIONS] -# Maximum number of attributes for a class (see R0902). -max-attributes=7 +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=builtins.Exception -# Maximum number of boolean expressions in an if statement. -max-bool-expr=5 -# Maximum number of branch for function / method body. -max-branches=12 +[TYPING] -# Maximum number of locals for function / method body. -max-locals=15 +# Set to ``no`` if the app / library does **NOT** need to support runtime +# introspection of type annotations. If you use type annotations +# **exclusively** for type checking of an application, you're probably fine. +# For libraries, evaluate if some users what to access the type hints at +# runtime first, e.g., through ``typing.get_type_hints``. Applies to Python +# versions 3.7 - 3.9 +runtime-typing = no -# Maximum number of parents for a class (see R0901). -max-parents=11 -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 +[DEPRECATED_BUILTINS] -# Maximum number of return / yield for function / method body. -max-returns=6 +# List of builtins function names that should not be used, separated by a comma +bad-functions=map,input -# Maximum number of statements in function / method body. -max-statements=50 -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 +[REFACTORING] +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 -[CLASSES] +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__, - __new__, - setUp -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict, - _fields, - _replace, - _source, - _make +[STRING] -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=cls +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no -[EXCEPTIONS] +[CODE_STYLE] -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception". -overgeneral-exceptions=Exception +# Max line length for which to sill emit suggestions. Used to prevent optional +# suggestions which would get split by a code formatter (e.g., black). Will +# default to the setting for ``max-line-length``. 
+#max-line-length-suggestions= \ No newline at end of file diff --git a/art/defences/preprocessor/mp3_compression.py b/art/defences/preprocessor/mp3_compression.py index 7cb2139321..50e198c466 100644 --- a/art/defences/preprocessor/mp3_compression.py +++ b/art/defences/preprocessor/mp3_compression.py @@ -150,7 +150,7 @@ def wav_to_mp3(x, sample_rate): if x.dtype != object and self.channels_first: x_mp3 = np.swapaxes(x_mp3, 1, 2) - if x_orig_type != object and x.dtype == object and x.ndim == 2: + if x_orig_type is not object and x.dtype is object and x.ndim == 2: x_mp3 = x_mp3.astype(x_orig_type) return x_mp3, y diff --git a/pyproject.toml b/pyproject.toml index 23a4d6ef57..42c0f19f31 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,4 +2,4 @@ line-length=120 [tool.ruff] exclude = [".venv", "contrib"] -ignore = ["F401"] +lint.ignore = ["F401"] From 3e3543328b0940e2a4d625be10208582f9c6bedd Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Sun, 4 Aug 2024 00:33:58 +0200 Subject: [PATCH 05/27] Fix warnings introduced by upgrades Signed-off-by: Beat Buesser --- .github/workflows/ci-style-checks.yml | 7 +- .pylintrc | 10 +- art/__init__.py | 2 - art/attacks/attack.py | 69 ++++----- art/attacks/evasion/__init__.py | 2 +- art/attacks/evasion/adversarial_asr.py | 4 +- .../adversarial_patch/adversarial_patch.py | 16 +- .../adversarial_patch_numpy.py | 16 +- .../adversarial_patch_pytorch.py | 42 ++--- .../adversarial_patch_tensorflow.py | 44 +++--- .../adversarial_texture_pytorch.py | 46 +++--- art/attacks/evasion/auto_attack.py | 16 +- .../evasion/auto_conjugate_gradient.py | 18 ++- .../auto_projected_gradient_descent.py | 20 +-- art/attacks/evasion/boundary.py | 12 +- art/attacks/evasion/brendel_bethge.py | 20 +-- art/attacks/evasion/carlini.py | 16 +- .../evasion/composite_adversarial_attack.py | 40 ++--- art/attacks/evasion/decision_tree_attack.py | 12 +- art/attacks/evasion/deepfool.py | 4 +- art/attacks/evasion/dpatch.py | 30 ++-- art/attacks/evasion/dpatch_robust.py | 32 ++-- art/attacks/evasion/elastic_net.py | 6 +- art/attacks/evasion/fast_gradient.py | 42 ++--- .../feature_adversaries_numpy.py | 10 +- .../feature_adversaries_pytorch.py | 16 +- .../feature_adversaries_tensorflow.py | 16 +- art/attacks/evasion/frame_saliency.py | 6 +- .../geometric_decision_based_attack.py | 16 +- .../evasion/graphite/graphite_blackbox.py | 86 +++++------ .../graphite/graphite_whitebox_pytorch.py | 38 ++--- art/attacks/evasion/graphite/utils.py | 57 +++---- art/attacks/evasion/hclu.py | 4 +- art/attacks/evasion/hop_skip_jump.py | 24 +-- .../imperceptible_asr/imperceptible_asr.py | 40 ++--- .../imperceptible_asr_pytorch.py | 32 ++-- art/attacks/evasion/iterative_method.py | 8 +- .../evasion/laser_attack/algorithms.py | 6 +- .../evasion/laser_attack/laser_attack.py | 32 ++-- art/attacks/evasion/laser_attack/utils.py | 9 +- art/attacks/evasion/lowprofool.py | 23 +-- .../evasion/momentum_iterative_method.py | 10 +- art/attacks/evasion/newtonfool.py | 4 +- .../over_the_air_flickering_pytorch.py | 11 +- art/attacks/evasion/overload/box_iou.py | 4 +- art/attacks/evasion/overload/overload.py | 10 +- art/attacks/evasion/pe_malware_attack.py | 89 +++++------ art/attacks/evasion/pixel_threshold.py | 42 ++--- .../projected_gradient_descent.py | 24 +-- .../projected_gradient_descent_numpy.py | 40 ++--- .../projected_gradient_descent_pytorch.py | 44 +++--- ...rojected_gradient_descent_tensorflow_v2.py | 48 +++--- art/attacks/evasion/saliency_map.py | 6 +- art/attacks/evasion/shadow_attack.py | 14 +- 
art/attacks/evasion/shapeshifter.py | 41 ++--- art/attacks/evasion/sign_opt.py | 27 ++-- art/attacks/evasion/simba.py | 8 +- art/attacks/evasion/spatial_transformation.py | 14 +- art/attacks/evasion/square_attack.py | 13 +- .../targeted_universal_perturbation.py | 12 +- art/attacks/evasion/universal_perturbation.py | 24 +-- art/attacks/evasion/virtual_adversarial.py | 4 +- art/attacks/evasion/wasserstein.py | 6 +- art/attacks/evasion/zoo.py | 24 +-- art/attacks/extraction/copycat_cnn.py | 4 +- .../functionally_equivalent_extraction.py | 34 ++-- art/attacks/extraction/knockoff_nets.py | 6 +- .../inference/attribute_inference/baseline.py | 20 +-- .../attribute_inference/black_box.py | 36 ++--- .../attribute_inference/meminf_based.py | 10 +- .../true_label_baseline.py | 24 +-- .../white_box_decision_tree.py | 13 +- .../white_box_lifestyle_decision_tree.py | 17 +- .../membership_inference/black_box.py | 34 ++-- .../black_box_rule_based.py | 4 +- .../label_only_boundary_distance.py | 8 +- .../membership_inference/shadow_models.py | 25 +-- .../inference/model_inversion/mi_face.py | 4 +- .../inference/reconstruction/white_box.py | 5 +- .../poisoning/adversarial_embedding_attack.py | 32 ++-- art/attacks/poisoning/backdoor_attack.py | 12 +- .../backdoor_attack_dgm_red.py | 1 - art/attacks/poisoning/bad_det/bad_det_gma.py | 16 +- art/attacks/poisoning/bad_det/bad_det_oda.py | 16 +- art/attacks/poisoning/bad_det/bad_det_oga.py | 16 +- art/attacks/poisoning/bad_det/bad_det_rma.py | 18 +-- .../poisoning/bullseye_polytope_attack.py | 24 +-- .../poisoning/clean_label_backdoor_attack.py | 12 +- .../poisoning/feature_collision_attack.py | 14 +- .../poisoning/gradient_matching_attack.py | 53 ++++--- .../hidden_trigger_backdoor.py | 12 +- .../hidden_trigger_backdoor_keras.py | 45 +++--- .../hidden_trigger_backdoor_pytorch.py | 14 +- .../perturbations/audio_perturbations.py | 4 +- .../perturbations/image_perturbations.py | 6 +- art/attacks/poisoning/poisoning_attack_svm.py | 11 +- art/attacks/poisoning/sleeper_agent_attack.py | 37 +++-- art/config.py | 6 +- art/data_generators.py | 32 ++-- .../evasion/binary_activation_detector.py | 12 +- .../detector/evasion/binary_input_detector.py | 4 +- .../detector/evasion/evasion_detector.py | 8 +- .../evasion/subsetscanning/detector.py | 18 +-- .../evasion/subsetscanning/scanner.py | 6 +- .../evasion/subsetscanning/scanningops.py | 6 +- .../detector/poison/activation_defence.py | 57 ++++--- .../detector/poison/clustering_analyzer.py | 44 +++--- .../detector/poison/ground_truth_evaluator.py | 6 +- .../poison/poison_filtering_defence.py | 6 +- .../detector/poison/provenance_defense.py | 22 +-- art/defences/detector/poison/roni.py | 13 +- .../poison/spectral_signature_defense.py | 6 +- art/defences/postprocessor/postprocessor.py | 3 +- art/defences/preprocessor/cutmix/cutmix.py | 4 +- .../preprocessor/cutmix/cutmix_pytorch.py | 8 +- .../preprocessor/cutmix/cutmix_tensorflow.py | 12 +- art/defences/preprocessor/cutout/cutout.py | 4 +- .../preprocessor/cutout/cutout_pytorch.py | 8 +- .../preprocessor/cutout/cutout_tensorflow.py | 8 +- .../preprocessor/feature_squeezing.py | 4 +- .../preprocessor/gaussian_augmentation.py | 8 +- art/defences/preprocessor/inverse_gan.py | 12 +- art/defences/preprocessor/jpeg_compression.py | 4 +- art/defences/preprocessor/label_smoothing.py | 4 +- art/defences/preprocessor/mixup/mixup.py | 4 +- .../preprocessor/mixup/mixup_pytorch.py | 8 +- .../preprocessor/mixup/mixup_tensorflow.py | 8 +- art/defences/preprocessor/mp3_compression.py | 4 +- 
.../preprocessor/mp3_compression_pytorch.py | 14 +- art/defences/preprocessor/pixel_defend.py | 8 +- art/defences/preprocessor/preprocessor.py | 30 ++-- art/defences/preprocessor/resample.py | 4 +- .../preprocessor/spatial_smoothing.py | 8 +- .../preprocessor/spatial_smoothing_pytorch.py | 16 +- .../spatial_smoothing_tensorflow.py | 10 +- .../preprocessor/thermometer_encoding.py | 4 +- .../preprocessor/variance_minimization.py | 8 +- .../preprocessor/video_compression.py | 4 +- .../preprocessor/video_compression_pytorch.py | 14 +- art/defences/trainer/adversarial_trainer.py | 16 +- .../trainer/adversarial_trainer_awp.py | 16 +- .../adversarial_trainer_awp_pytorch.py | 58 +++---- .../trainer/adversarial_trainer_fbf.py | 12 +- .../adversarial_trainer_fbf_pytorch.py | 40 +++-- .../trainer/adversarial_trainer_madry_pgd.py | 22 +-- .../trainer/adversarial_trainer_oaat.py | 19 +-- .../adversarial_trainer_oaat_pytorch.py | 134 +++++++--------- .../trainer/adversarial_trainer_trades.py | 10 +- .../adversarial_trainer_trades_pytorch.py | 69 ++++----- .../certified_adversarial_trainer_pytorch.py | 30 ++-- art/defences/trainer/dp_instahide_trainer.py | 16 +- .../trainer/ibp_certified_trainer_pytorch.py | 38 ++--- .../evasion/defensive_distillation.py | 4 +- .../transformer/poisoning/neural_cleanse.py | 8 +- art/defences/transformer/poisoning/strip.py | 4 +- art/defences/transformer/transformer.py | 6 +- art/estimators/certification/abstain.py | 7 - art/estimators/certification/deep_z/deep_z.py | 16 +- .../certification/deep_z/pytorch.py | 39 ++--- .../derandomized_smoothing/ablators/ablate.py | 18 +-- .../ablators/pytorch.py | 34 ++-- .../ablators/tensorflow.py | 48 +++--- .../derandomized_smoothing/pytorch.py | 50 +++--- .../derandomized_smoothing/tensorflow.py | 27 ++-- .../vision_transformers/pytorch.py | 8 +- .../certification/interval/interval.py | 31 ++-- .../certification/interval/pytorch.py | 37 ++--- .../object_seeker/object_seeker.py | 19 ++- .../certification/object_seeker/pytorch.py | 68 ++++---- .../randomized_smoothing/macer/pytorch.py | 20 +-- .../randomized_smoothing/macer/tensorflow.py | 25 +-- .../randomized_smoothing/numpy.py | 22 +-- .../randomized_smoothing/pytorch.py | 24 +-- .../randomized_smoothing.py | 10 +- .../smooth_adv/pytorch.py | 20 +-- .../smooth_adv/tensorflow.py | 21 +-- .../smooth_mix/pytorch.py | 32 ++-- .../randomized_smoothing/tensorflow.py | 27 ++-- art/estimators/classification/GPy.py | 25 ++- art/estimators/classification/blackbox.py | 38 +++-- art/estimators/classification/catboost.py | 22 +-- art/estimators/classification/classifier.py | 16 +- .../classification/deep_partition_ensemble.py | 27 ++-- .../classification/detector_classifier.py | 26 ++-- art/estimators/classification/ensemble.py | 34 ++-- art/estimators/classification/hugging_face.py | 40 ++--- art/estimators/classification/keras.py | 84 +++++----- art/estimators/classification/lightgbm.py | 25 ++- art/estimators/classification/mxnet.py | 42 +++-- art/estimators/classification/pytorch.py | 128 +++++++-------- .../classification/query_efficient_bb.py | 19 ++- art/estimators/classification/scikitlearn.py | 140 ++++++++--------- art/estimators/classification/tensorflow.py | 146 ++++++++---------- art/estimators/classification/xgboost.py | 30 ++-- art/estimators/encoding/tensorflow.py | 26 ++-- art/estimators/estimator.py | 44 +++--- art/estimators/gan/tensorflow.py | 10 +- art/estimators/generation/tensorflow.py | 44 +++--- art/estimators/keras.py | 10 +- art/estimators/mxnet.py | 6 - 
art/estimators/object_detection/detr.py | 12 +- .../pytorch_detection_transformer.py | 32 ++-- .../object_detection/pytorch_faster_rcnn.py | 24 +-- .../pytorch_object_detector.py | 100 ++++++------ .../object_detection/pytorch_yolo.py | 28 ++-- .../tensorflow_faster_rcnn.py | 78 +++++----- .../tensorflow_v2_faster_rcnn.py | 72 ++++----- art/estimators/object_detection/utils.py | 30 ++-- .../object_tracking/pytorch_goturn.py | 68 ++++---- .../poison_mitigation/neural_cleanse/keras.py | 24 +-- .../neural_cleanse/neural_cleanse.py | 18 +-- .../poison_mitigation/strip/strip.py | 10 +- art/estimators/pytorch.py | 11 +- art/estimators/regression/blackbox.py | 26 ++-- art/estimators/regression/keras.py | 73 ++++----- art/estimators/regression/pytorch.py | 120 +++++++------- art/estimators/regression/scikitlearn.py | 28 ++-- art/estimators/scikitlearn.py | 8 +- .../speech_recognition/pytorch_deep_speech.py | 52 +++---- .../speech_recognition/pytorch_espresso.py | 36 ++--- .../speech_recognition/speech_recognizer.py | 4 +- .../speech_recognition/tensorflow_lingvo.py | 44 +++--- art/estimators/tensorflow.py | 4 +- .../security_curve/security_curve.py | 21 +-- art/exceptions.py | 6 +- .../estimators/classification/jax.py | 31 ++-- art/experimental/estimators/jax.py | 3 - art/metrics/metrics.py | 30 ++-- art/metrics/privacy/membership_leakage.py | 16 +- art/metrics/privacy/worst_case_mia_score.py | 14 +- art/metrics/verification_decisions_trees.py | 40 ++--- art/preprocessing/audio/l_filter/numpy.py | 10 +- art/preprocessing/audio/l_filter/pytorch.py | 14 +- .../image_center_crop/pytorch.py | 14 +- .../image_rotation/pytorch.py | 20 +-- .../image_rotation/tensorflow.py | 17 +- .../natural_corruptions/brightness/pytorch.py | 14 +- .../brightness/tensorflow.py | 14 +- .../natural_corruptions/contrast/pytorch.py | 14 +- .../contrast/tensorflow.py | 14 +- .../gaussian_noise/pytorch.py | 14 +- .../gaussian_noise/tensorflow.py | 14 +- .../natural_corruptions/shot_noise/pytorch.py | 14 +- .../shot_noise/tensorflow.py | 16 +- .../natural_corruptions/zoom_blur/pytorch.py | 14 +- .../zoom_blur/tensorflow.py | 14 +- .../pytorch.py | 20 +-- .../tensorflow.py | 14 +- art/preprocessing/image/image_resize/numpy.py | 14 +- .../image/image_resize/pytorch.py | 16 +- .../image/image_resize/tensorflow.py | 14 +- .../image/image_square_pad/numpy.py | 16 +- .../image/image_square_pad/pytorch.py | 18 ++- .../image/image_square_pad/tensorflow.py | 16 +- art/preprocessing/preprocessing.py | 2 +- .../standardisation_mean_std/numpy.py | 16 +- .../standardisation_mean_std/pytorch.py | 16 +- .../standardisation_mean_std/tensorflow.py | 16 +- .../standardisation_mean_std/utils.py | 6 +- art/summary_writer.py | 22 +-- art/utils.py | 131 ++++++++-------- art/visualization.py | 8 +- examples/adversarial_training_cifar10.py | 1 + examples/inverse_gan_author_utils.py | 2 +- pyproject.toml | 2 + ...test_functionally_equivalent_extraction.py | 17 +- 266 files changed, 3073 insertions(+), 3185 deletions(-) diff --git a/.github/workflows/ci-style-checks.yml b/.github/workflows/ci-style-checks.yml index 8119eaa915..d07db45cc5 100644 --- a/.github/workflows/ci-style-checks.yml +++ b/.github/workflows/ci-style-checks.yml @@ -49,16 +49,13 @@ jobs: run: pycodestyle --ignore=C0330,C0415,E203,E231,W503 --max-line-length=120 art - name: pylint if: ${{ always() }} - run: pylint --disable=C0330,C0415,E203,E1136,E0401,E1102 -rn art + run: pylint --fail-under=9.67 art/ - name: mypy if: ${{ always() }} run: mypy art - name: ruff if: ${{ always() }} - 
run: | - ruff check art/ - ruff check tests/ - ruff check examples/ + run: ruff check art/ tests/ examples/ - name: black if: ${{ always() }} run: | diff --git a/.pylintrc b/.pylintrc index c7504055b6..19e4d064ac 100644 --- a/.pylintrc +++ b/.pylintrc @@ -109,7 +109,15 @@ disable= # We anticipate #3512 where it will become optional fixme, consider-using-assignment-expr, - + # ART + duplicate-code, + no-member, + import-outside-toplevel, + import-error, + not-callable, + too-many-arguments, + too-many-locals, + unsupported-binary-operation, [REPORTS] diff --git a/art/__init__.py b/art/__init__.py index 497fff6eff..5671acfcbd 100644 --- a/art/__init__.py +++ b/art/__init__.py @@ -15,8 +15,6 @@ # Semantic Version __version__ = "1.18.1" -# pylint: disable=C0103 - LOGGING = { "version": 1, "disable_existing_loggers": False, diff --git a/art/attacks/attack.py b/art/attacks/attack.py index 4a6d5167a7..595b5f4941 100644 --- a/art/attacks/attack.py +++ b/art/attacks/attack.py @@ -18,11 +18,11 @@ """ This module implements the abstract base classes for all attacks. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc import logging -from typing import Any, Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np @@ -38,17 +38,18 @@ class InputFilter(abc.ABCMeta): # pragma: no cover """ - Metaclass to ensure that inputs are ndarray for all of the subclass generate and extract calls + Metaclass to ensure that inputs are ndarray for all the subclass generate and extract calls """ - # pylint: disable=W0613 - def __init__(cls, name, bases, clsdict): # pylint: disable=W0231 + def __init__(cls, name, bases, clsdict): """ This function overrides any existing generate or extract methods with a new method that ensures the input is an `np.ndarray`. There is an assumption that the input object has implemented __array__ with np.array calls. """ + super().__init__() + def make_replacement(fdict, func_name): """ This function overrides creates replacement functions dynamically @@ -94,17 +95,17 @@ class Attack(abc.ABC): Abstract base class for all attack abstract base classes. """ - attack_params: List[str] = [] + attack_params: list[str] = [] # The _estimator_requirements define the requirements an estimator must satisfy to be used as a target for an # attack. They should be a tuple of requirements, where each requirement is either a class the estimator must # inherit from, or a tuple of classes which define a union, i.e. the estimator must inherit from at least one class # in the requirement tuple. - _estimator_requirements: Optional[Union[Tuple[Any, ...], Tuple[()]]] = None + _estimator_requirements: tuple[Any, ...] | tuple[()] | None = None def __init__( self, estimator, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, ): """ :param estimator: An estimator. 
@@ -126,7 +127,7 @@ def __init__( self._estimator = estimator self._summary_writer_arg = summary_writer - self._summary_writer: Optional[SummaryWriter] = None + self._summary_writer: SummaryWriter | None = None if isinstance(summary_writer, SummaryWriter): # pragma: no cover self._summary_writer = summary_writer @@ -212,13 +213,13 @@ def __init__(self, **kwargs) -> None: super().__init__(**kwargs) @abc.abstractmethod - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial examples and return them as an array. This method should be overridden by all concrete evasion attack implementations. :param x: An array with the original inputs to be attacked. - :param y: Correct labels or target labels for `x`, depending if the attack is targeted + :param y: Correct labels or target labels for `x`, depending on if the attack is targeted or not. This parameter is only used by some of the attacks. :return: An array holding the adversarial examples. """ @@ -241,14 +242,14 @@ class PoisoningAttack(Attack): Abstract base class for poisoning attack classes """ - def __init__(self, classifier: Optional["CLASSIFIER_TYPE"]) -> None: + def __init__(self, classifier: "CLASSIFIER_TYPE" | None) -> None: """ :param classifier: A trained classifier (or none if no classifier is needed) """ super().__init__(classifier) @abc.abstractmethod - def poison(self, x: np.ndarray, y=Optional[np.ndarray], **kwargs) -> Tuple[np.ndarray, np.ndarray]: + def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Generate poisoning examples and return them as an array. This method should be overridden by all concrete poisoning attack implementations. @@ -310,14 +311,8 @@ class PoisoningAttackTransformer(PoisoningAttack): These attacks have an additional method, `poison_estimator`, that returns the poisoned classifier. """ - def __init__(self, classifier: Optional["CLASSIFIER_TYPE"]) -> None: - """ - :param classifier: A trained classifier (or none if no classifier is needed) - """ - super().__init__(classifier) - @abc.abstractmethod - def poison(self, x: np.ndarray, y=Optional[np.ndarray], **kwargs) -> Tuple[np.ndarray, np.ndarray]: + def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Generate poisoning examples and return them as an array. This method should be overridden by all concrete poisoning attack implementations. @@ -354,16 +349,16 @@ def __init__(self): @abc.abstractmethod def poison( self, - x: Union[np.ndarray, List[np.ndarray]], - y: List[Dict[str, np.ndarray]], + x: np.ndarray | list[np.ndarray], + y: list[dict[str, np.ndarray]], **kwargs, - ) -> Tuple[Union[np.ndarray, List[np.ndarray]], List[Dict[str, np.ndarray]]]: + ) -> tuple[np.ndarray | list[np.ndarray], list[dict[str, np.ndarray]]]: """ Generate poisoning examples and return them as an array. This method should be overridden by all concrete poisoning attack implementations. :param x: An array with the original inputs to be attacked. - :param y: True labels of type `List[Dict[np.ndarray]]`, one dictionary per input image. + :param y: True labels of type `list[dict[np.ndarray]]`, one dictionary per input image. The keys and values of the dictionary are: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. 
- labels [N]: the labels for each image @@ -385,7 +380,7 @@ def __init__(self): super().__init__(None) # type: ignore @abc.abstractmethod - def poison(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> Tuple[np.ndarray, np.ndarray]: + def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Generate poisoning examples and return them as an array. This method should be overridden by all concrete poisoning attack implementations. @@ -403,7 +398,7 @@ class PoisoningAttackWhiteBox(PoisoningAttack): """ @abc.abstractmethod - def poison(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> Tuple[np.ndarray, np.ndarray]: + def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Generate poisoning examples and return them as an array. This method should be overridden by all concrete poisoning attack implementations. @@ -422,7 +417,7 @@ class ExtractionAttack(Attack): """ @abc.abstractmethod - def extract(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> "CLASSIFIER_TYPE": + def extract(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> "CLASSIFIER_TYPE": """ Extract models and return them as an ART classifier. This method should be overridden by all concrete extraction attack implementations. @@ -448,7 +443,7 @@ def __init__(self, estimator): super().__init__(estimator) @abc.abstractmethod - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer sensitive attributes from the targeted estimator. This method should be overridden by all concrete inference attack implementations. @@ -467,7 +462,7 @@ class AttributeInferenceAttack(InferenceAttack): attack_params = InferenceAttack.attack_params + ["attack_feature"] - def __init__(self, estimator, attack_feature: Union[int, slice] = 0): + def __init__(self, estimator, attack_feature: int | slice = 0): """ :param estimator: A trained estimator targeted for inference attack. :type estimator: :class:`.art.estimators.estimator.BaseEstimator` @@ -478,7 +473,7 @@ def __init__(self, estimator, attack_feature: Union[int, slice] = 0): self.attack_feature = get_feature_index(attack_feature) @abc.abstractmethod - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer sensitive attributes from the targeted estimator. This method should be overridden by all concrete inference attack implementations. @@ -490,7 +485,7 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n raise NotImplementedError @staticmethod - def _check_attack_feature(attack_feature: Union[int, slice]) -> None: + def _check_attack_feature(attack_feature: int | slice) -> None: if not isinstance(attack_feature, int) and not isinstance(attack_feature, slice): raise ValueError("Attack feature must be either an integer or a slice object.") @@ -506,16 +501,8 @@ class MembershipInferenceAttack(InferenceAttack): Abstract base class for membership inference attack classes. """ - def __init__(self, estimator): - """ - :param estimator: A trained estimator targeted for inference attack. - :type estimator: :class:`.art.estimators.estimator.BaseEstimator` - :param attack_feature: The index of the feature to be attacked. 
- """ - super().__init__(estimator) - @abc.abstractmethod - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer membership status of samples from the target estimator. This method should be overridden by all concrete inference attack implementations. @@ -552,7 +539,7 @@ def __init__(self, estimator): super().__init__(estimator) @abc.abstractmethod - def reconstruct(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> Tuple[np.ndarray, np.ndarray]: + def reconstruct(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Reconstruct the training dataset of and from the targeted estimator. This method should be overridden by all concrete inference attack implementations. diff --git a/art/attacks/evasion/__init__.py b/art/attacks/evasion/__init__.py index 63e62c3e56..dbee974ab0 100644 --- a/art/attacks/evasion/__init__.py +++ b/art/attacks/evasion/__init__.py @@ -2,7 +2,7 @@ Module providing evasion attacks under a common interface. """ -# pylint: disable=C0413 +# pylint: disable=wrong-import-position import importlib from art.attacks.evasion.adversarial_patch.adversarial_patch import AdversarialPatch diff --git a/art/attacks/evasion/adversarial_asr.py b/art/attacks/evasion/adversarial_asr.py index bf81eb7f5f..71a5408250 100644 --- a/art/attacks/evasion/adversarial_asr.py +++ b/art/attacks/evasion/adversarial_asr.py @@ -72,10 +72,10 @@ def __init__( :param num_iter_decrease_eps: Iterations after which to decrease epsilon if attack succeeds (Paper default: 10). :param batch_size: Batch size. """ - # pylint: disable=W0231 + # pylint: disable=super-init-not-called # re-implement init such that inherited methods work - EvasionAttack.__init__(self, estimator=estimator) # pylint: disable=W0233 + EvasionAttack.__init__(self, estimator=estimator) # pylint: disable=non-parent-init-called self.masker = None # type: ignore self.eps = eps self.learning_rate_1 = learning_rate diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch.py b/art/attacks/evasion/adversarial_patch/adversarial_patch.py index 910eddb972..3e0cb0a9ce 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch.py @@ -21,10 +21,10 @@ | Paper link: https://arxiv.org/abs/1712.09665 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -71,7 +71,7 @@ def __init__( learning_rate: float = 5.0, max_iter: int = 500, batch_size: int = 16, - patch_shape: Optional[Tuple[int, int, int]] = None, + patch_shape: tuple[int | int | int] | None = None, targeted: bool = True, verbose: bool = True, ): @@ -98,7 +98,7 @@ def __init__( if self.estimator.clip_values is None: # pragma: no cover raise ValueError("Adversarial Patch attack requires a classifier with clip_values.") - self._attack: Union[AdversarialPatchTensorFlowV2, AdversarialPatchPyTorch, AdversarialPatchNumpy] + self._attack: AdversarialPatchTensorFlowV2 | AdversarialPatchPyTorch | AdversarialPatchNumpy if isinstance(self.estimator, TensorFlowV2Classifier): self._attack = AdversarialPatchTensorFlowV2( classifier=classifier, @@ -145,8 +145,8 @@ def __init__( 
self._check_params() def generate( # type: ignore - self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: + self, x: np.ndarray, y: np.ndarray | None = None, **kwargs + ) -> tuple[np.ndarray, np.ndarray]: """ Generate an adversarial patch and return the patch and its mask in arrays. @@ -173,7 +173,7 @@ def generate( # type: ignore return self._attack.generate(x=x, y=y, **kwargs) def apply_patch( - self, x: np.ndarray, scale: float, patch_external: Optional[np.ndarray] = None, **kwargs + self, x: np.ndarray, scale: float, patch_external: np.ndarray | None = None, **kwargs ) -> np.ndarray: """ A function to apply the learned adversarial patch to images or videos. @@ -185,7 +185,7 @@ def apply_patch( """ return self._attack.apply_patch(x, scale, patch_external=patch_external, **kwargs) - def reset_patch(self, initial_patch_value: Optional[Union[float, np.ndarray]]) -> None: + def reset_patch(self, initial_patch_value: float | np.ndarray | None) -> None: """ Reset the adversarial patch. diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py index 0346df9db4..be36d15503 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py @@ -21,11 +21,11 @@ | Paper link: https://arxiv.org/abs/1712.09665 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import math -from typing import Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import random import numpy as np @@ -72,7 +72,7 @@ def __init__( scale_max: float = 1.0, learning_rate: float = 5.0, max_iter: int = 500, - clip_patch: Union[list, tuple, None] = None, + clip_patch: list | tuple | None = None, batch_size: int = 16, targeted: bool = True, verbose: bool = True, @@ -153,8 +153,8 @@ def __init__( self.reset_patch(self.mean_value) def generate( # type: ignore - self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: + self, x: np.ndarray, y: np.ndarray | None = None, **kwargs + ) -> tuple[np.ndarray, np.ndarray]: """ Generate an adversarial patch and return the patch and its mask in arrays. @@ -254,8 +254,8 @@ def apply_patch( self, x: np.ndarray, scale: float, - patch_external: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, + patch_external: np.ndarray | None = None, + mask: np.ndarray | None = None, ) -> np.ndarray: """ A function to apply the learned adversarial patch to images or videos. @@ -606,7 +606,7 @@ def _reverse_transformation(self, gradients: np.ndarray, patch_mask_transformed, return gradients - def reset_patch(self, initial_patch_value: Optional[Union[float, np.ndarray]]) -> None: + def reset_patch(self, initial_patch_value: float | np.ndarray | None) -> None: """ Reset the adversarial patch. 
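The signature rewrites in the hunks above all follow one pattern: Optional[X] and Union[A, B] become PEP 604 unions (X | None, A | B), and each touched module adds "annotations" to its __future__ import so the new-style annotations are stored as strings rather than evaluated, which keeps them valid on interpreters older than Python 3.10. A minimal illustrative sketch of that pattern (a hypothetical module, not part of the ART sources; the reset_patch name is only borrowed for the example):

# Illustrative sketch, not ART source: the typing pattern applied throughout
# this patch. With the future import below, the PEP 604 annotations are kept
# un-evaluated, so the module also runs on Python versions older than 3.10.
from __future__ import annotations

import numpy as np


def reset_patch(initial_patch_value: float | np.ndarray | None = None) -> None:
    # Same meaning as the former Optional[Union[float, np.ndarray]] spelling.
    if initial_patch_value is None:
        print("resetting the patch to a default value")
    else:
        print(f"resetting the patch from a {type(initial_patch_value).__name__}")


reset_patch()
reset_patch(0.5)
reset_patch(np.zeros((3, 224, 224)))

The same future import is also what lets forward references such as "torch.Tensor" | None appear in annotations of the TYPE_CHECKING-guarded modules without importing torch at module load time.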
diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py index c9aa9bc1b5..0004b324a3 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py @@ -21,11 +21,11 @@ | Paper link: https://arxiv.org/abs/1712.09665 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import math -from typing import Any, Optional, Tuple, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -37,7 +37,7 @@ from art.summary_writer import SummaryWriter if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLASSIFIER_NEURALNETWORK_TYPE @@ -79,12 +79,12 @@ def __init__( learning_rate: float = 5.0, max_iter: int = 500, batch_size: int = 16, - patch_shape: Tuple[int, int, int] = (3, 224, 224), - patch_location: Optional[Tuple[int, int]] = None, + patch_shape: tuple[int, int, int] = (3, 224, 224), + patch_location: tuple[int, int] | None = None, patch_type: str = "circle", optimizer: str = "Adam", targeted: bool = True, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, verbose: bool = True, ): """ @@ -182,7 +182,7 @@ def __init__( self._optimizer = torch.optim.Adam([self._patch], lr=self.learning_rate) def _train_step( - self, images: "torch.Tensor", target: "torch.Tensor", mask: Optional["torch.Tensor"] = None + self, images: "torch.Tensor", target: "torch.Tensor", mask: "torch.Tensor" | None = None ) -> "torch.Tensor": import torch @@ -211,8 +211,8 @@ def _train_step( return loss def _predictions( - self, images: "torch.Tensor", mask: Optional["torch.Tensor"], target: "torch.Tensor" - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + self, images: "torch.Tensor", mask: "torch.Tensor" | None, target: "torch.Tensor" + ) -> tuple["torch.Tensor", "torch.Tensor"]: import torch patched_input = self._random_overlay(images, self._patch, mask=mask) @@ -222,11 +222,11 @@ def _predictions( max=self.estimator.clip_values[1], ) - predictions, target = self.estimator._predict_framework(patched_input, target) # pylint: disable=W0212 + predictions, target = self.estimator._predict_framework(patched_input, target) return predictions, target - def _loss(self, images: "torch.Tensor", target: "torch.Tensor", mask: Optional["torch.Tensor"]) -> "torch.Tensor": + def _loss(self, images: "torch.Tensor", target: "torch.Tensor", mask: "torch.Tensor" | None) -> "torch.Tensor": import torch if isinstance(target, torch.Tensor): @@ -270,7 +270,7 @@ def _get_circular_patch_mask(self, nb_samples: int, sharpness: int = 40) -> "tor y = np.linspace(-1, 1, diameter) x_grid, y_grid = np.meshgrid(x, y, sparse=True) z_grid = (x_grid**2 + y_grid**2) ** sharpness - image_mask: Union[int, np.ndarray[Any, np.dtype[Any]]] = 1 - np.clip(z_grid, -1, 1) + image_mask: int | np.ndarray[Any, np.dtype[Any]] = 1 - np.clip(z_grid, -1, 1) elif self.patch_type == "square": image_mask = np.ones((diameter, diameter)) @@ -284,8 +284,8 @@ def _random_overlay( self, images: "torch.Tensor", patch: "torch.Tensor", - scale: Optional[float] = None, - mask: Optional["torch.Tensor"] = None, + scale: float | None = None, + mask: "torch.Tensor" | None = None, ) -> "torch.Tensor": import torch import torchvision @@ 
-474,8 +474,8 @@ def _random_overlay( return patched_images def generate( # type: ignore - self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: + self, x: np.ndarray, y: np.ndarray | None = None, **kwargs + ) -> tuple[np.ndarray, np.ndarray]: """ Generate an adversarial patch and return the patch and its mask in arrays. @@ -582,7 +582,7 @@ def __getitem__(self, idx): return img, target, mask_i - dataset_object_detection: Union[ObjectDetectionDataset, ObjectDetectionDatasetMask] + dataset_object_detection: ObjectDetectionDataset | ObjectDetectionDatasetMask if mask is None: dataset_object_detection = ObjectDetectionDataset(x, y) else: @@ -662,7 +662,7 @@ def __getitem__(self, idx): self._get_circular_patch_mask(nb_samples=1).cpu().numpy()[0], ) - def _check_mask(self, mask: Optional[np.ndarray], x: np.ndarray) -> Optional[np.ndarray]: + def _check_mask(self, mask: np.ndarray | None, x: np.ndarray) -> np.ndarray | None: if mask is not None and ( # pragma: no cover (mask.dtype != bool) or not (mask.shape[0] == 1 or mask.shape[0] == x.shape[0]) @@ -682,8 +682,8 @@ def apply_patch( self, x: np.ndarray, scale: float, - patch_external: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, + patch_external: np.ndarray | None = None, + mask: np.ndarray | None = None, ) -> np.ndarray: """ A function to apply the learned adversarial patch to images or videos. @@ -717,7 +717,7 @@ def apply_patch( .numpy() ) - def reset_patch(self, initial_patch_value: Optional[Union[float, np.ndarray]] = None) -> None: + def reset_patch(self, initial_patch_value: float | np.ndarray | None = None) -> None: """ Reset the adversarial patch. diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py index 3a613d46ef..80a773c0d2 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py @@ -21,11 +21,11 @@ | Paper link: https://arxiv.org/abs/1712.09665 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import math -from typing import Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -38,7 +38,7 @@ from art.summary_writer import SummaryWriter if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf from art.utils import CLASSIFIER_NEURALNETWORK_TYPE @@ -78,10 +78,10 @@ def __init__( learning_rate: float = 5.0, max_iter: int = 500, batch_size: int = 16, - patch_shape: Optional[Tuple[int, int, int]] = None, + patch_shape: tuple[int | int | int] | None = None, optimizer: str = "Adam", targeted: bool = True, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, verbose: bool = True, ): """ @@ -132,7 +132,7 @@ def __init__( if self.estimator.channels_first: # pragma: no cover raise ValueError("Color channel needs to be in last dimension.") - self.use_logits: Optional[bool] = None + self.use_logits: bool | None = None self.i_h_patch = 0 self.i_w_patch = 1 @@ -174,7 +174,7 @@ def __init__( ) def _train_step( - self, images: "tf.Tensor", target: Optional["tf.Tensor"] = None, mask: Optional["tf.Tensor"] = None + self, images: "tf.Tensor", target: "tf.Tensor" | None = None, mask: "tf.Tensor" | 
None = None ) -> "tf.Tensor": import tensorflow as tf @@ -201,7 +201,7 @@ def _train_step( return loss - def _predictions(self, images: "tf.Tensor", mask: Optional["tf.Tensor"]) -> "tf.Tensor": + def _predictions(self, images: "tf.Tensor", mask: "tf.Tensor" | None) -> "tf.Tensor": import tensorflow as tf patched_input = self._random_overlay(images, self._patch, mask=mask) @@ -212,11 +212,11 @@ def _predictions(self, images: "tf.Tensor", mask: Optional["tf.Tensor"]) -> "tf. clip_value_max=self.estimator.clip_values[1], ) - predictions = self.estimator._predict_framework(patched_input) # pylint: disable=W0212 + predictions = self.estimator._predict_framework(patched_input) return predictions - def _loss(self, images: "tf.Tensor", target: "tf.Tensor", mask: Optional["tf.Tensor"]) -> "tf.Tensor": + def _loss(self, images: "tf.Tensor", target: "tf.Tensor", mask: "tf.Tensor" | None) -> "tf.Tensor": import tensorflow as tf predictions = self._predictions(images, mask) @@ -250,10 +250,10 @@ def _get_circular_patch_mask(self, nb_samples: int, sharpness: int = 40) -> "tf. def _random_overlay( self, - images: Union[np.ndarray, "tf.Tensor"], - patch: Union[np.ndarray, "tf.Variable"], - scale: Optional[float] = None, - mask: Optional[Union[np.ndarray, "tf.Tensor"]] = None, + images: np.ndarray | "tf.Tensor", + patch: np.ndarray | "tf.Variable", + scale: float | None = None, + mask: np.ndarray | "tf.Tensor" | None = None, ) -> "tf.Tensor": import tensorflow as tf import tensorflow_addons as tfa @@ -280,7 +280,7 @@ def _random_overlay( pad_w_before = int((self.image_shape[self.i_w] - image_mask.shape.as_list()[self.i_w_patch + 1]) / 2) pad_w_after = int(self.image_shape[self.i_w] - pad_w_before - image_mask.shape[self.i_w_patch + 1]) - image_mask = tf.pad( # pylint: disable=E1123 + image_mask = tf.pad( image_mask, paddings=tf.constant([[0, 0], [pad_h_before, pad_h_after], [pad_w_before, pad_w_after], [0, 0]]), mode="CONSTANT", @@ -302,7 +302,7 @@ def _random_overlay( name=None, ) - padded_patch = tf.pad( # pylint: disable=E1123 + padded_patch = tf.pad( padded_patch, paddings=tf.constant([[0, 0], [pad_h_before, pad_h_after], [pad_w_before, pad_w_after], [0, 0]]), mode="CONSTANT", @@ -412,8 +412,8 @@ def _random_overlay( return images * inverted_mask + padded_patch * image_mask def generate( # type: ignore - self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: + self, x: np.ndarray, y: np.ndarray | None = None, **kwargs + ) -> tuple[np.ndarray, np.ndarray]: """ Generate an adversarial patch and return the patch and its mask in arrays. @@ -498,7 +498,7 @@ def generate( # type: ignore self._get_circular_patch_mask(nb_samples=1).numpy()[0], ) - def _check_mask(self, mask: Optional[np.ndarray], x: np.ndarray) -> Optional[np.ndarray]: + def _check_mask(self, mask: np.ndarray | None, x: np.ndarray) -> np.ndarray | None: if mask is not None and ( # pragma: no cover (mask.dtype != bool) or not (mask.shape[0] == 1 or mask.shape[0] == x.shape[0]) @@ -518,8 +518,8 @@ def apply_patch( self, x: np.ndarray, scale: float, - patch_external: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, + patch_external: np.ndarray | None = None, + mask: np.ndarray | None = None, ) -> np.ndarray: """ A function to apply the learned adversarial patch to images or videos. 
@@ -538,7 +538,7 @@ def apply_patch( patch = patch_external if patch_external is not None else self._patch return self._random_overlay(images=x, patch=patch, scale=scale, mask=mask).numpy() - def reset_patch(self, initial_patch_value: Optional[Union[float, np.ndarray]] = None) -> None: + def reset_patch(self, initial_patch_value: float | np.ndarray | None = None) -> None: """ Reset the adversarial patch. diff --git a/art/attacks/evasion/adversarial_texture/adversarial_texture_pytorch.py b/art/attacks/evasion/adversarial_texture/adversarial_texture_pytorch.py index f1413fcfdd..63798f8d0a 100644 --- a/art/attacks/evasion/adversarial_texture/adversarial_texture_pytorch.py +++ b/art/attacks/evasion/adversarial_texture/adversarial_texture_pytorch.py @@ -20,8 +20,10 @@ | Paper link: https://arxiv.org/abs/1904.11042 """ +from __future__ import annotations + import logging -from typing import Dict, List, Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -32,7 +34,7 @@ from art.summary_writer import SummaryWriter if TYPE_CHECKING: - # pylint: disable=C0412 + import torch logger = logging.getLogger(__name__) @@ -68,7 +70,7 @@ def __init__( step_size: float = 1.0 / 255.0, max_iter: int = 500, batch_size: int = 16, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, verbose: bool = True, ): """ @@ -133,10 +135,10 @@ def __init__( def _train_step( self, videos: "torch.Tensor", - target: List[Dict[str, "torch.Tensor"]], + target: list[dict[str, "torch.Tensor"]], y_init: "torch.Tensor", - foreground: Optional["torch.Tensor"], - patch_points: Optional[np.ndarray], + foreground: "torch.Tensor" | None, + patch_points: np.ndarray | None, ) -> "torch.Tensor": """ Apply a training step to the batch based on a mini-batch. @@ -188,9 +190,9 @@ def _predictions( self, videos: "torch.Tensor", y_init: "torch.Tensor", - foreground: Optional["torch.Tensor"], - patch_points: Optional[np.ndarray], - ) -> List[Dict[str, "torch.Tensor"]]: + foreground: "torch.Tensor" | None, + patch_points: np.ndarray | None, + ) -> list[dict[str, "torch.Tensor"]]: """ Predict object tracking estimator on patched videos. @@ -211,17 +213,17 @@ def _predictions( max=self.estimator.clip_values[1], ) - predictions = self.estimator.predict(patched_input, y_init=y_init) # pylint: disable=W0212 + predictions = self.estimator.predict(patched_input, y_init=y_init) return predictions def _loss( self, videos: "torch.Tensor", - target: List[Dict[str, "torch.Tensor"]], + target: list[dict[str, "torch.Tensor"]], y_init: "torch.Tensor", - foreground: Optional["torch.Tensor"], - patch_points: Optional[np.ndarray], + foreground: "torch.Tensor" | None, + patch_points: np.ndarray | None, ) -> "torch.Tensor": """ Calculate L1-loss. @@ -265,8 +267,8 @@ def _apply_texture( self, videos: "torch.Tensor", patch: "torch.Tensor", - foreground: Optional["torch.Tensor"], - patch_points: Optional[np.ndarray], + foreground: "torch.Tensor" | None, + patch_points: np.ndarray | None, ) -> "torch.Tensor": """ Apply texture over background and overlay foreground. 
@@ -407,14 +409,14 @@ def _apply_texture( return combined - def generate( # type: ignore # pylint: disable=W0222 - self, x: np.ndarray, y: List[Dict[str, np.ndarray]], **kwargs + def generate( # type: ignore # pylint: disable=signature-differs + self, x: np.ndarray, y: list[dict[str, np.ndarray]], **kwargs ) -> np.ndarray: """ Generate an adversarial patch and return the patch and its mask in arrays. :param x: Input videos of shape NFHWC. - :param y: True labels of format `List[Dict[str, np.ndarray]]`, one dictionary for each input image. The keys of + :param y: True labels of format `list[dict[str, np.ndarray]]`, one dictionary for each input image. The keys of the dictionary are: - boxes [N_FRAMES, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and @@ -520,9 +522,9 @@ def __getitem__(self, idx): def apply_patch( self, x: np.ndarray, - patch_external: Optional[np.ndarray] = None, - foreground: Optional[np.ndarray] = None, - patch_points: Optional[np.ndarray] = None, + patch_external: np.ndarray | None = None, + foreground: np.ndarray | None = None, + patch_points: np.ndarray | None = None, ) -> np.ndarray: """ A function to apply the learned adversarial texture to videos. @@ -557,7 +559,7 @@ def apply_patch( .numpy() ) - def reset_patch(self, initial_patch_value: Optional[Union[float, np.ndarray]] = None) -> None: + def reset_patch(self, initial_patch_value: float | np.ndarray | None = None) -> None: """ Reset the adversarial texture. diff --git a/art/attacks/evasion/auto_attack.py b/art/attacks/evasion/auto_attack.py index 3d2fa38159..ea4c5d50e0 100644 --- a/art/attacks/evasion/auto_attack.py +++ b/art/attacks/evasion/auto_attack.py @@ -20,9 +20,11 @@ | Paper link: https://arxiv.org/abs/2003.01690 """ +from __future__ import annotations + import logging from copy import deepcopy -from typing import TYPE_CHECKING, List, Optional, Tuple, Union +from typing import TYPE_CHECKING import numpy as np @@ -69,12 +71,12 @@ class AutoAttack(EvasionAttack): def __init__( self, estimator: "CLASSIFIER_TYPE", - norm: Union[int, float, str] = np.inf, + norm: int | float | str = np.inf, eps: float = 0.3, eps_step: float = 0.1, - attacks: Optional[List[EvasionAttack]] = None, + attacks: list[EvasionAttack] | None = None, batch_size: int = 32, - estimator_orig: Optional["CLASSIFIER_TYPE"] = None, + estimator_orig: "CLASSIFIER_TYPE" | None = None, targeted: bool = False, parallel: bool = False, ): @@ -153,7 +155,7 @@ def __init__( self.best_attacks: np.ndarray = np.array([]) self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -348,10 +350,10 @@ def run_attack( sample_is_robust: np.ndarray, attack: EvasionAttack, estimator_orig: "CLASSIFIER_TYPE", - norm: Union[int, float, str] = np.inf, + norm: int | float | str = np.inf, eps: float = 0.3, **kwargs, -) -> Tuple[np.ndarray, np.ndarray]: +) -> tuple[np.ndarray, np.ndarray]: """ Run attack. 
diff --git a/art/attacks/evasion/auto_conjugate_gradient.py b/art/attacks/evasion/auto_conjugate_gradient.py index 4e0df74cef..5f268fb702 100644 --- a/art/attacks/evasion/auto_conjugate_gradient.py +++ b/art/attacks/evasion/auto_conjugate_gradient.py @@ -42,10 +42,12 @@ | Paper link: https://arxiv.org/abs/2206.09628 """ +from __future__ import annotations + import abc import logging import math -from typing import Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -87,14 +89,14 @@ class AutoConjugateGradient(EvasionAttack): def __init__( self, estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE", - norm: Union[int, float, str] = np.inf, + norm: int | float | str = np.inf, eps: float = 0.3, eps_step: float = 0.1, max_iter: int = 100, targeted: bool = False, nb_random_init: int = 5, batch_size: int = 32, - loss_type: Optional[str] = None, + loss_type: str | None = None, verbose: bool = True, ): """ @@ -244,7 +246,7 @@ def __call__(self, y_true: tf.Tensor, y_pred: tf.Tensor, *args, **kwargs) -> tf. "the estimator has to to predict logits." ) - class CrossEntropyLossTorch(torch.nn.modules.loss._Loss): # pylint: disable=W0212 + class CrossEntropyLossTorch(torch.nn.modules.loss._Loss): """Class defining cross entropy loss with reduction options.""" def __init__(self, reduction="sum"): @@ -262,7 +264,7 @@ def __call__(self, y_true: torch.Tensor, y_pred: torch.Tensor, *args, **kwargs) raise NotImplementedError() def forward( - self, input: torch.Tensor, target: torch.Tensor # pylint: disable=W0622 + self, input: torch.Tensor, target: torch.Tensor # pylint: disable=redefined-builtin ) -> torch.Tensor: """ Forward method. @@ -283,7 +285,7 @@ def forward( "If loss_type='difference_logits_ratio' the estimator has to to predict logits." ) - class DifferenceLogitsRatioPyTorch(torch.nn.modules.loss._Loss): # pylint: disable=W0212 + class DifferenceLogitsRatioPyTorch(torch.nn.modules.loss._Loss): """ Callable class for Difference Logits Ratio loss in PyTorch. """ @@ -332,7 +334,7 @@ def __call__(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor: raise NotImplementedError() def forward( - self, input: torch.Tensor, target: torch.Tensor # pylint: disable=W0622 + self, input: torch.Tensor, target: torch.Tensor # pylint: disable=redefined-builtin ) -> torch.Tensor: """ Forward method. @@ -375,7 +377,7 @@ def forward( self.verbose = verbose self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
diff --git a/art/attacks/evasion/auto_projected_gradient_descent.py b/art/attacks/evasion/auto_projected_gradient_descent.py index 5eed274c0f..6e64618b45 100644 --- a/art/attacks/evasion/auto_projected_gradient_descent.py +++ b/art/attacks/evasion/auto_projected_gradient_descent.py @@ -20,10 +20,12 @@ | Paper link: https://arxiv.org/abs/2003.01690 """ +from __future__ import annotations + import abc import logging import math -from typing import Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -64,20 +66,20 @@ class AutoProjectedGradientDescent(EvasionAttack): def __init__( self, estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE", - norm: Union[int, float, str] = np.inf, + norm: int | float | str = np.inf, eps: float = 0.3, eps_step: float = 0.1, max_iter: int = 100, targeted: bool = False, nb_random_init: int = 5, batch_size: int = 32, - loss_type: Optional[str] = None, + loss_type: str | None = None, verbose: bool = True, ): """ Create a :class:`.AutoProjectedGradientDescent` instance. - :param estimator: An trained estimator. + :param estimator: A trained estimator. :param norm: The norm of the adversarial perturbation. Possible values: "inf", np.inf, 1 or 2. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. @@ -224,7 +226,7 @@ def __call__(self, y_true: tf.Tensor, y_pred: tf.Tensor, *args, **kwargs) -> tf. ) # modification for image-wise stepsize update - class CrossEntropyLossTorch(torch.nn.modules.loss._Loss): # pylint: disable=W0212 + class CrossEntropyLossTorch(torch.nn.modules.loss._Loss): """Class defining cross entropy loss with reduction options.""" def __init__(self, reduction="mean"): @@ -242,7 +244,7 @@ def __call__(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor: raise NotImplementedError() def forward( - self, input: torch.Tensor, target: torch.Tensor # pylint: disable=W0622 + self, input: torch.Tensor, target: torch.Tensor # pylint: disable=redefined-builtin ) -> torch.Tensor: """ Forward method. @@ -268,7 +270,7 @@ def forward( "If loss_type='difference_logits_ratio' the estimator has to to predict logits." ) - class DifferenceLogitsRatioPyTorch(torch.nn.modules.loss._Loss): # pylint: disable=W0212 + class DifferenceLogitsRatioPyTorch(torch.nn.modules.loss._Loss): """ Callable class for Difference Logits Ratio loss in PyTorch. """ @@ -318,7 +320,7 @@ def __call__(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor: raise NotImplementedError() def forward( - self, input: torch.Tensor, target: torch.Tensor # pylint: disable=W0622 + self, input: torch.Tensor, target: torch.Tensor # pylint: disable=redefined-builtin ) -> torch.Tensor: """ Forward method. @@ -370,7 +372,7 @@ def forward( self.verbose = verbose self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
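The pragma edits in the two loss-class diffs above follow the other recurring convention of this patch: pylint messages are referred to by symbolic name instead of numeric ID (W0622 becomes redefined-builtin here; elsewhere W0231 becomes super-init-not-called, W0222 signature-differs, and C0413 wrong-import-position). A small self-contained sketch of the forward() case, using a plain stand-in class rather than the torch.nn.modules.loss._Loss subclasses edited here:

# Minimal stand-in, not ART source: PyTorch convention names the first
# argument of forward() "input", which shadows the builtin of the same name,
# so the warning is silenced by its symbolic name instead of its numeric code.
class IdentityLoss:
    """Toy replacement for the _Loss subclasses in the diffs above."""

    def forward(self, input):  # pylint: disable=redefined-builtin
        # Formerly spelled "# pylint: disable=W0622".
        return input


print(IdentityLoss().forward(3.0))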
diff --git a/art/attacks/evasion/boundary.py b/art/attacks/evasion/boundary.py index d0416ec3f0..c2a600eaea 100644 --- a/art/attacks/evasion/boundary.py +++ b/art/attacks/evasion/boundary.py @@ -21,10 +21,10 @@ | Paper link: https://arxiv.org/abs/1712.04248 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm, trange @@ -110,9 +110,9 @@ def __init__( self.verbose = verbose self._check_params() - self.curr_adv: Optional[np.ndarray] = None + self.curr_adv: np.ndarray | None = None - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -268,7 +268,7 @@ def _attack( for _ in trange(self.max_iter, desc="Boundary attack - iterations", disable=not self.verbose): # Trust region method to adjust delta for _ in range(self.num_trial): - potential_advs_list: List[np.ndarray] = [] + potential_advs_list: list[np.ndarray] = [] for _ in range(self.sample_size): potential_adv = x_adv + self._orthogonal_perturb(self.curr_delta, x_adv, original_sample) potential_adv = np.clip(potential_adv, clip_min, clip_max) @@ -373,7 +373,7 @@ def _init_sample( adv_init: np.ndarray, clip_min: float, clip_max: float, - ) -> Optional[Tuple[np.ndarray, int]]: + ) -> tuple[np.ndarray, int] | None: """ Find initial adversarial example for the attack. diff --git a/art/attacks/evasion/brendel_bethge.py b/art/attacks/evasion/brendel_bethge.py index a19c218f60..69ed3f7ef3 100644 --- a/art/attacks/evasion/brendel_bethge.py +++ b/art/attacks/evasion/brendel_bethge.py @@ -42,8 +42,10 @@ """ This module implements Brendel and Bethge attack. """ -# pylint: disable=C0103,R0201,C0115,C0116,C0144,C0302,W0612,W0613,E1120,R1716,R1705,R1723,R1720 -from typing import Union, Optional, Tuple, TYPE_CHECKING +from __future__ import annotations + +# pylint: disable=invalid-name,missing-class-docstring,missing-function-docstring,old-non-ascii-name,unused-variable,unused-argument,chained-comparison,no-else-return,no-else-break,no-else-raise +from typing import TYPE_CHECKING import logging import numpy as np @@ -1981,7 +1983,7 @@ class BrendelBethgeAttack(EvasionAttack): def __init__( self, estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE", - norm: Union[int, float, str] = np.inf, + norm: int | float | str = np.inf, targeted: bool = False, overshoot: float = 1.1, steps: int = 1000, @@ -2166,7 +2168,7 @@ def logits_difference(y_pred, y_true): # type: ignore def generate( self, x: np.ndarray, - y: Optional[np.ndarray] = None, + y: np.ndarray | None = None, **kwargs, ) -> np.ndarray: """ @@ -2387,7 +2389,7 @@ def mid_points( x0: np.ndarray, x1: np.ndarray, epsilons: np.ndarray, - bounds: Tuple[float, float], + bounds: tuple[float, float], ) -> np.ndarray: """ returns a point between x0 and x1 where epsilon = 0 returns x0 and epsilon = 1 returns x1 @@ -2434,7 +2436,7 @@ def _init_sample( adv_init: np.ndarray, clip_min: float, clip_max: float, - ) -> Optional[Union[np.ndarray, Tuple[np.ndarray, int]]]: + ) -> np.ndarray | tuple[np.ndarray, int] | None: """ Find initial adversarial example for the attack. 
@@ -2523,10 +2525,10 @@ def _binary_search( current_sample: np.ndarray, original_sample: np.ndarray, target: int, - norm: Union[int, float, str], + norm: int | float | str, clip_min: float, clip_max: float, - threshold: Optional[float] = None, + threshold: float | None = None, ) -> np.ndarray: """ Binary search to approach the boundary. @@ -2588,7 +2590,7 @@ def _binary_search( @staticmethod def _interpolate( - current_sample: np.ndarray, original_sample: np.ndarray, alpha: float, norm: Union[int, float, str] + current_sample: np.ndarray, original_sample: np.ndarray, alpha: float, norm: int | float | str ) -> np.ndarray: """ Interpolate a new sample based on the original and the current samples. diff --git a/art/attacks/evasion/carlini.py b/art/attacks/evasion/carlini.py index 383c8a4aeb..a711126d72 100644 --- a/art/attacks/evasion/carlini.py +++ b/art/attacks/evasion/carlini.py @@ -24,11 +24,11 @@ | Paper link: https://arxiv.org/abs/1608.04644 """ -# pylint: disable=C0302 + from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -137,7 +137,7 @@ def __init__( def _loss( self, x: np.ndarray, x_adv: np.ndarray, target: np.ndarray, c_weight: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Compute the objective function value. @@ -226,7 +226,7 @@ def _loss_gradient( return loss_gradient - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -584,7 +584,7 @@ def __init__( def _loss( self, x_adv: np.ndarray, target: np.ndarray, x, const, tau - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """ Compute the objective function value. @@ -732,7 +732,7 @@ def func_der(x_i): return x_adv_batch - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -876,7 +876,7 @@ def __init__( binary_search_steps: int = 10, max_iter: int = 10, initial_const: float = 0.01, - mask: Optional[np.ndarray] = None, + mask: np.ndarray | None = None, warm_start: bool = True, max_halving: int = 5, max_doubling: int = 5, @@ -941,7 +941,7 @@ def __init__( # Below this threshold, a difference between values is considered as tanh transformation difference. self._perturbation_threshold = 1e-06 - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
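The `Tuple` imports dropped in these files are the other half of the modernisation: PEP 585 made the built-in `tuple`, `list` and `dict` usable as generic types, so the `typing` aliases are no longer needed. A short illustrative sketch (hypothetical function, not from the attacks above):

from __future__ import annotations  # keeps the annotations lazy, matching the modules in this patch


def split_stats(values: list[float]) -> tuple[float, float]:
    # previously: from typing import List, Tuple ... -> Tuple[float, float]
    return min(values), max(values)


low, high = split_stats([0.1, 0.5, 0.3])
counts: dict[str, int] = {"adv": 0, "benign": 0}  # previously Dict[str, int]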
diff --git a/art/attacks/evasion/composite_adversarial_attack.py b/art/attacks/evasion/composite_adversarial_attack.py index 0a2dd2fe8b..50fb79faee 100644 --- a/art/attacks/evasion/composite_adversarial_attack.py +++ b/art/attacks/evasion/composite_adversarial_attack.py @@ -27,7 +27,7 @@ import logging -from typing import Optional, Tuple, List, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -39,7 +39,7 @@ from art.utils import compute_success, check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.estimators.classification.pytorch import PyTorchClassifier @@ -78,14 +78,14 @@ class CompositeAdversarialAttackPyTorch(EvasionAttack): def __init__( self, classifier: "PyTorchClassifier", - enabled_attack: Tuple = (0, 1, 2, 3, 4, 5), + enabled_attack: tuple = (0, 1, 2, 3, 4, 5), # Default: Full Attacks; 0: Hue, 1: Saturation, 2: Rotation, 3: Brightness, 4: Contrast, 5: PGD (L-infinity) - hue_epsilon: Tuple[float, float] = (-np.pi, np.pi), - sat_epsilon: Tuple[float, float] = (0.7, 1.3), - rot_epsilon: Tuple[float, float] = (-10.0, 10.0), - bri_epsilon: Tuple[float, float] = (-0.2, 0.2), - con_epsilon: Tuple[float, float] = (0.7, 1.3), - pgd_epsilon: Tuple[float, float] = (-8 / 255, 8 / 255), # L-infinity + hue_epsilon: tuple[float, float] = (-np.pi, np.pi), + sat_epsilon: tuple[float, float] = (0.7, 1.3), + rot_epsilon: tuple[float, float] = (-10.0, 10.0), + bri_epsilon: tuple[float, float] = (-0.2, 0.2), + con_epsilon: tuple[float, float] = (0.7, 1.3), + pgd_epsilon: tuple[float, float] = (-8 / 255, 8 / 255), # L-infinity early_stop: bool = True, max_iter: int = 5, max_inner_iter: int = 10, @@ -176,8 +176,8 @@ def __init__( self._description = "Composite Adversarial Attack" self._is_scheduling: bool = False - self.eps_space: List = [] - self.adv_val_space: List = [] + self.eps_space: list = [] + self.adv_val_space: list = [] self.curr_dsm: "torch.Tensor" = torch.zeros((len(self.enabled_attack), len(self.enabled_attack))) self.curr_seq: "torch.Tensor" = torch.zeros(len(self.enabled_attack)) self.is_attacked: "torch.Tensor" = torch.zeros(self.batch_size, device=self.device).bool() @@ -298,7 +298,7 @@ def _setup_attack(self): [hue_space, sat_space, rot_space, bri_space, con_space, pgd_space][i] for i in self.enabled_attack ] - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate the composite adversarial samples and return them in a Numpy array. @@ -364,7 +364,7 @@ def _comp_pgd( attack_idx: int, attack_parameter: "torch.Tensor", ori_is_attacked: "torch.Tensor", - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Compute the adversarial examples for each attack component. @@ -409,7 +409,7 @@ def _comp_pgd( def caa_hue( self, data: "torch.Tensor", hue: "torch.Tensor", labels: "torch.Tensor" - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Compute the adversarial examples for hue component. @@ -429,7 +429,7 @@ def caa_hue( def caa_saturation( self, data: "torch.Tensor", saturation: "torch.Tensor", labels: "torch.Tensor" - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Compute the adversarial examples for saturation component. 
@@ -453,7 +453,7 @@ def caa_saturation( def caa_rotation( self, data: "torch.Tensor", theta: "torch.Tensor", labels: "torch.Tensor" - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Compute the adversarial examples for rotation component. @@ -473,7 +473,7 @@ def caa_rotation( def caa_brightness( self, data: "torch.Tensor", brightness: "torch.Tensor", labels: "torch.Tensor" - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Compute the adversarial examples for brightness component. @@ -497,7 +497,7 @@ def caa_brightness( def caa_contrast( self, data: "torch.Tensor", contrast: "torch.Tensor", labels: "torch.Tensor" - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Compute the adversarial examples for contrast component. @@ -521,7 +521,7 @@ def caa_contrast( def caa_linf( self, data: "torch.Tensor", eta: "torch.Tensor", labels: "torch.Tensor" - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Compute the adversarial examples for L-infinity (PGD) component. @@ -556,7 +556,7 @@ def caa_linf( return adv_data, eta - def update_attack_order(self, images: "torch.Tensor", labels: "torch.Tensor", adv_val: List) -> None: + def update_attack_order(self, images: "torch.Tensor", labels: "torch.Tensor", adv_val: list) -> None: """ Update the specified attack ordering. diff --git a/art/attacks/evasion/decision_tree_attack.py b/art/attacks/evasion/decision_tree_attack.py index 6da2dcc798..8d7374694b 100644 --- a/art/attacks/evasion/decision_tree_attack.py +++ b/art/attacks/evasion/decision_tree_attack.py @@ -18,10 +18,10 @@ """ This module implements attacks on Decision Trees. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Union + import numpy as np from tqdm.auto import trange @@ -63,9 +63,9 @@ def __init__( def _df_subtree( self, position: int, - original_class: Union[int, np.ndarray], - target: Optional[int] = None, - ) -> List[int]: + original_class: int | np.ndarray, + target: int | None = None, + ) -> list[int]: """ Search a decision tree for a mis-classifying instance. @@ -105,7 +105,7 @@ def _df_subtree( return path - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial examples and return them as an array. diff --git a/art/attacks/evasion/deepfool.py b/art/attacks/evasion/deepfool.py index 45afb92872..14da64d8f1 100644 --- a/art/attacks/evasion/deepfool.py +++ b/art/attacks/evasion/deepfool.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -90,7 +90,7 @@ def __init__( "the adversarial example." ) - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
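Alongside the typing updates, several files in this series swap numeric pylint codes for their symbolic names and drop directives that are no longer needed. A hypothetical sketch of the convention (illustrative only, assuming PyTorch is installed; not the ART loss classes shown above):

import torch


class ExampleLoss(torch.nn.modules.loss._Loss):  # the old trailing "# pylint: disable=W0212" (protected-access) is gone
    """Cross-entropy wrapper used only to illustrate the pylint directives."""

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:  # pylint: disable=redefined-builtin
        # "redefined-builtin" is the symbolic name of W0622: the parameter "input" shadows the builtin
        return torch.nn.functional.cross_entropy(input, target)

Symbolic names are self-documenting, which is presumably why the series prefers them over the bare W/C/R codes.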
diff --git a/art/attacks/evasion/dpatch.py b/art/attacks/evasion/dpatch.py index 52a3b9979c..f0d30d7382 100644 --- a/art/attacks/evasion/dpatch.py +++ b/art/attacks/evasion/dpatch.py @@ -20,10 +20,12 @@ | Paper link: https://arxiv.org/abs/1806.02299v4 """ +from __future__ import annotations + import logging import math import random -from typing import Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -59,7 +61,7 @@ class DPatch(EvasionAttack): def __init__( self, estimator: "OBJECT_DETECTOR_TYPE", - patch_shape: Tuple[int, int, int] = (40, 40, 3), + patch_shape: tuple[int, int, int] = (40, 40, 3), learning_rate: float = 5.0, max_iter: int = 500, batch_size: int = 16, @@ -94,20 +96,20 @@ def __init__( + self.estimator.clip_values[0] ).astype(config.ART_NUMPY_DTYPE) - self.target_label: Optional[Union[int, np.ndarray, List[int]]] = [] + self.target_label: int | np.ndarray | list[int] | None = [] - def generate( # pylint: disable=W0221 + def generate( self, x: np.ndarray, - y: Optional[np.ndarray] = None, - target_label: Optional[Union[int, List[int], np.ndarray]] = None, - **kwargs + y: np.ndarray | None = None, + target_label: int | np.ndarray | list[int] | None = None, + **kwargs, ) -> np.ndarray: """ Generate DPatch. :param x: Sample images. - :param y: True labels of type `List[Dict[np.ndarray]]` for untargeted attack, one dictionary per input image. + :param y: True labels of type `list[dict[np.ndarray]]` for untargeted attack, one dictionary per input image. The keys and values of the dictionary are: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -161,7 +163,7 @@ def generate( # pylint: disable=W0221 mask=mask, transforms=None, ) - patch_target: List[Dict[str, np.ndarray]] = [] + patch_target: list[dict[str, np.ndarray]] = [] if self.target_label and y is None: @@ -263,9 +265,9 @@ def _augment_images_with_patch( patch: np.ndarray, random_location: bool, channels_first: bool, - mask: Optional[np.ndarray] = None, - transforms: Optional[List[Dict[str, int]]] = None, - ) -> Tuple[np.ndarray, List[Dict[str, int]]]: + mask: np.ndarray | None = None, + transforms: list[dict[str, int]] | None = None, + ) -> tuple[np.ndarray, list[dict[str, int]]]: """ Augment images with patch. @@ -350,9 +352,9 @@ def _augment_images_with_patch( def apply_patch( self, x: np.ndarray, - patch_external: Optional[np.ndarray] = None, + patch_external: np.ndarray | None = None, random_location: bool = False, - mask: Optional[np.ndarray] = None, + mask: np.ndarray | None = None, ) -> np.ndarray: """ Apply the adversarial patch to images. 
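The object-detection attacks above and below both use the `list[dict[str, np.ndarray]]` label format, which is easier to picture with a concrete value. A minimal illustration (only the "boxes" layout is spelled out in the docstring; the "labels" key and the numbers are assumptions for the example):

from __future__ import annotations

import numpy as np

y_image: dict[str, np.ndarray] = {
    "boxes": np.array([[10.0, 20.0, 60.0, 90.0]], dtype=np.float32),  # [N, 4] as [x1, y1, x2, y2], 0 <= x1 < x2 <= W
    "labels": np.array([1]),  # assumed: one integer class id per box
}
y: list[dict[str, np.ndarray]] = [y_image]  # one dictionary per input image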
diff --git a/art/attacks/evasion/dpatch_robust.py b/art/attacks/evasion/dpatch_robust.py index b7c42c5a78..68ec8ca35f 100644 --- a/art/attacks/evasion/dpatch_robust.py +++ b/art/attacks/evasion/dpatch_robust.py @@ -24,10 +24,12 @@ | Paper link (original DPatch): https://arxiv.org/abs/1806.02299v4 | Paper link (physical-world patch from Lee & Kolter): https://arxiv.org/abs/1906.11897 """ +from __future__ import annotations + import logging import math import random -from typing import Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -75,17 +77,17 @@ class RobustDPatch(EvasionAttack): def __init__( self, estimator: "OBJECT_DETECTOR_TYPE", - patch_shape: Tuple[int, int, int] = (40, 40, 3), - patch_location: Tuple[int, int] = (0, 0), - crop_range: Tuple[int, int] = (0, 0), - brightness_range: Tuple[float, float] = (1.0, 1.0), - rotation_weights: Union[Tuple[float, float, float, float], Tuple[int, int, int, int]] = (1, 0, 0, 0), + patch_shape: tuple[int, int, int] = (40, 40, 3), + patch_location: tuple[int, int] = (0, 0), + crop_range: tuple[int, int] = (0, 0), + brightness_range: tuple[float, float] = (1.0, 1.0), + rotation_weights: tuple[float, float, float, float] | tuple[int, int, int, int] = (1, 0, 0, 0), sample_size: int = 1, learning_rate: float = 5.0, max_iter: int = 500, batch_size: int = 16, targeted: bool = False, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, verbose: bool = True, ): """ @@ -138,7 +140,7 @@ def __init__( self._check_params() def generate( # type: ignore - self, x: np.ndarray, y: Optional[List[Dict[str, np.ndarray]]] = None, **kwargs + self, x: np.ndarray, y: list[dict[str, np.ndarray]] | None = None, **kwargs ) -> np.ndarray: """ Generate RobustDPatch. @@ -262,8 +264,8 @@ def generate( # type: ignore return self._patch def _augment_images_with_patch( - self, x: np.ndarray, y: Optional[List[Dict[str, np.ndarray]]], patch: np.ndarray, channels_first: bool - ) -> Tuple[np.ndarray, List[Dict[str, np.ndarray]], Dict[str, Union[int, float]]]: + self, x: np.ndarray, y: list[dict[str, np.ndarray]] | None, patch: np.ndarray, channels_first: bool + ) -> tuple[np.ndarray, list[dict[str, np.ndarray]], dict[str, int | float]]: """ Augment images with patch. @@ -273,7 +275,7 @@ def _augment_images_with_patch( :param channels_first: Set channels first or last. 
""" - transformations: Dict[str, Union[float, int]] = {} + transformations: dict[str, float | int] = {} x_copy = x.copy() patch_copy = patch.copy() x_patch = x.copy() @@ -308,7 +310,7 @@ def _augment_images_with_patch( if y is not None: - y_copy: List[Dict[str, np.ndarray]] = [] + y_copy: list[dict[str, np.ndarray]] = [] for i_image in range(x_copy.shape[0]): y_b = y[i_image]["boxes"].copy() @@ -366,7 +368,7 @@ def _augment_images_with_patch( logger.debug("Transformations: %s", str(transformations)) - patch_target: List[Dict[str, np.ndarray]] = [] + patch_target: list[dict[str, np.ndarray]] = [] if self.targeted: predictions = y_copy @@ -391,7 +393,7 @@ def _augment_images_with_patch( def _untransform_gradients( self, gradients: np.ndarray, - transforms: Dict[str, Union[int, float]], + transforms: dict[str, int | float], channels_first: bool, ) -> np.ndarray: """ @@ -428,7 +430,7 @@ def _untransform_gradients( return gradients - def apply_patch(self, x: np.ndarray, patch_external: Optional[np.ndarray] = None) -> np.ndarray: + def apply_patch(self, x: np.ndarray, patch_external: np.ndarray | None = None) -> np.ndarray: """ Apply the adversarial patch to images. diff --git a/art/attacks/evasion/elastic_net.py b/art/attacks/evasion/elastic_net.py index dbc5afc2a8..7c20b322e8 100644 --- a/art/attacks/evasion/elastic_net.py +++ b/art/attacks/evasion/elastic_net.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np import six @@ -194,7 +194,7 @@ def _decay_learning_rate(self, global_step: int, end_learning_rate: float, decay return decayed_learning_rate - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -288,7 +288,7 @@ def _update_const( c_batch: np.ndarray, c_lower_bound: np.ndarray, c_upper_bound: np.ndarray, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Update constants. diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py index 060bc68612..5db802b864 100644 --- a/art/attacks/evasion/fast_gradient.py +++ b/art/attacks/evasion/fast_gradient.py @@ -21,10 +21,10 @@ | Paper link: https://arxiv.org/abs/1412.6572 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -71,14 +71,14 @@ class FastGradientMethod(EvasionAttack): def __init__( self, estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE", - norm: Union[int, float, str] = np.inf, - eps: Union[int, float, np.ndarray] = 0.3, - eps_step: Union[int, float, np.ndarray] = 0.1, + norm: int | float | str = np.inf, + eps: int | float | np.ndarray = 0.3, + eps_step: int | float | np.ndarray = 0.1, targeted: bool = False, num_random_init: int = 0, batch_size: int = 32, minimal: bool = False, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, ) -> None: """ Create a :class:`.FastGradientMethod` instance. 
@@ -159,8 +159,8 @@ def _minimal_perturbation(self, x: np.ndarray, y: np.ndarray, mask: np.ndarray) # Get current predictions active_indices = np.arange(len(batch)) - current_eps: Union[int, float, np.ndarray] - partial_stop_condition: Union[bool, np.ndarray] + current_eps: int | float | np.ndarray + partial_stop_condition: bool | np.ndarray if isinstance(self.eps, np.ndarray) and isinstance(self.eps_step, np.ndarray): if len(self.eps.shape) == len(x.shape) and self.eps.shape[0] == x.shape[0]: @@ -207,7 +207,7 @@ def _minimal_perturbation(self, x: np.ndarray, y: np.ndarray, mask: np.ndarray) return adv_x - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """Generate adversarial samples and return them in an array. :param x: An array with the original inputs. @@ -390,9 +390,9 @@ def _compute_perturbation( self, x: np.ndarray, y: np.ndarray, - mask: Optional[np.ndarray], - decay: Optional[float] = None, - momentum: Optional[np.ndarray] = None, + mask: np.ndarray | None, + decay: float | None = None, + momentum: np.ndarray | None = None, ) -> np.ndarray: # Get gradient wrt loss; invert it if attack is targeted grad = self.estimator.loss_gradient(x, y) * (1 - 2 * int(self.targeted)) @@ -472,7 +472,7 @@ def _apply_norm(norm, grad, object_type=False): return grad def _apply_perturbation( - self, x: np.ndarray, perturbation: np.ndarray, eps_step: Union[int, float, np.ndarray] + self, x: np.ndarray, perturbation: np.ndarray, eps_step: int | float | np.ndarray ) -> np.ndarray: perturbation_step = eps_step * perturbation @@ -502,14 +502,14 @@ def _compute( x: np.ndarray, x_init: np.ndarray, y: np.ndarray, - mask: Optional[np.ndarray], - eps: Union[int, float, np.ndarray], - eps_step: Union[int, float, np.ndarray], + mask: np.ndarray | None, + eps: int | float | np.ndarray, + eps_step: int | float | np.ndarray, project: bool, random_init: bool, - batch_id_ext: Optional[int] = None, - decay: Optional[float] = None, - momentum: Optional[np.ndarray] = None, + batch_id_ext: int | None = None, + decay: float | None = None, + momentum: np.ndarray | None = None, ) -> np.ndarray: if random_init: n = x.shape[0] @@ -549,8 +549,8 @@ def _compute( # Get perturbation perturbation = self._compute_perturbation(batch, batch_labels, mask_batch, decay, momentum) - batch_eps: Union[int, float, np.ndarray] - batch_eps_step: Union[int, float, np.ndarray] + batch_eps: int | float | np.ndarray + batch_eps_step: int | float | np.ndarray # Compute batch_eps and batch_eps_step if isinstance(eps, np.ndarray) and isinstance(eps_step, np.ndarray): diff --git a/art/attacks/evasion/feature_adversaries/feature_adversaries_numpy.py b/art/attacks/evasion/feature_adversaries/feature_adversaries_numpy.py index defe6fa94e..6ce4ee8a6a 100644 --- a/art/attacks/evasion/feature_adversaries/feature_adversaries_numpy.py +++ b/art/attacks/evasion/feature_adversaries/feature_adversaries_numpy.py @@ -20,8 +20,10 @@ | Paper link: https://arxiv.org/abs/1511.05122 """ +from __future__ import annotations + import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -52,8 +54,8 @@ class FeatureAdversariesNumpy(EvasionAttack): def __init__( self, classifier: "CLASSIFIER_NEURALNETWORK_TYPE", - delta: Optional[float] = None, - layer: Optional[int] = None, + delta: float | None = None, + layer: int | None = None, batch_size: int = 32, ): """ @@ -71,7 +73,7 @@ def 
__init__( self.batch_size = batch_size self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. diff --git a/art/attacks/evasion/feature_adversaries/feature_adversaries_pytorch.py b/art/attacks/evasion/feature_adversaries/feature_adversaries_pytorch.py index 7442c58542..289f8967ea 100644 --- a/art/attacks/evasion/feature_adversaries/feature_adversaries_pytorch.py +++ b/art/attacks/evasion/feature_adversaries/feature_adversaries_pytorch.py @@ -20,8 +20,10 @@ | Paper link: https://arxiv.org/abs/1511.05122 """ +from __future__ import annotations + import logging -from typing import TYPE_CHECKING, Optional, Tuple, Union +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -30,7 +32,7 @@ from art.estimators.estimator import BaseEstimator, NeuralNetworkMixin if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from torch.optim import Optimizer @@ -65,13 +67,13 @@ def __init__( self, estimator: "PYTORCH_ESTIMATOR_TYPE", delta: float, - optimizer: Optional["Optimizer"] = None, - optimizer_kwargs: Optional[dict] = None, + optimizer: "Optimizer" | None = None, + optimizer_kwargs: dict | None = None, lambda_: float = 0.0, - layer: Union[int, str, Tuple[int, ...], Tuple[str, ...]] = -1, + layer: int | str | tuple[int, ...] | tuple[str, ...] = -1, max_iter: int = 100, batch_size: int = 32, - step_size: Optional[Union[int, float]] = None, + step_size: int | float | None = None, random_start: bool = False, verbose: bool = True, ): @@ -180,7 +182,7 @@ def closure(): adv.data = torch.clamp(adv.detach(), *self.estimator.clip_values) return adv.detach().cpu() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. diff --git a/art/attacks/evasion/feature_adversaries/feature_adversaries_tensorflow.py b/art/attacks/evasion/feature_adversaries/feature_adversaries_tensorflow.py index 8b35092387..f8d3561388 100644 --- a/art/attacks/evasion/feature_adversaries/feature_adversaries_tensorflow.py +++ b/art/attacks/evasion/feature_adversaries/feature_adversaries_tensorflow.py @@ -20,8 +20,10 @@ | Paper link: https://arxiv.org/abs/1511.05122 """ +from __future__ import annotations + import logging -from typing import TYPE_CHECKING, Optional, Tuple, Union +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -30,7 +32,7 @@ from art.estimators.estimator import BaseEstimator, NeuralNetworkMixin if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf from tensorflow.keras.optimizers import Optimizer @@ -65,13 +67,13 @@ def __init__( self, estimator: "TENSORFLOWV2_ESTIMATOR_TYPE", delta: float, - optimizer: Optional["Optimizer"] = None, - optimizer_kwargs: Optional[dict] = None, + optimizer: "Optimizer" | None = None, + optimizer_kwargs: dict | None = None, lambda_: float = 0.0, - layer: Union[int, str, Tuple[int, ...], Tuple[str, ...]] = -1, + layer: int | str | tuple[int, ...] | tuple[str, ...] 
= -1, max_iter: int = 100, batch_size: int = 32, - step_size: Optional[Union[int, float]] = None, + step_size: int | float | None = None, random_start: bool = False, verbose: bool = True, ): @@ -174,7 +176,7 @@ def loss_fn(source_orig, source_adv, guide): perturbation.assign(adv - x) return adv - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. diff --git a/art/attacks/evasion/frame_saliency.py b/art/attacks/evasion/frame_saliency.py index ba7f7752f7..b5b28fc55a 100644 --- a/art/attacks/evasion/frame_saliency.py +++ b/art/attacks/evasion/frame_saliency.py @@ -24,7 +24,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -93,7 +93,7 @@ def __init__( self.verbose = verbose self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -193,7 +193,7 @@ def _compute_attack_failure_array(self, x: np.ndarray, targets: np.ndarray, x_ad return np.invert(attack_success) def _compute_frames_to_perturb( - self, x_adv: np.ndarray, targets: np.ndarray, disregard: Optional[np.ndarray] = None + self, x_adv: np.ndarray, targets: np.ndarray, disregard: np.ndarray | None = None ) -> np.ndarray: saliency_score = self.estimator.loss_gradient(x_adv, targets) saliency_score = np.swapaxes(saliency_score, 1, self.frame_index) diff --git a/art/attacks/evasion/geometric_decision_based_attack.py b/art/attacks/evasion/geometric_decision_based_attack.py index bfc6c59b42..ba31897e6f 100644 --- a/art/attacks/evasion/geometric_decision_based_attack.py +++ b/art/attacks/evasion/geometric_decision_based_attack.py @@ -20,10 +20,12 @@ | Paper link: https://arxiv.org/abs/2003.06468 """ +from __future__ import annotations + import os import math import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -65,7 +67,7 @@ def __init__( self, estimator: "CLASSIFIER_TYPE", batch_size: int = 64, - norm: Union[int, float, str] = 2, + norm: int | float | str = 2, sub_dim: int = 10, max_iter: int = 4000, bin_search_tol: float = 0.1, @@ -159,7 +161,7 @@ def dct(i_x: int, i_y: int, i_v: int, i_u: int, num: int) -> float: return dct_basis_array - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples. @@ -303,7 +305,7 @@ def _binary_search(self, x: np.ndarray, y: np.ndarray, x_random: np.ndarray, tol return x_adv - def _opt_query_iteration(self, var_nq: int, var_t: int, lambda_param: float) -> Tuple[List[int], int]: + def _opt_query_iteration(self, var_nq: int, var_t: int, lambda_param: float) -> tuple[list[int], int]: """ Determine optimal distribution of number of queries. 
""" @@ -322,13 +324,13 @@ def _opt_query_iteration(self, var_nq: int, var_t: int, lambda_param: float) -> def _black_grad_batch( self, x_boundary: np.ndarray, q_max: int, batch_size: int, original_label: np.ndarray - ) -> Tuple[np.ndarray, int]: + ) -> tuple[np.ndarray, int]: """ Calculate gradient towards decision boundary. """ self.nb_calls += q_max - grad_tmp: List[np.ndarray] = [] # estimated gradients in each estimate_batch - z_list: List[int] = [] # sign of grad_tmp + grad_tmp: list[np.ndarray] = [] # estimated gradients in each estimate_batch + z_list: list[int] = [] # sign of grad_tmp outs = [] num_batches = math.ceil(q_max / batch_size) last_batch = q_max - (num_batches - 1) * batch_size diff --git a/art/attacks/evasion/graphite/graphite_blackbox.py b/art/attacks/evasion/graphite/graphite_blackbox.py index 28a8c498e5..8bec5156b1 100644 --- a/art/attacks/evasion/graphite/graphite_blackbox.py +++ b/art/attacks/evasion/graphite/graphite_blackbox.py @@ -44,10 +44,10 @@ | Paper link: https://arxiv.org/abs/2002.07088 | Original github link: https://github.com/ryan-feng/GRAPHITE """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, Union, TYPE_CHECKING, List +from typing import TYPE_CHECKING import random import numpy as np @@ -110,10 +110,10 @@ class GRAPHITEBlackbox(EvasionAttack): def __init__( self, classifier: "CLASSIFIER_NEURALNETWORK_TYPE", - noise_size: Tuple[int, int], - net_size: Tuple[int, int], - heat_patch_size: Tuple[int, int] = (4, 4), - heat_patch_stride: Tuple[int, int] = (1, 1), + noise_size: tuple[int, int], + net_size: tuple[int, int], + heat_patch_size: tuple[int, int] = (4, 4), + heat_patch_stride: tuple[int, int] = (1, 1), heatmap_mode: str = "Target", tr_lo: float = 0.65, tr_hi: float = 0.85, @@ -123,13 +123,13 @@ def __init__( eta: float = 500, num_xforms_boost: int = 100, num_boost_queries: int = 20000, - rotation_range: Tuple[float, float] = (-30.0, 30.0), - dist_range: Tuple[float, float] = (0.0, 0.0), - gamma_range: Tuple[float, float] = (1.0, 2.0), - crop_percent_range: Tuple[float, float] = (-0.03125, 0.03125), - off_x_range: Tuple[float, float] = (-0.03125, 0.03125), - off_y_range: Tuple[float, float] = (-0.03125, 0.03125), - blur_kernels: Union[Tuple[int, int], List[int]] = (0, 3), + rotation_range: tuple[float, float] = (-30.0, 30.0), + dist_range: tuple[float, float] = (0.0, 0.0), + gamma_range: tuple[float, float] = (1.0, 2.0), + crop_percent_range: tuple[float, float] = (-0.03125, 0.03125), + off_x_range: tuple[float, float] = (-0.03125, 0.03125), + off_y_range: tuple[float, float] = (-0.03125, 0.03125), + blur_kernels: tuple[int, int] | list[int] = (0, 3), batch_size: int = 64, ) -> None: """ @@ -183,7 +183,7 @@ def __init__( self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -311,8 +311,8 @@ def _perturb( focal: float, clip_min: float, clip_max: float, - mask: Optional[np.ndarray] = None, - pts: Optional[np.ndarray] = None, + mask: np.ndarray | None = None, + pts: np.ndarray | None = None, ) -> np.ndarray: """ Internal attack function for one example. 
@@ -376,7 +376,7 @@ def _generate_mask( focal: float, clip_min: float, clip_max: float, - pts: Optional[np.ndarray] = None, + pts: np.ndarray | None = None, ) -> np.ndarray: """ Function to generate a mask. @@ -501,12 +501,12 @@ def _get_heatmap( x_tar_noise: np.ndarray, mask: np.ndarray, y: int, - patches: List[np.ndarray], - xforms: List, + patches: list[np.ndarray], + xforms: list, clip_min: float, clip_max: float, - pts: Optional[np.ndarray] = None, - ) -> List[float]: + pts: np.ndarray | None = None, + ) -> list[float]: """ Function to generate a heatmap. @@ -517,8 +517,8 @@ def _get_heatmap( broadcastable to the shape of x. Any features for which the mask is zero will not be adversarially perturbed. :param y: The target label. - :param patches: list of patches from heatmap. - :param xforms: list of transform params. + :param patches: List of patches from heatmap. + :param xforms: List of transform params. :param clip_min: Minimum value of an example. :param clip_max: Maximum value of an example. :param pts: Optional. A set of points that will set the crop size in the perspective transform. @@ -545,13 +545,13 @@ def _evaluate_transform_robustness_at_pivot( x_tar_noise: np.ndarray, y: int, mask: np.ndarray, - patches: List[np.ndarray], - xforms: List, + patches: list[np.ndarray], + xforms: list, clip_min: float, clip_max: float, pivot: int, - pts: Optional[np.ndarray] = None, - ) -> Tuple[float, np.ndarray]: + pts: np.ndarray | None = None, + ) -> tuple[float, np.ndarray]: """ Function as a binary search plug-in that evaluates the transform-robustness at the specified pivot. @@ -562,8 +562,8 @@ def _evaluate_transform_robustness_at_pivot( :param mask: An array with a mask to be applied to the adversarial perturbations. Shape needs to be broadcastable to the shape of x. Any features for which the mask is zero will not be adversarially perturbed. - :param patches: list of patches from heatmap. - :param xforms: list of transform params. + :param patches: List of patches from heatmap. + :param xforms: List of transform params. :param clip_min: Minimum value of an example. :param clip_max: Maximum value of an example. :param pivot: Pivot point to evaluate transform-robustness at. @@ -591,13 +591,13 @@ def _get_coarse_reduced_mask( x_tar_noise: np.ndarray, y: int, mask: np.ndarray, - patches: List[np.ndarray], - indices: List[Tuple[int, int]], - xforms: List, + patches: list[np.ndarray], + indices: list[tuple[int, int]], + xforms: list, clip_min: float, clip_max: float, - pts: Optional[np.ndarray] = None, - ) -> Tuple[np.ndarray, List[np.ndarray], List[Tuple[int, int]]]: + pts: np.ndarray | None = None, + ) -> tuple[np.ndarray, list[np.ndarray], list[tuple[int, int]]]: """ Function to coarsely reduce mask. @@ -608,9 +608,9 @@ def _get_coarse_reduced_mask( :param mask: An array with a mask to be applied to the adversarial perturbations. Shape needs to be broadcastable to the shape of x. Any features for which the mask is zero will not be adversarially perturbed. - :param patches: list of patches from heatmap. - :param indices: list of indices for the heatmap patches. - :param xforms: list of transform params. + :param patches: List of patches from heatmap. + :param indices: List of indices for the heatmap patches. + :param xforms: List of transform params. :param clip_min: Minimum value of an example. :param clip_max: Maximum value of an example. :param pts: Optional. A set of points that will set the crop size in the perspective transform. 
@@ -656,13 +656,13 @@ def _get_fine_reduced_mask( x_tar_noise: np.ndarray, y: int, mask: np.ndarray, - patches: List[np.ndarray], - xforms: List, + patches: list[np.ndarray], + xforms: list, object_size: float, clip_min: float, clip_max: float, lbd: float = 5, - pts: Optional[np.ndarray] = None, + pts: np.ndarray | None = None, ) -> np.ndarray: """ Function to finely reduce mask. @@ -674,8 +674,8 @@ def _get_fine_reduced_mask( :param mask: An array with a mask to be applied to the adversarial perturbations. Shape needs to be broadcastable to the shape of x. Any features for which the mask is zero will not be adversarially perturbed. - :param patches: list of patches from heatmap. - :param xforms: list of transform params. + :param patches: List of patches from heatmap. + :param xforms: List of transform params. :param obj_size: Estimated width of object in inches for perspective transform. :param clip_min: Minimum value of an example. :param clip_max: Maximum value of an example. @@ -738,7 +738,7 @@ def _boost( focal: float, clip_min: float, clip_max: float, - pts: Optional[np.ndarray] = None, + pts: np.ndarray | None = None, ) -> np.ndarray: """ Function to boost transform-robustness. diff --git a/art/attacks/evasion/graphite/graphite_whitebox_pytorch.py b/art/attacks/evasion/graphite/graphite_whitebox_pytorch.py index 70ce9a89e8..fe2c8c8788 100644 --- a/art/attacks/evasion/graphite/graphite_whitebox_pytorch.py +++ b/art/attacks/evasion/graphite/graphite_whitebox_pytorch.py @@ -44,10 +44,10 @@ | Paper link: https://arxiv.org/abs/2002.07088 | Original github link: https://github.com/ryan-feng/GRAPHITE """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, Union, TYPE_CHECKING, List +from typing import TYPE_CHECKING import numpy as np @@ -99,7 +99,7 @@ class GRAPHITEWhiteboxPyTorch(EvasionAttack): def __init__( self, classifier: "PyTorchClassifier", - net_size: Tuple[int, int], + net_size: tuple[int, int], min_tr: float = 0.8, num_xforms: int = 100, step_size: float = 0.0157, @@ -108,14 +108,14 @@ def __init__( patch_removal_size: float = 4, patch_removal_interval: float = 2, num_patches_to_remove: int = 4, - rand_start_epsilon_range: Tuple[float, float] = (-8 / 255, 8 / 255), - rotation_range: Tuple[float, float] = (-30.0, 30.0), - dist_range: Tuple[float, float] = (0.0, 0.0), - gamma_range: Tuple[float, float] = (1.0, 2.0), - crop_percent_range: Tuple[float, float] = (-0.03125, 0.03125), - off_x_range: Tuple[float, float] = (-0.03125, 0.03125), - off_y_range: Tuple[float, float] = (-0.03125, 0.03125), - blur_kernels: Union[Tuple[int, int], List[int]] = (0, 3), + rand_start_epsilon_range: tuple[float, float] = (-8 / 255, 8 / 255), + rotation_range: tuple[float, float] = (-30.0, 30.0), + dist_range: tuple[float, float] = (0.0, 0.0), + gamma_range: tuple[float, float] = (1.0, 2.0), + crop_percent_range: tuple[float, float] = (-0.03125, 0.03125), + off_x_range: tuple[float, float] = (-0.03125, 0.03125), + off_y_range: tuple[float, float] = (-0.03125, 0.03125), + blur_kernels: tuple[int, int] | list[int] = (0, 3), batch_size: int = 64, ) -> None: """ @@ -163,7 +163,7 @@ def __init__( self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial 
samples and return them in an array. @@ -292,10 +292,10 @@ def _eval( mask: "torch.Tensor", target_label: np.ndarray, y_onehot: "torch.Tensor", - xforms: List[Tuple[float, float, float, int, float, float, float, float, float]], + xforms: list[tuple[float, float, float, int, float, float, float, float, float]], clip_min: float, clip_max: float, - pts: Optional[np.ndarray], + pts: np.ndarray | None, ) -> float: """ Compute transform-robustness. @@ -319,9 +319,7 @@ def _eval( if len(x_adv.shape) == 3: x_adv = x_adv.unsqueeze(0) transformed_x_adv = transform_wb(x, x_adv, mask, xform, self.net_size, clip_min, clip_max, pts) - logits, _ = self.estimator._predict_framework( # pylint: disable=W0212 - transformed_x_adv.to(self.estimator.device), y_onehot - ) + logits, _ = self.estimator._predict_framework(transformed_x_adv.to(self.estimator.device), y_onehot) success = int(logits.argmax(dim=1).detach().cpu().numpy()[0] == target_label) successes += success return successes / len(xforms) @@ -335,7 +333,7 @@ def _perturb( focal: float, clip_min: float, clip_max: float, - pts: Optional[np.ndarray], + pts: np.ndarray | None, ) -> np.ndarray: """ Internal attack function for one example. @@ -417,9 +415,7 @@ def _perturb( pts, ) - logits, _ = self.estimator._predict_framework( # pylint: disable=W0212 - xform_img.to(self.estimator.device), y_onehot_tensor - ) + logits, _ = self.estimator._predict_framework(xform_img.to(self.estimator.device), y_onehot_tensor) if self.use_logits: loss = torch.nn.functional.cross_entropy( input=logits, diff --git a/art/attacks/evasion/graphite/utils.py b/art/attacks/evasion/graphite/utils.py index e6e1ec4665..ba08efa136 100644 --- a/art/attacks/evasion/graphite/utils.py +++ b/art/attacks/evasion/graphite/utils.py @@ -43,8 +43,9 @@ | Paper link: https://arxiv.org/abs/2002.07088 | Original github link: https://github.com/ryan-feng/GRAPHITE """ +from __future__ import annotations -from typing import Optional, Tuple, Union, TYPE_CHECKING, List +from typing import TYPE_CHECKING import math import numpy as np from art.estimators.estimator import BaseEstimator @@ -71,7 +72,7 @@ def dist2pixels(dist: float, width: float, obj_width: float = 30) -> float: return 1.0 * dist_inches * width / obj_width -def convert_to_network(x: np.ndarray, net_size: Tuple[int, int], clip_min: float, clip_max: float) -> np.ndarray: +def convert_to_network(x: np.ndarray, net_size: tuple[int, int], clip_min: float, clip_max: float) -> np.ndarray: """ Convert image to network format. @@ -102,12 +103,12 @@ def apply_transformation( crop_percent: float, crop_off_x: float, crop_off_y: float, - net_size: Tuple[int, int], + net_size: tuple[int, int], obj_width: float, focal: float, clip_min: float, clip_max: float, - pts: Optional[np.ndarray] = None, + pts: np.ndarray | None = None, ) -> np.ndarray: """ Apply transformation to input image. 
@@ -164,16 +165,16 @@ def apply_transformation( def get_transform_params( num_xforms: int, - rotation_range: Tuple[float, float], - dist_range: Tuple[float, float], - gamma_range: Tuple[float, float], - crop_percent_range: Tuple[float, float], - off_x_range: Tuple[float, float], - off_y_range: Tuple[float, float], - blur_kernels: Union[Tuple[int, int], List[int]], + rotation_range: tuple[float, float], + dist_range: tuple[float, float], + gamma_range: tuple[float, float], + crop_percent_range: tuple[float, float], + off_x_range: tuple[float, float], + off_y_range: tuple[float, float], + blur_kernels: tuple[int, int] | list[int], obj_width: float, focal: float, -) -> List[Tuple[float, float, float, int, float, float, float, float, float]]: +) -> list[tuple[float, float, float, int, float, float, float, float, float]]: """ Sample transformation params. @@ -214,7 +215,7 @@ def add_noise( lbd: float, theta: np.ndarray, clip: bool = True, -) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Combines the image and noise to create a perturbed image. @@ -247,14 +248,14 @@ def add_noise( def get_transformed_images( x: np.ndarray, mask: np.ndarray, - xforms: List[Tuple[float, float, float, int, float, float, float, float, float]], + xforms: list[tuple[float, float, float, int, float, float, float, float, float]], lbd: float, theta: np.ndarray, - net_size: Tuple[int, int], + net_size: tuple[int, int], clip_min: float, clip_max: float, - pts: Optional[np.ndarray] = None, -) -> List[np.ndarray]: + pts: np.ndarray | None = None, +) -> list[np.ndarray]: """ Get transformed images. @@ -305,11 +306,11 @@ def transform_wb( x: "torch.Tensor", x_adv: "torch.Tensor", mask: "torch.Tensor", - xform: Tuple[float, float, float, int, float, float, float, float, float], - net_size: Tuple[int, int], + xform: tuple[float, float, float, int, float, float, float, float, float], + net_size: tuple[int, int], clip_min: float, clip_max: float, - pts: Optional[np.ndarray], + pts: np.ndarray | None, ) -> "torch.Tensor": """ Get transformed image, white-box setting. @@ -370,7 +371,7 @@ def transform_wb( def convert_to_network_wb( - x: "torch.Tensor", net_size: Tuple[int, int], clip_min: float, clip_max: float + x: "torch.Tensor", net_size: tuple[int, int], clip_min: float, clip_max: float ) -> "torch.Tensor": """ Convert image to network format. @@ -401,7 +402,7 @@ def get_perspective_transform( crop_percent: float, crop_off_x: float, crop_off_y: float, - pts: Optional[np.ndarray] = None, + pts: np.ndarray | None = None, ) -> np.ndarray: """ Computes parameters for perspective transform for blackbox attack. @@ -437,7 +438,7 @@ def get_perspective_transform_wb( crop_percent: float, crop_off_x: float, crop_off_y: float, - pts: Optional[np.ndarray] = None, + pts: np.ndarray | None = None, ) -> "torch.Tensor": """ Computes perspective transform for whitebox attack. @@ -479,8 +480,8 @@ def _get_perspective_transform( crop_percent: float, crop_off_x: float, crop_off_y: float, - pts: Optional[np.ndarray] = None, -) -> Tuple[np.ndarray, int, int]: + pts: np.ndarray | None = None, +) -> tuple[np.ndarray, int, int]: """ Computes parameters for perspective transform. @@ -545,8 +546,8 @@ def get_offset_and_crop_size( crop_off_x: float, crop_off_y: float, ratio: float, - pts: Optional[np.ndarray] = None, -) -> Tuple[float, float, float]: + pts: np.ndarray | None = None, +) -> tuple[float, float, float]: """ Compute offsets and crop size for perspective transform. 
@@ -625,7 +626,7 @@ def get_offset_and_crop_size( def run_predictions( estimator: "CLASSIFIER_NEURALNETWORK_TYPE", - imgs: List[np.ndarray], + imgs: list[np.ndarray], target: int, batch_size: int, err_rate: bool = True, diff --git a/art/attacks/evasion/hclu.py b/art/attacks/evasion/hclu.py index 7237d88a3d..fff41d2df1 100644 --- a/art/attacks/evasion/hclu.py +++ b/art/attacks/evasion/hclu.py @@ -24,7 +24,7 @@ import copy import logging -from typing import Optional + import numpy as np from scipy.optimize import minimize @@ -71,7 +71,7 @@ def __init__( self.verbose = verbose self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial examples and return them as an array. diff --git a/art/attacks/evasion/hop_skip_jump.py b/art/attacks/evasion/hop_skip_jump.py index 0e3ae7433f..a0477c8074 100644 --- a/art/attacks/evasion/hop_skip_jump.py +++ b/art/attacks/evasion/hop_skip_jump.py @@ -21,10 +21,10 @@ | Paper link: https://arxiv.org/abs/1904.02144 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -67,7 +67,7 @@ def __init__( classifier: "CLASSIFIER_TYPE", batch_size: int = 64, targeted: bool = False, - norm: Union[int, float, str] = 2, + norm: int | float | str = 2, max_iter: int = 50, max_eval: int = 10000, init_eval: int = 100, @@ -106,7 +106,7 @@ def __init__( else: self.theta = 0.01 / np.prod(self.estimator.input_shape) - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -235,7 +235,7 @@ def _perturb( y_p: int, init_pred: int, adv_init: np.ndarray, - mask: Optional[np.ndarray], + mask: np.ndarray | None, clip_min: float, clip_max: float, ) -> np.ndarray: @@ -273,10 +273,10 @@ def _init_sample( y_p: int, init_pred: int, adv_init: np.ndarray, - mask: Optional[np.ndarray], + mask: np.ndarray | None, clip_min: float, clip_max: float, - ) -> Optional[Union[np.ndarray, Tuple[np.ndarray, int]]]: + ) -> np.ndarray | tuple[np.ndarray, int] | None: """ Find initial adversarial example for the attack. @@ -376,7 +376,7 @@ def _attack( initial_sample: np.ndarray, original_sample: np.ndarray, target: int, - mask: Optional[np.ndarray], + mask: np.ndarray | None, clip_min: float, clip_max: float, ) -> np.ndarray: @@ -466,10 +466,10 @@ def _binary_search( current_sample: np.ndarray, original_sample: np.ndarray, target: int, - norm: Union[int, float, str], + norm: int | float | str, clip_min: float, clip_max: float, - threshold: Optional[float] = None, + threshold: float | None = None, ) -> np.ndarray: """ Binary search to approach the boundary. 
@@ -565,7 +565,7 @@ def _compute_update( num_eval: int, delta: float, target: int, - mask: Optional[np.ndarray], + mask: np.ndarray | None, clip_min: float, clip_max: float, ) -> np.ndarray: @@ -653,7 +653,7 @@ def _adversarial_satisfactory( @staticmethod def _interpolate( - current_sample: np.ndarray, original_sample: np.ndarray, alpha: float, norm: Union[int, float, str] + current_sample: np.ndarray, original_sample: np.ndarray, alpha: float, norm: int | float | str ) -> np.ndarray: """ Interpolate a new sample based on the original and the current samples. diff --git a/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py b/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py index 0d933dd716..ec1d5e6d77 100644 --- a/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py +++ b/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py @@ -21,10 +21,10 @@ | Paper link: http://proceedings.mlr.press/v97/qin19a.html """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import TYPE_CHECKING, Optional, Tuple, Union +from typing import TYPE_CHECKING import numpy as np import scipy.signal as ss @@ -37,7 +37,7 @@ from art.utils import pad_sequence_input if TYPE_CHECKING: - # pylint: disable=C0412 + from tensorflow.compat.v1 import Tensor from torch import Tensor as PTensor @@ -141,7 +141,7 @@ def __init__( self._hop_size = masker.hop_size self._sample_rate = masker.sample_rate - self._framework: Optional[str] = None + self._framework: str | None = None if isinstance(self.estimator, TensorFlowV2Estimator): import tensorflow.compat.v1 as tf1 @@ -167,7 +167,7 @@ def __init__( # set framework attribute self._framework = "pytorch" - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate imperceptible, adversarial examples. @@ -357,7 +357,7 @@ def _create_imperceptible(self, x: np.ndarray, x_adversarial: np.ndarray, y: np. return np.array(x_imperceptible, dtype=dtype) - def _stabilized_threshold_and_psd_maximum(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def _stabilized_threshold_and_psd_maximum(self, x: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Return batch of stabilized masking thresholds and PSD maxima. @@ -384,7 +384,7 @@ def _loss_gradient_masking_threshold( x: np.ndarray, masking_threshold_stabilized: np.ndarray, psd_maximum_stabilized: np.ndarray, - ) -> Tuple[np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray]: """ Compute loss gradient of the global masking threshold w.r.t. the PSD approximate of the perturbation. @@ -409,7 +409,7 @@ def _loss_gradient_masking_threshold( self._power_spectral_density_maximum_tf: psd_maximum_stabilized, self._masking_threshold_tf: masking_threshold_stabilized, } - # pylint: disable=W0212 + gradients_padded, loss = self.estimator._sess.run(self._loss_gradient_masking_threshold_op_tf, feed_dict) elif self._framework == "pytorch": # get loss gradients (TensorFlow) @@ -432,12 +432,12 @@ def _loss_gradient_masking_threshold( def _loss_gradient_masking_threshold_tf( self, perturbation: "Tensor", psd_maximum_stabilized: "Tensor", masking_threshold_stabilized: "Tensor" - ) -> Union["Tensor", "Tensor"]: + ) -> "Tensor" | "Tensor": """ Compute loss gradient of the masking threshold loss in TensorFlow. 
Note that the PSD maximum and masking threshold are required to be stabilized, i.e. have the `10*log10`-term - canceled out. Following Qin et al (2019) this mitigates optimization instabilities. + canceled out. Following Qin et al. (2019) this mitigates optimization instabilities. :param perturbation: Adversarial perturbation. :param psd_maximum_stabilized: Stabilized maximum across frames, i.e. shape is `(batch_size, frame_length)`, of @@ -461,7 +461,7 @@ def _loss_gradient_masking_threshold_tf( def _loss_gradient_masking_threshold_torch( self, perturbation: np.ndarray, psd_maximum_stabilized: np.ndarray, masking_threshold_stabilized: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray]: """ Compute loss gradient of the masking threshold loss in PyTorch. @@ -470,7 +470,7 @@ def _loss_gradient_masking_threshold_torch( import torch # define tensors - # pylint: disable=W0212 + perturbation_torch = torch.from_numpy(perturbation).to(self.estimator._device) masking_threshold_stabilized_torch = torch.from_numpy(masking_threshold_stabilized).to(self.estimator._device) psd_maximum_stabilized_torch = torch.from_numpy(psd_maximum_stabilized).to(self.estimator._device) @@ -539,7 +539,7 @@ def _approximate_power_spectral_density_torch( import torch # compute short-time Fourier transform (STFT) - # pylint: disable=W0212 + stft_matrix = torch.view_as_real( torch.stft( perturbation, @@ -654,11 +654,11 @@ def __init__(self, window_size: int = 2048, hop_size: int = 512, sample_rate: in self._sample_rate = sample_rate # init some private properties for lazy loading - self._fft_frequencies: Optional[np.ndarray] = None - self._bark: Optional[np.ndarray] = None - self._absolute_threshold_hearing: Optional[np.ndarray] = None + self._fft_frequencies: np.ndarray | None = None + self._bark: np.ndarray | None = None + self._absolute_threshold_hearing: np.ndarray | None = None - def calculate_threshold_and_psd_maximum(self, audio: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def calculate_threshold_and_psd_maximum(self, audio: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Compute the global masking threshold for an audio input and also return its maximum power spectral density. @@ -748,7 +748,7 @@ def absolute_threshold_hearing(self) -> np.ndarray: ) return self._absolute_threshold_hearing - def power_spectral_density(self, audio: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def power_spectral_density(self, audio: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Compute the power spectral density matrix for an audio input. @@ -782,7 +782,7 @@ def power_spectral_density(self, audio: np.ndarray) -> Tuple[np.ndarray, np.ndar return psd_matrix_normalized, psd_matrix_max @staticmethod - def find_maskers(psd_vector: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def find_maskers(psd_vector: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Identify maskers. @@ -799,7 +799,7 @@ def find_maskers(psd_vector: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: psd_maskers = 10 * np.log10(np.sum([10 ** (psd_vector[masker_idx + i] / 10) for i in range(-1, 2)], axis=0)) return psd_maskers, masker_idx - def filter_maskers(self, maskers: np.ndarray, masker_idx: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def filter_maskers(self, maskers: np.ndarray, masker_idx: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Filter maskers. 
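The PyTorch variant that follows keeps its framework types as quoted strings (for example `"torch.optim.Optimizer" | None`) because the heavy imports only happen under `TYPE_CHECKING`. A hypothetical sketch of that pattern (the function is made up):

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import torch  # imported for type checkers only, never at runtime


def clamp_adv(adv: "torch.Tensor", clip_min: float, clip_max: float) -> "torch.Tensor":
    # quoted forward references plus lazy annotations keep torch optional at import time
    return adv.clamp(clip_min, clip_max)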
diff --git a/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py b/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py index c7211125e9..84e2a8bcfe 100644 --- a/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py +++ b/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py @@ -22,10 +22,10 @@ | Paper link: https://arxiv.org/abs/1903.10346 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import TYPE_CHECKING, Optional, Tuple, List +from typing import TYPE_CHECKING import numpy as np import scipy @@ -90,8 +90,8 @@ def __init__( max_iter_2: int = 4000, learning_rate_1: float = 0.001, learning_rate_2: float = 5e-4, - optimizer_1: Optional["torch.optim.Optimizer"] = None, - optimizer_2: Optional["torch.optim.Optimizer"] = None, + optimizer_1: "torch.optim.Optimizer" | None = None, + optimizer_2: "torch.optim.Optimizer" | None = None, global_max_length: int = 200000, initial_rescale: float = 1.0, decrease_factor_eps: float = 0.8, @@ -208,7 +208,7 @@ def __init__( # Setup for AMP use if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp if self.estimator.device.type == "cpu": enabled = False @@ -223,7 +223,7 @@ def __init__( loss_scale=1.0, ) - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -337,7 +337,7 @@ class only supports targeted attack. return results - def _attack_1st_stage(self, x: np.ndarray, y: np.ndarray) -> Tuple["torch.Tensor", np.ndarray]: + def _attack_1st_stage(self, x: np.ndarray, y: np.ndarray) -> tuple["torch.Tensor", np.ndarray]: """ The first stage of the attack. @@ -370,7 +370,7 @@ class only supports targeted attack. original_input[local_batch_size_idx, : len(x[local_batch_size_idx])] = x[local_batch_size_idx] # Optimization loop - successful_adv_input: List[Optional["torch.Tensor"]] = [None] * local_batch_size + successful_adv_input: list["torch.Tensor" | None] = [None] * local_batch_size trans = [None] * local_batch_size for iter_1st_stage_idx in range(self.max_iter_1): @@ -390,7 +390,7 @@ class only supports targeted attack. # Actual training if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self.optimizer_1) as scaled_loss: scaled_loss.backward() @@ -442,7 +442,7 @@ def _forward_1st_stage( rescale: np.ndarray, input_mask: np.ndarray, real_lengths: np.ndarray, - ) -> Tuple["torch.Tensor", "torch.Tensor", np.ndarray, "torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor", np.ndarray, "torch.Tensor", "torch.Tensor"]: """ The forward pass of the first stage of the attack. @@ -483,7 +483,7 @@ def _forward_1st_stage( return loss, local_delta, decoded_output, masked_adv_input, local_delta_rescale def _attack_2nd_stage( - self, x: np.ndarray, y: np.ndarray, theta_batch: List[np.ndarray], original_max_psd_batch: List[np.ndarray] + self, x: np.ndarray, y: np.ndarray, theta_batch: list[np.ndarray], original_max_psd_batch: list[np.ndarray] ) -> "torch.Tensor": """ The second stage of the attack. @@ -518,7 +518,7 @@ class only supports targeted attack. 
original_input[local_batch_size_idx, : len(x[local_batch_size_idx])] = x[local_batch_size_idx] # Optimization loop - successful_adv_input: List[Optional["torch.Tensor"]] = [None] * local_batch_size + successful_adv_input: list["torch.Tensor" | None] = [None] * local_batch_size best_loss_2nd_stage = [np.inf] * local_batch_size trans = [None] * local_batch_size @@ -551,7 +551,7 @@ class only supports targeted attack. # Actual training if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self.optimizer_2) as scaled_loss: scaled_loss.backward() @@ -598,8 +598,8 @@ class only supports targeted attack. def _forward_2nd_stage( self, local_delta_rescale: "torch.Tensor", - theta_batch: List[np.ndarray], - original_max_psd_batch: List[np.ndarray], + theta_batch: list[np.ndarray], + original_max_psd_batch: list[np.ndarray], real_lengths: np.ndarray, ) -> "torch.Tensor": """ @@ -629,7 +629,7 @@ def _forward_2nd_stage( return losses_stack - def _compute_masking_threshold(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def _compute_masking_threshold(self, x: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Compute the masking threshold and the maximum psd of the original audio. diff --git a/art/attacks/evasion/iterative_method.py b/art/attacks/evasion/iterative_method.py index 9b55187759..5d1ee244e8 100644 --- a/art/attacks/evasion/iterative_method.py +++ b/art/attacks/evasion/iterative_method.py @@ -21,10 +21,10 @@ | Paper link: https://arxiv.org/abs/1607.02533 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -48,8 +48,8 @@ class BasicIterativeMethod(ProjectedGradientDescent): def __init__( self, estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE", - eps: Union[int, float, np.ndarray] = 0.3, - eps_step: Union[int, float, np.ndarray] = 0.1, + eps: int | float | np.ndarray = 0.3, + eps_step: int | float | np.ndarray = 0.1, max_iter: int = 100, targeted: bool = False, batch_size: int = 32, diff --git a/art/attacks/evasion/laser_attack/algorithms.py b/art/attacks/evasion/laser_attack/algorithms.py index a182719b50..65aaac4dc9 100644 --- a/art/attacks/evasion/laser_attack/algorithms.py +++ b/art/attacks/evasion/laser_attack/algorithms.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/2103.06504 """ -from typing import Optional, Tuple +from __future__ import annotations import numpy as np @@ -35,8 +35,8 @@ def greedy_search( actual_class_confidence: float, adv_object_generator: AdvObjectGenerator, image_generator: ImageGenerator, - debug: Optional[DebugInfo] = None, -) -> Tuple[Optional[AdversarialObject], Optional[int]]: + debug: DebugInfo | None = None, +) -> tuple[AdversarialObject | None, int | None]: """ Greedy search algorithm used to generate parameters of an adversarial object that added to the :image will mislead the neural network. 
diff --git a/art/attacks/evasion/laser_attack/laser_attack.py b/art/attacks/evasion/laser_attack/laser_attack.py
index f58d8615fc..60ab83c6fa 100644
--- a/art/attacks/evasion/laser_attack/laser_attack.py
+++ b/art/attacks/evasion/laser_attack/laser_attack.py
@@ -20,9 +20,11 @@
 | Paper link: https://arxiv.org/abs/2103.06504
 """
+from __future__ import annotations
 
+from collections.abc import Callable
 import logging
-from typing import Callable, List, Optional, Tuple, Union, Any
+from typing import Any
 
 import numpy as np
 
@@ -63,7 +65,7 @@ def __init__(
         image_generator: ImageGenerator = ImageGenerator(),
         random_initializations: int = 1,
         optimisation_algorithm: Callable = greedy_search,
-        debug: Optional[DebugInfo] = None,
+        debug: DebugInfo | None = None,
     ) -> None:
         """
         :param estimator: Predictor of the image class.
@@ -85,7 +87,7 @@ def __init__(
 
         self._check_params()
 
-    def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
+    def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray:
         """
         Generate adversarial examples.
 
@@ -112,8 +114,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n
     def generate_parameters(
         self,
         x: np.ndarray,
-        y: Optional[np.ndarray] = None,
-    ) -> List[Tuple[Optional[AdversarialObject], Optional[int]]]:
+        y: np.ndarray | None = None,
+    ) -> list[tuple[AdversarialObject | None, int | None]]:
         """
         Generate adversarial parameters for given images.
 
@@ -130,8 +132,8 @@ def generate_parameters(
         return result
 
     def _generate_params_for_single_input(
-        self, x: np.ndarray, y: Optional[int] = None
-    ) -> Tuple[Optional[AdversarialObject], Optional[int]]:
+        self, x: np.ndarray, y: int | None = None
+    ) -> tuple[AdversarialObject | None, int | None]:
         """
         Generate adversarial example params for a single image.
 
@@ -168,7 +170,7 @@ def _check_params(self) -> None:
 
     def _attack_single_image(
         self, x: np.ndarray, y: int, confidence: float
-    ) -> Tuple[Optional[AdversarialObject], Optional[int]]:
+    ) -> tuple[AdversarialObject | None, int | None]:
         """
         Attack particular image with given class.
 
@@ -229,7 +231,7 @@ def __repr__(self) -> str:
     @staticmethod
     def from_numpy(theta: np.ndarray) -> "LaserBeam":
         """
-        :param theta: List of the laser beam parameters, passed as List int the order:
+        :param theta: List of the laser beam parameters, passed as list in the order:
             wavelength[nm], slope angle[radians], bias[pixels], width[pixels].
         :returns: New class object based on :theta.
         """
@@ -240,11 +242,11 @@ def from_numpy(theta: np.ndarray) -> "LaserBeam":
         )
 
     @staticmethod
-    def from_array(theta: List) -> "LaserBeam":
+    def from_array(theta: list) -> "LaserBeam":
         """
         Create instance of the class using parameters :theta.
 
-        :param theta: List of the laser beam parameters, passed as List int the order:
+        :param theta: List of the laser beam parameters, passed as list in the order:
            wavelength[nm], slope angle[radians], bias[pixels], width[pixels].
        :returns: New class object based on :theta.
""" @@ -258,7 +260,7 @@ def to_numpy(self) -> np.ndarray: line = self.line return np.array([self.wavelength, line.angle, line.bias, self.width]) - def __mul__(self, other: Union[float, int, list, np.ndarray]) -> "LaserBeam": + def __mul__(self, other: float | int | list | np.ndarray) -> "LaserBeam": if isinstance(other, (float, int)): return LaserBeam.from_numpy(other * self.to_numpy()) if isinstance(other, np.ndarray): @@ -365,11 +367,11 @@ def __init__( self, estimator, iterations: int, - max_laser_beam: Union[LaserBeam, Tuple[float, float, float, int]], - min_laser_beam: Union[LaserBeam, Tuple[float, float, float, int]] = (380.0, 0.0, 1.0, 1), + max_laser_beam: LaserBeam | tuple[float, float, float, int], + min_laser_beam: LaserBeam | tuple[float, float, float, int] = (380.0, 0.0, 1.0, 1), random_initializations: int = 1, image_generator: ImageGenerator = ImageGenerator(), - debug: Optional[DebugInfo] = None, + debug: DebugInfo | None = None, ) -> None: """ :param estimator: Predictor of the image class. diff --git a/art/attacks/evasion/laser_attack/utils.py b/art/attacks/evasion/laser_attack/utils.py index e8d3204e6d..65d36cfac3 100644 --- a/art/attacks/evasion/laser_attack/utils.py +++ b/art/attacks/evasion/laser_attack/utils.py @@ -20,11 +20,14 @@ | Paper link: https://arxiv.org/abs/2103.06504 """ +from __future__ import annotations + from abc import ABC, abstractmethod +from collections.abc import Callable from logging import Logger from pathlib import Path import string -from typing import Any, Callable, List, Tuple, Union +from typing import Any import numpy as np @@ -164,7 +167,7 @@ def add_images(image1: np.ndarray, image2: np.ndarray) -> np.ndarray: return add_images(image1, image2) @staticmethod - def generate_image(adv_object: Callable, shape: Tuple) -> np.ndarray: + def generate_image(adv_object: Callable, shape: tuple) -> np.ndarray: """ Generate image of the adversarial object. @@ -188,7 +191,7 @@ def generate_image(adv_object: Callable, shape: Tuple) -> np.ndarray: return laser_image -def wavelength_to_rgb(wavelength: Union[float, int]) -> List[float]: +def wavelength_to_rgb(wavelength: float | int) -> list[float]: """ Converts wavelength in nanometers to the RGB color. diff --git a/art/attacks/evasion/lowprofool.py b/art/attacks/evasion/lowprofool.py index 0c38290009..59014db9f3 100644 --- a/art/attacks/evasion/lowprofool.py +++ b/art/attacks/evasion/lowprofool.py @@ -27,8 +27,11 @@ | Paper link: https://arxiv.org/abs/1911.03274 """ +from __future__ import annotations + +from collections.abc import Callable import logging -from typing import Callable, Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from scipy.stats import pearsonr @@ -69,13 +72,13 @@ def __init__( self, classifier: "CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE", n_steps: int = 100, - threshold: Union[float, None] = 0.5, + threshold: float | None = 0.5, lambd: float = 1.5, eta: float = 0.2, eta_decay: float = 0.98, eta_min: float = 1e-7, - norm: Union[int, float, str] = 2, - importance: Union[Callable, str, np.ndarray] = "pearson", + norm: int | float | str = 2, + importance: Callable | str | np.ndarray = "pearson", verbose: bool = False, ) -> None: """ @@ -141,7 +144,7 @@ def __weighted_lp_norm(self, perturbations: np.ndarray) -> np.ndarray: :param perturbations: Perturbations of samples towards being adversarial. :return: Array with weighted Lp-norm of perturbations. 
""" - order: Union[int, float] = np.inf if self.norm == "inf" else float(self.norm) + order: int | float = np.inf if self.norm == "inf" else float(self.norm) return self.lambd * np.linalg.norm(self.importance_vec * perturbations, axis=1, ord=order).reshape(-1, 1) def __weighted_lp_norm_gradient(self, perturbations: np.ndarray) -> np.ndarray: @@ -239,10 +242,10 @@ def __calculate_feature_importances(self, x: np.ndarray, y: np.ndarray) -> None: def fit_importances( self, - x: Optional[np.ndarray] = None, - y: Optional[np.ndarray] = None, - importance_array: Optional[np.ndarray] = None, - normalize: Optional[bool] = True, + x: np.ndarray | None = None, + y: np.ndarray | None = None, + importance_array: np.ndarray | None = None, + normalize: bool | None = True, ): """ This function allows one to easily calculate the feature importance vector using the pre-specified function, @@ -274,7 +277,7 @@ def fit_importances( return self - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversaries for the samples passed in the `x` data matrix, whose targets are specified in `y`, one-hot-encoded target matrix. This procedure makes use of the LowProFool algorithm. In the case of failure, diff --git a/art/attacks/evasion/momentum_iterative_method.py b/art/attacks/evasion/momentum_iterative_method.py index dc412e4230..ae71693b35 100644 --- a/art/attacks/evasion/momentum_iterative_method.py +++ b/art/attacks/evasion/momentum_iterative_method.py @@ -21,8 +21,10 @@ | Paper link: https://arxiv.org/abs/1710.06081 """ +from __future__ import annotations + import logging -from typing import Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -47,9 +49,9 @@ class MomentumIterativeMethod(ProjectedGradientDescent): def __init__( self, estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE", - norm: Union[int, float, str] = np.inf, - eps: Union[int, float, np.ndarray] = 0.3, - eps_step: Union[int, float, np.ndarray] = 0.1, + norm: int | float | str = np.inf, + eps: int | float | np.ndarray = 0.3, + eps_step: int | float | np.ndarray = 0.1, decay: float = 1.0, max_iter: int = 100, targeted: bool = False, diff --git a/art/attacks/evasion/newtonfool.py b/art/attacks/evasion/newtonfool.py index 4384824b7f..9187f76028 100644 --- a/art/attacks/evasion/newtonfool.py +++ b/art/attacks/evasion/newtonfool.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -74,7 +74,7 @@ def __init__( self.verbose = verbose self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in a Numpy array. 
diff --git a/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py b/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py index 3690440b7e..aaca3a5977 100644 --- a/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py +++ b/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py @@ -21,9 +21,10 @@ | Paper link: https://arxiv.org/abs/2002.05123 """ +from __future__ import annotations import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -35,7 +36,7 @@ from art.utils import check_and_transform_label_format, get_labels_np_array if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.estimators.classification.pytorch import PyTorchClassifier @@ -79,7 +80,7 @@ def __init__( loss_margin: float = 0.05, batch_size: int = 1, start_frame_index: int = 0, - num_frames: Optional[int] = None, + num_frames: int | None = None, round_samples: float = 0.0, targeted: bool = False, verbose: bool = True, @@ -120,7 +121,7 @@ def __init__( self.verbose = verbose self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial examples. @@ -255,7 +256,7 @@ def _get_loss_gradients(self, x: "torch.Tensor", y: "torch.Tensor", perturbation ) x_in = x[[i]] + torch.repeat_interleave(torch.repeat_interleave(eps, x.shape[2], dim=2), x.shape[3], dim=3) x_in = self._clip_and_round_pytorch(x_in) - preds, _ = self.estimator._predict_framework(x=x_in) # pylint: disable=W0212 + preds, _ = self.estimator._predict_framework(x=x_in) # calculate adversarial loss y_preds = softmax(preds)[0] diff --git a/art/attacks/evasion/overload/box_iou.py b/art/attacks/evasion/overload/box_iou.py index 3f6a6c0fbc..24b063c45c 100644 --- a/art/attacks/evasion/overload/box_iou.py +++ b/art/attacks/evasion/overload/box_iou.py @@ -1,4 +1,4 @@ -# pylint: disable=C0103,C0114 +# pylint: disable=invalid-name,missing-module-docstring # GNU AFFERO GENERAL PUBLIC LICENSE # Version 3, 19 November 2007 # @@ -664,7 +664,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - # pylint: disable=C0412 + import torch diff --git a/art/attacks/evasion/overload/overload.py b/art/attacks/evasion/overload/overload.py index ec9e11ef13..19da29451c 100644 --- a/art/attacks/evasion/overload/overload.py +++ b/art/attacks/evasion/overload/overload.py @@ -20,10 +20,10 @@ | Paper link: https://arxiv.org/abs/2304.05370 """ -# pylint: disable=C0302 + import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -31,7 +31,7 @@ from art.attacks.evasion.overload.box_iou import box_iou if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import PYTORCH_OBJECT_DETECTOR_TYPE @@ -78,7 +78,7 @@ def __init__( self.batch_size = batch_size self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
@@ -143,7 +143,7 @@ def _attack(self, x_adv: "torch.Tensor", x: "torch.Tensor") -> "torch.Tensor": x_adv.requires_grad_(False) return x_adv - def _loss(self, x: "torch.Tensor") -> Tuple["torch.Tensor", "torch.Tensor"]: + def _loss(self, x: "torch.Tensor") -> tuple["torch.Tensor", "torch.Tensor"]: """ Compute the weight of each pixel and the overload loss for a given image. diff --git a/art/attacks/evasion/pe_malware_attack.py b/art/attacks/evasion/pe_malware_attack.py index 8e47cd4075..46eb5ca227 100644 --- a/art/attacks/evasion/pe_malware_attack.py +++ b/art/attacks/evasion/pe_malware_attack.py @@ -22,11 +22,12 @@ 3) Slack manipulation attacks (example paper link: https://arxiv.org/abs/1810.08280) 4) DOS Header Attacks (example paper link: https://arxiv.org/abs/1901.03583) """ +from __future__ import annotations import json import random import logging -from typing import Optional, Union, Tuple, List, Dict, TYPE_CHECKING +from typing import TYPE_CHECKING from tqdm.auto import trange import numpy as np @@ -36,7 +37,7 @@ from art.attacks.attack import EvasionAttack if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf from art.utils import CLASSIFIER_NEURALNETWORK_TYPE @@ -68,9 +69,9 @@ def __init__( self, classifier: "CLASSIFIER_NEURALNETWORK_TYPE", embedding_weights: np.ndarray, - param_dic: Dict[str, int], + param_dic: dict[str, int], num_of_iterations: int = 10, - l_0: Union[float, int] = 0.1, + l_0: float | int = 0.1, l_r: float = 1.0, use_sign: bool = False, verbose: bool = False, @@ -139,8 +140,8 @@ def initialise_sample( y: np.ndarray, sample_sizes: np.ndarray, perturbation_size: np.ndarray, - perturb_sizes: Optional[List[List[int]]], - perturb_starts: Optional[List[List[int]]], + perturb_sizes: list[list[int]] | None, + perturb_starts: list[list[int]] | None, ) -> np.ndarray: """ Randomly append bytes at the end of the malware to initialise it, or if perturbation regions are provided, @@ -199,8 +200,8 @@ def generate_mask( y: np.ndarray, sample_sizes: np.ndarray, perturbation_size: np.ndarray, - perturb_sizes: Optional[List[List[int]]], - perturb_starts: Optional[List[List[int]]], + perturb_sizes: list[list[int]] | None, + perturb_starts: list[list[int]] | None, ) -> "tf.Tensor": """ Makes a mask to apply to the gradients to control which samples in the batch are perturbed. @@ -245,7 +246,7 @@ def generate_mask( # repeat it so that it matches the 8 dimensional embedding layer expanded_masks = np.concatenate(expanded_masks, axis=-1) expanded_masks = tf.convert_to_tensor(expanded_masks) - expanded_masks = tf.cast(expanded_masks, dtype="float32") # pylint: disable=E1123 disable=E1120 + expanded_masks = tf.cast(expanded_masks, dtype="float32") return expanded_masks def update_embeddings(self, embeddings: "tf.Tensor", gradients: "tf.Tensor", mask: "tf.Tensor") -> "tf.Tensor": @@ -271,8 +272,8 @@ def get_adv_malware( labels: np.ndarray, fsize: np.ndarray, perturbation_size: np.ndarray, - perturb_sizes: Optional[List[List[int]]] = None, - perturb_starts: Optional[List[List[int]]] = None, + perturb_sizes: list[list[int]] | None = None, + perturb_starts: list[list[int]] | None = None, ) -> np.ndarray: """ Project the adversarial example back though the closest l2 vector. 
@@ -322,12 +323,12 @@ def pull_out_adversarial_malware( y: np.ndarray, sample_sizes: np.ndarray, initial_dtype: np.dtype, - input_perturb_sizes: Optional[List[List[int]]] = None, - input_perturb_starts: Optional[List[List[int]]] = None, - ) -> Union[ - Tuple[np.ndarray, np.ndarray, np.ndarray], - Tuple[np.ndarray, np.ndarray, np.ndarray, List[List[int]], List[List[int]]], - ]: + input_perturb_sizes: list[list[int]] | None = None, + input_perturb_starts: list[list[int]] | None = None, + ) -> ( + tuple[np.ndarray, np.ndarray, np.ndarray] + | tuple[np.ndarray, np.ndarray, np.ndarray, list[list[int]], list[list[int]]] + ): """ Fetches the malware from the data @@ -372,9 +373,9 @@ def pull_out_adversarial_malware( def compute_perturbation_regions( self, input_perturbation_size: np.ndarray, - input_perturb_sizes: List[List[int]], + input_perturb_sizes: list[list[int]], automatically_append: bool, - ) -> Tuple[np.ndarray, List[List[int]]]: + ) -> tuple[np.ndarray, list[list[int]]]: """ Based on the l0 budget and the provided allowable perturbation regions we iteratively mark regions of the PE file for modification until we exhaust our budget. @@ -423,12 +424,12 @@ def pull_out_valid_samples( y: np.ndarray, sample_sizes: np.ndarray, automatically_append: bool = True, - perturb_sizes: Optional[List[List[int]]] = None, - perturb_starts: Optional[List[List[int]]] = None, - ) -> Union[ - Tuple[np.ndarray, np.ndarray, np.ndarray], - Tuple[np.ndarray, np.ndarray, np.ndarray, List[List[int]], List[List[int]]], - ]: + perturb_sizes: list[list[int]] | None = None, + perturb_starts: list[list[int]] | None = None, + ) -> ( + tuple[np.ndarray, np.ndarray, np.ndarray] + | tuple[np.ndarray, np.ndarray, np.ndarray, list[list[int]], list[list[int]]] + ): """ Filters the input data for samples that can be made adversarial. @@ -462,15 +463,15 @@ def pull_out_valid_samples( return self.pull_out_adversarial_malware(x, y, sample_sizes, initial_dtype) - def generate( # pylint: disable=W0221 + def generate( self, x: np.ndarray, - y: Optional[np.ndarray] = None, - sample_sizes: Optional[np.ndarray] = None, + y: np.ndarray | None = None, + sample_sizes: np.ndarray | None = None, automatically_append: bool = True, verify_input_data: bool = True, - perturb_sizes: Optional[List[List[int]]] = None, - perturb_starts: Optional[List[List[int]]] = None, + perturb_sizes: list[list[int]] | None = None, + perturb_starts: list[list[int]] | None = None, **kwargs, ) -> np.ndarray: """ @@ -584,7 +585,7 @@ def generate( # pylint: disable=W0221 @staticmethod def process_file( filepath: str, padding_char: int = 256, maxlen: int = 2**20 - ) -> Tuple[np.ndarray, int]: # pragma: no cover + ) -> tuple[np.ndarray, int]: # pragma: no cover """ Go from raw file to numpy array. @@ -606,8 +607,8 @@ def process_file( @staticmethod def get_peinfo( - filepath: str, save_to_json_path: Optional[str] = None - ) -> Tuple[List[int], List[int]]: # pragma: no cover + filepath: str, save_to_json_path: str | None = None + ) -> tuple[list[int], list[int]]: # pragma: no cover """ Given a PE file we extract out the section information to determine the slack regions in the file. We return two lists 1) with the start location of the slack regions and 2) with the size of the slack region. 
@@ -618,14 +619,14 @@ def get_peinfo( :return start_of_slack: A list with the slack starts :return size_of_slack: A list with the slack start positions """ - import lief # pylint: disable=C0415 + import lief # pylint: disable=import-outside-toplevel start_of_slack = [] size_of_slack = [] cleaned_dump = {} - binary_load = lief.parse(filepath) # pylint: disable=I1101 + binary_load = lief.parse(filepath) if binary_load is not None: binary = binary_load else: @@ -650,15 +651,15 @@ def get_peinfo( def insert_section( self, - datapoint: Union[List[int], str], - sample_size: Optional[int] = None, + datapoint: list[int] | str, + sample_size: int | None = None, padding_char: int = 256, maxlen: int = 2**20, - bytes_to_assign: Optional[int] = None, + bytes_to_assign: int | None = None, verbose: bool = False, - ) -> Union[ - Tuple[np.ndarray, int, int, int, List[int], List[int]], Tuple[None, None, None, None, None, None] - ]: # pragma: no cover + ) -> ( + tuple[np.ndarray, int, int, int, list[int], list[int]] | tuple[None, None, None, None, None, None] + ): # pragma: no cover """ Create a new section in a PE file that the attacker can perturb to create an adversarial example. we are using the lief library (https://github.com/lief-project/LIEF) to manipulate the PE file. @@ -688,8 +689,8 @@ def insert_section( :return size_of_slack: Size of slack regions in this executable (including from the section we just inserted) :return start_of_slack: Start of slack regions in this executable (including from the section we just inserted) """ - # pylint: disable=I1101 - import lief # pylint: disable=C0415 + + import lief # pylint: disable=import-outside-toplevel if not verbose: lief.logging.disable() @@ -789,7 +790,7 @@ def insert_section( return None, None, None, None, None, None @staticmethod - def get_dos_locations(x: np.ndarray) -> Tuple[List[List[int]], List[List[int]]]: + def get_dos_locations(x: np.ndarray) -> tuple[list[list[int]], list[list[int]]]: """ We identify the regions in the DOS header which we can perturb adversarially. diff --git a/art/attacks/evasion/pixel_threshold.py b/art/attacks/evasion/pixel_threshold.py index 860453457e..5671e2e353 100644 --- a/art/attacks/evasion/pixel_threshold.py +++ b/art/attacks/evasion/pixel_threshold.py @@ -22,12 +22,12 @@ | One Pixel Attack Paper link: https://arxiv.org/ans/1710.08864 | Pixel and Threshold Attack Paper link: https://arxiv.org/abs/1906.06026 """ -# pylint: disable=C0302,C0413 -from __future__ import absolute_import, division, print_function, unicode_literals +# pylint: disable=wrong-import-position +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from itertools import product -from typing import List, Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -36,7 +36,7 @@ # from scipy.optimize import differential_evolution # In the meantime, the modified implementation is used which is defined in the # lines `453-1457`. -# Otherwise may use Tensorflow's implementation of DE. +# Otherwise, may use Tensorflow's implementation of DE. 
from six import string_types import scipy @@ -44,9 +44,9 @@ scipy_version = list(map(int, scipy.__version__.lower().split("."))) if scipy_version[1] >= 8: - from scipy.optimize._optimize import _status_message # pylint: disable=E0611 + from scipy.optimize._optimize import _status_message else: - from scipy.optimize.optimize import _status_message # pylint: disable=E0611 + from scipy.optimize.optimize import _status_message from scipy.optimize import OptimizeResult, minimize # noqa from tqdm.auto import tqdm # noqa @@ -76,7 +76,7 @@ class PixelThreshold(EvasionAttack): def __init__( self, classifier: "CLASSIFIER_NEURALNETWORK_TYPE", - th: Optional[int] = None, + th: int | None = None, es: int = 0, max_iter: int = 100, targeted: bool = False, @@ -97,8 +97,8 @@ def __init__( self._project = True self.type_attack = -1 - self.th = th # pylint: disable=C0103 - self.es = es # pylint: disable=C0103 + self.th = th # pylint: disable=invalid-name + self.es = es # pylint: disable=invalid-name self.max_iter = max_iter self._targeted = targeted self.verbose = verbose @@ -140,7 +140,7 @@ def rescale_input(self, x): x = (x * (self.estimator.clip_values[1] - self.estimator.clip_values[0])) + self.estimator.clip_values[0] return x - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -232,7 +232,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n return adv_x_best_array - def _get_bounds(self, img: np.ndarray, limit) -> Tuple[List[list], list]: + def _get_bounds(self, img: np.ndarray, limit) -> tuple[list[list], list]: """ Define the bounds for the image `img` within the limits `limit`. """ @@ -256,7 +256,7 @@ def bound_limit(value): return bounds, initial - def _perturb_image(self, x: np.ndarray, img: np.ndarray) -> np.ndarray: # pylint: disable=W0613,R0201 + def _perturb_image(self, x: np.ndarray, img: np.ndarray) -> np.ndarray: """ Perturbs the given image `img` with the given perturbation `x`. """ @@ -277,7 +277,7 @@ def _attack_success(self, adv_x, x, target_class): or (not self.targeted and predicted_class != target_class) ) - def _attack(self, image: np.ndarray, target_class: np.ndarray, limit: int) -> Tuple[bool, np.ndarray]: + def _attack(self, image: np.ndarray, target_class: np.ndarray, limit: int) -> tuple[bool, np.ndarray]: """ Attack the given image `image` with the threshold `limit` for the `target_class` which is true label for untargeted attack and targeted label for targeted attack. 
@@ -293,7 +293,7 @@ def predict_fn(x):
             predictions = self.estimator.predict(adv)[:, target_class]
             return predictions if not self.targeted else 1 - predictions
 
-        def callback_fn(x, convergence=None):  # pylint: disable=R1710,W0613
+        def callback_fn(x, convergence=None):  # pylint: disable=inconsistent-return-statements,unused-argument
             if self.es == 0:
                 if self._attack_success(x.result[0], image, target_class):
                     raise CMAEarlyStoppingException("Attack Completed :) Earlier than expected")
@@ -365,7 +365,7 @@ class PixelAttack(PixelThreshold):
     def __init__(
         self,
         classifier: "CLASSIFIER_NEURALNETWORK_TYPE",
-        th: Optional[int] = None,
+        th: int | None = None,
         es: int = 1,
         max_iter: int = 100,
         targeted: bool = False,
@@ -401,12 +401,12 @@ def _perturb_image(self, x: np.ndarray, img: np.ndarray) -> np.ndarray:
             image[:, x_pos % self.img_rows, y_pos % self.img_cols] = rgb
         return imgs
 
-    def _get_bounds(self, img: np.ndarray, limit) -> Tuple[List[list], list]:
+    def _get_bounds(self, img: np.ndarray, limit) -> tuple[list[list], list]:
         """
         Define the bounds for the image `img` within the limits `limit`.
         """
-        initial: List[int] = []
-        bounds: List[List[int]]
+        initial: list[int] = []
+        bounds: list[list[int]]
         if self.es == 0:
             for count, (i, j) in enumerate(product(range(self.img_rows), range(self.img_cols))):
                 initial += [i, j]
@@ -446,7 +446,7 @@ class ThresholdAttack(PixelThreshold):
     def __init__(
         self,
         classifier: "CLASSIFIER_NEURALNETWORK_TYPE",
-        th: Optional[int] = None,
+        th: int | None = None,
         es: int = 0,
         max_iter: int = 100,
         targeted: bool = False,
@@ -493,7 +493,7 @@ class CMAEarlyStoppingException(Exception):
 
 # TODO: Make the attack compatible with current version of SciPy Optimize
 # Differential Evolution
-# pylint: disable=W0105
+# pylint: disable=pointless-string-statement
 """
 A slight modification to Scipy's implementation of differential evolution.
To speed up predictions, the entire parameters array is passed to `self.func`, @@ -1415,7 +1415,7 @@ def _ensure_constraint(self, trial): for index in np.where((trial < 0) | (trial > 1))[0]: trial[index] = self.random_number_generator.rand() - def _mutate(self, candidate): # pylint: disable=R1710 + def _mutate(self, candidate): # pylint: disable=inconsistent-return-statements """ create a trial vector based on a mutation strategy """ diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py index bb4f58b5fb..143acea8cb 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py @@ -23,10 +23,10 @@ | Paper link: https://arxiv.org/abs/1706.06083 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -78,17 +78,17 @@ class ProjectedGradientDescent(EvasionAttack): def __init__( self, - estimator: Union["CLASSIFIER_LOSS_GRADIENTS_TYPE", "OBJECT_DETECTOR_TYPE"], - norm: Union[int, float, str] = np.inf, - eps: Union[int, float, np.ndarray] = 0.3, - eps_step: Union[int, float, np.ndarray] = 0.1, - decay: Optional[float] = None, + estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE" | "OBJECT_DETECTOR_TYPE", + norm: int | float | str = np.inf, + eps: int | float | np.ndarray = 0.3, + eps_step: int | float | np.ndarray = 0.1, + decay: float | None = None, max_iter: int = 100, targeted: bool = False, num_random_init: int = 0, batch_size: int = 32, random_eps: bool = False, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, verbose: bool = True, ): """ @@ -132,9 +132,9 @@ def __init__( self.verbose = verbose ProjectedGradientDescent._check_params(self) - self._attack: Union[ - ProjectedGradientDescentPyTorch, ProjectedGradientDescentTensorFlowV2, ProjectedGradientDescentNumpy - ] + self._attack: ( + ProjectedGradientDescentPyTorch | ProjectedGradientDescentTensorFlowV2 | ProjectedGradientDescentNumpy + ) if isinstance(self.estimator, PyTorchClassifier) and self.estimator.all_framework_preprocessing: self._attack = ProjectedGradientDescentPyTorch( estimator=estimator, # type: ignore @@ -183,7 +183,7 @@ def __init__( verbose=verbose, ) - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
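The `ProjectedGradientDescent` hunks above keep `eps` and `eps_step` typed as `int | float | np.ndarray`, so the perturbation budget may be a scalar or a per-feature array. A rough, hypothetical usage sketch under that assumption; `classifier` is assumed to be any ART classifier providing loss gradients and `x_test` an input batch, and this code is not part of the patch:

import numpy as np

from art.attacks.evasion import ProjectedGradientDescent

# Sketch only: `classifier` and `x_test` are assumed to exist already.
# A scalar eps bounds every feature by the same amount; an ndarray eps shaped
# like a single input bounds each feature individually, as the annotations allow.
eps = np.full(x_test.shape[1:], 0.3, dtype=np.float32)
attack = ProjectedGradientDescent(
    estimator=classifier,
    norm=np.inf,
    eps=eps,
    eps_step=eps / 10.0,
    max_iter=40,
)
x_adv = attack.generate(x=x_test)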
diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py index 89da8dc80b..d768628976 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py @@ -23,10 +23,10 @@ | Paper link: https://arxiv.org/abs/1706.06083 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from scipy.stats import truncnorm @@ -60,17 +60,17 @@ class ProjectedGradientDescentCommon(FastGradientMethod): def __init__( self, - estimator: Union["CLASSIFIER_LOSS_GRADIENTS_TYPE", "OBJECT_DETECTOR_TYPE"], - norm: Union[int, float, str] = np.inf, - eps: Union[int, float, np.ndarray] = 0.3, - eps_step: Union[int, float, np.ndarray] = 0.1, - decay: Optional[float] = None, + estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE" | "OBJECT_DETECTOR_TYPE", + norm: int | float | str = np.inf, + eps: int | float | np.ndarray = 0.3, + eps_step: int | float | np.ndarray = 0.1, + decay: float | None = None, max_iter: int = 100, targeted: bool = False, num_random_init: int = 0, batch_size: int = 32, random_eps: bool = False, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, verbose: bool = True, ) -> None: """ @@ -118,10 +118,10 @@ def __init__( self.verbose = verbose ProjectedGradientDescentCommon._check_params(self) - lower: Union[int, float, np.ndarray] - upper: Union[int, float, np.ndarray] - var_mu: Union[int, float, np.ndarray] - sigma: Union[int, float, np.ndarray] + lower: int | float | np.ndarray + upper: int | float | np.ndarray + var_mu: int | float | np.ndarray + sigma: int | float | np.ndarray if self.random_eps: if isinstance(eps, (int, float)): @@ -147,7 +147,7 @@ def _random_eps(self): self.eps_step = ratio * self.eps - def _set_targets(self, x: np.ndarray, y: Optional[np.ndarray], classifier_mixin: bool = True) -> np.ndarray: + def _set_targets(self, x: np.ndarray, y: np.ndarray | None, classifier_mixin: bool = True) -> np.ndarray: """ Check and set up targets. 
@@ -249,17 +249,17 @@ class ProjectedGradientDescentNumpy(ProjectedGradientDescentCommon): def __init__( self, - estimator: Union["CLASSIFIER_LOSS_GRADIENTS_TYPE", "OBJECT_DETECTOR_TYPE"], - norm: Union[int, float, str] = np.inf, - eps: Union[int, float, np.ndarray] = 0.3, - eps_step: Union[int, float, np.ndarray] = 0.1, - decay: Optional[float] = None, + estimator: "CLASSIFIER_LOSS_GRADIENTS_TYPE" | "OBJECT_DETECTOR_TYPE", + norm: int | float | str = np.inf, + eps: int | float | np.ndarray = 0.3, + eps_step: int | float | np.ndarray = 0.1, + decay: float | None = None, max_iter: int = 100, targeted: bool = False, num_random_init: int = 0, batch_size: int = 32, random_eps: bool = False, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, verbose: bool = True, ) -> None: """ @@ -309,7 +309,7 @@ def __init__( self._project = True - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index 51c13b3330..6c7be88172 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -23,10 +23,10 @@ | Paper link: https://arxiv.org/abs/1706.06083 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -41,7 +41,7 @@ from art.utils import compute_success, random_sphere, compute_success_array if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.estimators.classification.pytorch import PyTorchClassifier @@ -61,17 +61,17 @@ class ProjectedGradientDescentPyTorch(ProjectedGradientDescentCommon): def __init__( self, - estimator: Union["PyTorchClassifier"], - norm: Union[int, float, str] = np.inf, - eps: Union[int, float, np.ndarray] = 0.3, - eps_step: Union[int, float, np.ndarray] = 0.1, - decay: Optional[float] = None, + estimator: "PyTorchClassifier", + norm: int | float | str = np.inf, + eps: int | float | np.ndarray = 0.3, + eps_step: int | float | np.ndarray = 0.1, + decay: float | None = None, max_iter: int = 100, targeted: bool = False, num_random_init: int = 0, batch_size: int = 32, random_eps: bool = False, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, verbose: bool = True, ): """ @@ -127,7 +127,7 @@ def __init__( self._batch_id = 0 self._i_max_iter = 0 - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
@@ -200,8 +200,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size - batch_eps: Union[int, float, np.ndarray] - batch_eps_step: Union[int, float, np.ndarray] + batch_eps: int | float | np.ndarray + batch_eps_step: int | float | np.ndarray # Compute batch_eps and batch_eps_step if isinstance(self.eps, np.ndarray) and isinstance(self.eps_step, np.ndarray): @@ -254,8 +254,8 @@ def _generate_batch( x: "torch.Tensor", targets: "torch.Tensor", mask: "torch.Tensor", - eps: Union[int, float, np.ndarray], - eps_step: Union[int, float, np.ndarray], + eps: int | float | np.ndarray, + eps_step: int | float | np.ndarray, ) -> np.ndarray: """ Generate a batch of adversarial samples and return them in an array. @@ -287,8 +287,8 @@ def _generate_batch( return adv_x.cpu().detach().numpy() - def _compute_perturbation_pytorch( # pylint: disable=W0221 - self, x: "torch.Tensor", y: "torch.Tensor", mask: Optional["torch.Tensor"], momentum: "torch.Tensor" + def _compute_perturbation_pytorch( + self, x: "torch.Tensor", y: "torch.Tensor", mask: "torch.Tensor" | None, momentum: "torch.Tensor" ) -> "torch.Tensor": """ Compute perturbations. @@ -362,8 +362,8 @@ def _compute_perturbation_pytorch( # pylint: disable=W0221 return grad - def _apply_perturbation_pytorch( # pylint: disable=W0221 - self, x: "torch.Tensor", perturbation: "torch.Tensor", eps_step: Union[int, float, np.ndarray] + def _apply_perturbation_pytorch( + self, x: "torch.Tensor", perturbation: "torch.Tensor", eps_step: int | float | np.ndarray ) -> "torch.Tensor": """ Apply perturbation on examples. @@ -394,8 +394,8 @@ def _compute_pytorch( x_init: "torch.Tensor", y: "torch.Tensor", mask: "torch.Tensor", - eps: Union[int, float, np.ndarray], - eps_step: Union[int, float, np.ndarray], + eps: int | float | np.ndarray, + eps_step: int | float | np.ndarray, random_init: bool, momentum: "torch.Tensor", ) -> "torch.Tensor": @@ -458,8 +458,8 @@ def _compute_pytorch( @staticmethod def _projection( values: "torch.Tensor", - eps: Union[int, float, np.ndarray], - norm_p: Union[int, float, str], + eps: int | float | np.ndarray, + norm_p: int | float | str, *, suboptimal: bool = True, ) -> "torch.Tensor": diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 41538aacd1..71d7129cf1 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -23,10 +23,10 @@ | Paper link: https://arxiv.org/abs/1706.06083 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -41,7 +41,7 @@ from art.summary_writer import SummaryWriter if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf from art.estimators.classification.tensorflow import TensorFlowV2Classifier @@ -62,16 +62,16 @@ class ProjectedGradientDescentTensorFlowV2(ProjectedGradientDescentCommon): def __init__( self, estimator: "TensorFlowV2Classifier", - norm: Union[int, float, str] = np.inf, - eps: Union[int, float, 
np.ndarray] = 0.3, - eps_step: Union[int, float, np.ndarray] = 0.1, - decay: Optional[float] = None, + norm: int | float | str = np.inf, + eps: int | float | np.ndarray = 0.3, + eps_step: int | float | np.ndarray = 0.1, + decay: float | None = None, max_iter: int = 100, targeted: bool = False, num_random_init: int = 0, batch_size: int = 32, random_eps: bool = False, - summary_writer: Union[str, bool, SummaryWriter] = False, + summary_writer: str | bool | SummaryWriter = False, verbose: bool = True, ): """ @@ -125,7 +125,7 @@ def __init__( verbose=verbose, ) - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -201,8 +201,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size - batch_eps: Union[int, float, np.ndarray] - batch_eps_step: Union[int, float, np.ndarray] + batch_eps: int | float | np.ndarray + batch_eps_step: int | float | np.ndarray # Compute batch_eps and batch_eps_step if isinstance(self.eps, np.ndarray) and isinstance(self.eps_step, np.ndarray): @@ -254,8 +254,8 @@ def _generate_batch( x: "tf.Tensor", targets: "tf.Tensor", mask: "tf.Tensor", - eps: Union[int, float, np.ndarray], - eps_step: Union[int, float, np.ndarray], + eps: int | float | np.ndarray, + eps_step: int | float | np.ndarray, ) -> "tf.Tensor": """ Generate a batch of adversarial samples and return them in an array. @@ -289,13 +289,13 @@ def _generate_batch( return adv_x - def _compute_perturbation( # pylint: disable=W0221 + def _compute_perturbation( self, x: "tf.Tensor", y: "tf.Tensor", - mask: Optional["tf.Tensor"], - decay: Optional[float] = None, - momentum: Optional["tf.Tensor"] = None, + mask: "tf.Tensor" | None, + decay: float | None = None, + momentum: "tf.Tensor" | None = None, ) -> "tf.Tensor": """ Compute perturbations. @@ -376,8 +376,8 @@ def _compute_perturbation( # pylint: disable=W0221 return grad - def _apply_perturbation( # pylint: disable=W0221 - self, x: "tf.Tensor", perturbation: "tf.Tensor", eps_step: Union[int, float, np.ndarray] + def _apply_perturbation( + self, x: "tf.Tensor", perturbation: "tf.Tensor", eps_step: int | float | np.ndarray ) -> "tf.Tensor": """ Apply perturbation on examples. 
@@ -404,9 +404,9 @@ def _compute_tf( x_init: "tf.Tensor", y: "tf.Tensor", mask: "tf.Tensor", - eps: Union[int, float, np.ndarray], - eps_step: Union[int, float, np.ndarray], - momentum: Optional["tf.Tensor"], + eps: int | float | np.ndarray, + eps_step: int | float | np.ndarray, + momentum: "tf.Tensor" | None, random_init: bool, ) -> "tf.Tensor": """ @@ -465,8 +465,8 @@ def _compute_tf( @staticmethod def _projection( values: "tf.Tensor", - eps: Union[int, float, np.ndarray], - norm_p: Union[int, float, str], + eps: int | float | np.ndarray, + norm_p: int | float | str, *, suboptimal: bool = True, ) -> "tf.Tensor": diff --git a/art/attacks/evasion/saliency_map.py b/art/attacks/evasion/saliency_map.py index cb9d0a27a3..9322775b1f 100644 --- a/art/attacks/evasion/saliency_map.py +++ b/art/attacks/evasion/saliency_map.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -74,7 +74,7 @@ def __init__( self.verbose = verbose self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -192,7 +192,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n return x_adv - def _saliency_map(self, x: np.ndarray, target: Union[np.ndarray, int], search_space: np.ndarray) -> np.ndarray: + def _saliency_map(self, x: np.ndarray, target: np.ndarray | int, search_space: np.ndarray) -> np.ndarray: """ Compute the saliency map of `x`. Return the top 2 coefficients in `search_space` that maximize / minimize the saliency map. diff --git a/art/attacks/evasion/shadow_attack.py b/art/attacks/evasion/shadow_attack.py index 2fcb44f41d..21b705e343 100644 --- a/art/attacks/evasion/shadow_attack.py +++ b/art/attacks/evasion/shadow_attack.py @@ -20,8 +20,10 @@ | Paper link: https://arxiv.org/abs/2003.08937 """ +from __future__ import annotations + import logging -from typing import Optional, Union + import numpy as np from tqdm.auto import trange @@ -63,9 +65,9 @@ class ShadowAttack(EvasionAttack): def __init__( self, - estimator: Union[ - TensorFlowV2Classifier, TensorFlowV2RandomizedSmoothing, PyTorchClassifier, PyTorchRandomizedSmoothing - ], + estimator: ( + TensorFlowV2Classifier | TensorFlowV2RandomizedSmoothing | PyTorchClassifier | PyTorchRandomizedSmoothing + ), sigma: float = 0.5, nb_steps: int = 300, learning_rate: float = 0.1, @@ -103,7 +105,7 @@ def __init__( self.verbose = verbose self._check_params() - self.framework: Optional[str] + self.framework: str | None if isinstance(self.estimator, (TensorFlowV2Classifier, TensorFlowV2RandomizedSmoothing)): self.framework = "tensorflow" elif isinstance(self.estimator, (PyTorchClassifier, PyTorchRandomizedSmoothing)): @@ -111,7 +113,7 @@ def __init__( else: self.framework = None - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. This requires a lot of memory, therefore it accepts only a single samples as input, e.g. a batch of size 1. 
diff --git a/art/attacks/evasion/shapeshifter.py b/art/attacks/evasion/shapeshifter.py index 3a1cfd958c..d92718ffff 100644 --- a/art/attacks/evasion/shapeshifter.py +++ b/art/attacks/evasion/shapeshifter.py @@ -20,9 +20,11 @@ | Paper link: https://arxiv.org/abs/1804.05810 """ -# pylint: disable=C0302 +from __future__ import annotations + + import logging -from typing import List, Dict, Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -30,7 +32,6 @@ from art.estimators.object_detection.tensorflow_faster_rcnn import TensorFlowFasterRCNN if TYPE_CHECKING: - # pylint: disable=C0302,E0611 from collections.abc import Callable from tensorflow.python.framework.ops import Tensor @@ -182,7 +183,7 @@ def __init__( # Check validity of attack attributes self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -201,7 +202,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n assumed to map to the first non-background class. - `groundtruth_weights_list`: A list of `nb_samples` size of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. - :type label: Dict[str, List[np.ndarray]] + :type label: dict[str, list[np.ndarray]] :param mask: Input mask. :type mask: `np.ndarray`. :param target_class: Target class. @@ -220,7 +221,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n assert list(x.shape[1:]) == self.estimator.input_shape # Check if label is provided - label: Optional[Dict[str, List[np.ndarray]]] = kwargs.get("label") + label: dict[str, list[np.ndarray]] | None = kwargs.get("label") if label is None and not self.texture_as_input: # pragma: no cover raise ValueError("Need the target labels for image as input.") @@ -307,8 +308,8 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n def _attack_training( self, x: np.ndarray, - y: Dict[str, List[np.ndarray]], - mask: Optional[np.ndarray], + y: dict[str, list[np.ndarray]], + mask: np.ndarray | None, target_class: int, victim_class: int, project_texture_op: "Tensor", @@ -317,7 +318,7 @@ def _attack_training( final_attack_optimization_op: "Tensor", current_variable: "Tensor", current_value: "Tensor", - ) -> List[np.ndarray]: + ) -> list[np.ndarray]: """ Do attack optimization. @@ -436,10 +437,10 @@ def _attack_training( def _build_graph( self, - initial_shape: Tuple[int, ...], - custom_loss: Optional["Tensor"] = None, - rendering_function: Optional["Callable"] = None, - ) -> Tuple["Tensor", ...]: + initial_shape: tuple[int, ...], + custom_loss: "Tensor" | None = None, + rendering_function: "Callable" | None = None, + ) -> tuple["Tensor", ...]: """ Build the TensorFlow graph for the attack. 
@@ -558,7 +559,7 @@ def _build_graph( # Create variables to store gradients if self.texture_as_input: - sum_gradients = tf.Variable( # pylint: disable=E1123 + sum_gradients = tf.Variable( initial_value=np.zeros(current_texture_variable.shape.as_list()), trainable=False, name="sum_gradients", @@ -567,7 +568,7 @@ def _build_graph( ) else: - sum_gradients = tf.Variable( # pylint: disable=E1123 + sum_gradients = tf.Variable( initial_value=np.zeros(current_image_variable.shape.as_list()), trainable=False, name="sum_gradients", @@ -575,7 +576,7 @@ def _build_graph( collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.LOCAL_VARIABLES], ) - num_gradients = tf.Variable( # pylint: disable=E1123 + num_gradients = tf.Variable( initial_value=0.0, trainable=False, name="count_gradients", @@ -666,7 +667,7 @@ def _create_attack_loss( self, initial_input: "Tensor", current_value: "Tensor", - custom_loss: Optional["Tensor"] = None, + custom_loss: "Tensor" | None = None, ) -> "Tensor": """ Create the loss tensor of this attack. @@ -779,13 +780,13 @@ def _create_box_loss(self) -> "Tensor": class_predictions_with_background = class_predictions_with_background[:, 1:] # Convert to 1-hot - # pylint: disable=E1120 + target_class_one_hot = tf.one_hot([target_class_phd - 1], class_predictions_with_background.shape[-1]) victim_class_one_hot = tf.one_hot([victim_class_phd - 1], class_predictions_with_background.shape[-1]) box_iou_tensor = default_graph.get_tensor_by_name("Loss/BoxClassifierLoss/Compare/IOU/Select:0") box_iou_tensor = tf.reshape(box_iou_tensor, (-1,)) - box_target = tf.cast(box_iou_tensor >= box_iou_threshold, dtype=tf.float32) # pylint: disable=E1123 + box_target = tf.cast(box_iou_tensor >= box_iou_threshold, dtype=tf.float32) # Compute box target loss box_target_weight = tf.placeholder(dtype=tf.float32, shape=[], name="box_target_weight") @@ -862,7 +863,7 @@ def _create_rpn_loss(self) -> "Tensor": ) rpn_iou_tensor = default_graph.get_tensor_by_name("Loss/RPNLoss/Compare/IOU/Select:0") rpn_iou_tensor = tf.reshape(rpn_iou_tensor, (-1,)) - rpn_target = tf.cast(rpn_iou_tensor >= rpn_iou_threshold, dtype=tf.float32) # pylint: disable=E1123,E1120 + rpn_target = tf.cast(rpn_iou_tensor >= rpn_iou_threshold, dtype=tf.float32) # Compute RPN background loss rpn_background_weight = tf.placeholder(dtype=tf.float32, shape=[], name="rpn_background_weight") diff --git a/art/attacks/evasion/sign_opt.py b/art/attacks/evasion/sign_opt.py index 3d621e3409..c879b8accb 100644 --- a/art/attacks/evasion/sign_opt.py +++ b/art/attacks/evasion/sign_opt.py @@ -44,9 +44,10 @@ | Paper link: https://arxiv.org/pdf/1909.10773.pdf """ +from __future__ import annotations import logging -from typing import Optional, TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING import time import numpy as np @@ -145,7 +146,7 @@ def __init__( self.enable_clipped = False self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
@@ -234,8 +235,8 @@ def _fine_grained_binary_search( theta: np.ndarray, initial_lbd: float, current_best: float, - target: Optional[int] = None, - ) -> Tuple[float, int]: + target: int | None = None, + ) -> tuple[float, int]: """ Perform fine-grained line search plus binary search for finding a good starting direction @@ -285,10 +286,10 @@ def _fine_grained_binary_search_local( x_0: np.ndarray, y_0: int, theta: np.ndarray, - target: Optional[int] = None, + target: int | None = None, initial_lbd: float = 1.0, tol: float = 1e-5, - ) -> Tuple[float, int]: + ) -> tuple[float, int]: """ Perform the line search in a local region plus binary search. Details in paper (Chen and Zhang, 2019), paper link: https://openreview.net/pdf?id=rJlk6iRqKX @@ -340,7 +341,7 @@ def _fine_grained_binary_search_local( lbd_lo = lbd_mid return lbd_hi, nquery - def _is_label(self, x_0: np.ndarray, label: Optional[int]) -> bool: + def _is_label(self, x_0: np.ndarray, label: int | None) -> bool: """ Helper method to check if self.estimator predict input with label @@ -367,8 +368,8 @@ def _predict_label(self, x_0: np.ndarray) -> np.signedinteger: return np.argmax(pred) def _sign_grad( - self, x_0: np.ndarray, y_0: int, epsilon: float, theta: np.ndarray, initial_lbd: float, target: Optional[int] - ) -> Tuple[np.ndarray, int]: + self, x_0: np.ndarray, y_0: int, epsilon: float, theta: np.ndarray, initial_lbd: float, target: int | None + ) -> tuple[np.ndarray, int]: """ Evaluate the sign of gradient @@ -410,10 +411,10 @@ def _attack( self, x_0: np.ndarray, y_0: int, - target: Optional[int] = None, - x_init: Optional[np.ndarray] = None, - distortion: Optional[float] = None, - ) -> Tuple[np.ndarray, np.ndarray, bool]: + target: int | None = None, + x_init: np.ndarray | None = None, + distortion: float | None = None, + ) -> tuple[np.ndarray, np.ndarray, bool]: """ Perform attack diff --git a/art/attacks/evasion/simba.py b/art/attacks/evasion/simba.py index bab36accc9..bb4fac4c02 100644 --- a/art/attacks/evasion/simba.py +++ b/art/attacks/evasion/simba.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from scipy.fftpack import idct @@ -102,7 +102,7 @@ def __init__( self.verbose = verbose self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
@@ -443,7 +443,9 @@ def diagonal_order(self, image_size, channels): for i in range(image_size): order[i, : (image_size - i)] = i + x[i:] for i in range(1, image_size): - reverse = order[image_size - i - 1].take([i for i in range(i - 1, -1, -1)]) # pylint: disable=R1721 + reverse = order[image_size - i - 1].take( + [i for i in range(i - 1, -1, -1)] + ) # pylint: disable=unnecessary-comprehension order[i, (image_size - i) :] = image_size * image_size - 1 - reverse if channels > 1: order_2d = order diff --git a/art/attacks/evasion/spatial_transformation.py b/art/attacks/evasion/spatial_transformation.py index 25a5bb1776..b581f8a9fe 100644 --- a/art/attacks/evasion/spatial_transformation.py +++ b/art/attacks/evasion/spatial_transformation.py @@ -22,10 +22,10 @@ | Paper link: https://arxiv.org/abs/1712.02779 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from scipy.ndimage import rotate, shift @@ -85,12 +85,12 @@ def __init__( self.verbose = verbose self._check_params() - self.fooling_rate: Optional[float] = None - self.attack_trans_x: Optional[int] = None - self.attack_trans_y: Optional[int] = None - self.attack_rot: Optional[float] = None + self.fooling_rate: float | None = None + self.attack_trans_x: int | None = None + self.attack_trans_y: int | None = None + self.attack_rot: float | None = None - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. diff --git a/art/attacks/evasion/square_attack.py b/art/attacks/evasion/square_attack.py index 4a86a2fc0d..abab3db4d1 100644 --- a/art/attacks/evasion/square_attack.py +++ b/art/attacks/evasion/square_attack.py @@ -20,11 +20,14 @@ | Paper link: https://arxiv.org/abs/1912.00049 """ +from __future__ import annotations + import bisect +from collections.abc import Callable import logging import math import random -from typing import Optional, Union, Callable, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -65,9 +68,9 @@ class SquareAttack(EvasionAttack): def __init__( self, estimator: "CLASSIFIER_TYPE", - norm: Union[int, float, str] = np.inf, - adv_criterion: Union[Callable[[np.ndarray, np.ndarray], bool], None] = None, - loss: Union[Callable[[np.ndarray, np.ndarray], np.ndarray], None] = None, + norm: int | float | str = np.inf, + adv_criterion: Callable[[np.ndarray, np.ndarray], bool] | None = None, + loss: Callable[[np.ndarray, np.ndarray], np.ndarray] | None = None, max_iter: int = 100, eps: float = 0.3, p_init: float = 0.8, @@ -133,7 +136,7 @@ def _get_percentage_of_elements(self, i_iter: int) -> float: return self.p_init * p_ratio[i_ratio] - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
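A second recurring change in these files swaps numeric pylint message codes for their symbolic names (for example `R1721` becomes `unnecessary-comprehension` and `C0103` becomes `invalid-name`), which is self-documenting and easier to audit. A tiny illustration of the directive style, not taken from the ART sources:

"""Toy module using symbolic pylint directives."""

N = 3


def make_copy(values):
    # Previously silenced with the numeric code R1721; the symbolic name targets the same check.
    return [value for value in values]  # pylint: disable=unnecessary-comprehension


print(make_copy(range(N)))  # [0, 1, 2]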
diff --git a/art/attacks/evasion/targeted_universal_perturbation.py b/art/attacks/evasion/targeted_universal_perturbation.py index e2b6b50cab..64258babf7 100644 --- a/art/attacks/evasion/targeted_universal_perturbation.py +++ b/art/attacks/evasion/targeted_universal_perturbation.py @@ -20,12 +20,12 @@ | Paper link: https://arxiv.org/abs/1911.06502 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import random import types -from typing import Any, Dict, Optional, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np @@ -60,11 +60,11 @@ def __init__( self, classifier: "CLASSIFIER_TYPE", attacker: str = "fgsm", - attacker_params: Optional[Dict[str, Any]] = None, + attacker_params: dict[str, Any] | None = None, delta: float = 0.2, max_iter: int = 20, eps: float = 10.0, - norm: Union[int, float, str] = np.inf, + norm: int | float | str = np.inf, ): """ :param classifier: A trained classifier. @@ -92,7 +92,7 @@ def __init__( self._targeted = True self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -182,7 +182,7 @@ def _check_params(self) -> None: if not isinstance(self.eps, (float, int)) or self.eps <= 0: raise ValueError("The eps coefficient must be a positive float.") - def _get_attack(self, a_name: str, params: Optional[Dict[str, Any]] = None) -> EvasionAttack: + def _get_attack(self, a_name: str, params: dict[str, Any] | None = None) -> EvasionAttack: """ Get an attack object from its name. diff --git a/art/attacks/evasion/universal_perturbation.py b/art/attacks/evasion/universal_perturbation.py index 5ae501eaa2..e61f0b6955 100644 --- a/art/attacks/evasion/universal_perturbation.py +++ b/art/attacks/evasion/universal_perturbation.py @@ -21,12 +21,12 @@ | Paper link: https://arxiv.org/abs/1610.08401 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import random import types -from typing import Any, Dict, Optional, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -79,11 +79,11 @@ def __init__( self, classifier: "CLASSIFIER_TYPE", attacker: str = "deepfool", - attacker_params: Optional[Dict[str, Any]] = None, + attacker_params: dict[str, Any] | None = None, delta: float = 0.2, max_iter: int = 20, eps: float = 10.0, - norm: Union[int, float, str] = np.inf, + norm: int | float | str = np.inf, batch_size: int = 32, verbose: bool = True, ) -> None: @@ -112,12 +112,12 @@ def __init__( self._check_params() # Attack properties - self._fooling_rate: Optional[float] = None - self._converged: Optional[bool] = None - self._noise: Optional[np.ndarray] = None + self._fooling_rate: float | None = None + self._converged: bool | None = None + self._noise: np.ndarray | None = None @property - def fooling_rate(self) -> Optional[float]: + def fooling_rate(self) -> float | None: """ The fooling rate of the universal perturbation on the most recent call to `generate`. 
@@ -126,7 +126,7 @@ def fooling_rate(self) -> Optional[float]: return self._fooling_rate @property - def converged(self) -> Optional[bool]: + def converged(self) -> bool | None: """ The convergence of universal perturbation generation. @@ -135,7 +135,7 @@ def converged(self) -> Optional[bool]: return self._converged @property - def noise(self) -> Optional[np.ndarray]: + def noise(self) -> np.ndarray | None: """ The universal perturbation. @@ -143,7 +143,7 @@ def noise(self) -> Optional[np.ndarray]: """ return self._noise - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -223,7 +223,7 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n return x_adv - def _get_attack(self, a_name: str, params: Optional[Dict[str, Any]] = None) -> EvasionAttack: + def _get_attack(self, a_name: str, params: dict[str, Any] | None = None) -> EvasionAttack: """ Get an attack object from its name. diff --git a/art/attacks/evasion/virtual_adversarial.py b/art/attacks/evasion/virtual_adversarial.py index ebe296f8e4..2441289627 100644 --- a/art/attacks/evasion/virtual_adversarial.py +++ b/art/attacks/evasion/virtual_adversarial.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -82,7 +82,7 @@ def __init__( self.verbose = verbose self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. diff --git a/art/attacks/evasion/wasserstein.py b/art/attacks/evasion/wasserstein.py index aa83a2dcac..c25d376ddc 100644 --- a/art/attacks/evasion/wasserstein.py +++ b/art/attacks/evasion/wasserstein.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from scipy.special import lambertw @@ -113,7 +113,7 @@ def __init__( self._targeted = targeted self.regularization = regularization - self.p = p # pylint: disable=C0103 + self.p = p # pylint: disable=invalid-name self.kernel_size = kernel_size self.eps_step = eps_step self.norm = norm @@ -128,7 +128,7 @@ def __init__( self.verbose = verbose self._check_params() - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. 
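These evasion-attack modules also swap the deprecated `typing` aliases for the builtin generics (`Tuple`/`List`/`Dict` become `tuple`/`list`/`dict`) and, where a callable type is needed, import `Callable` from `collections.abc`, as `square_attack.py` does above. A rough, self-contained sketch of the new spellings; the function and its parameters are illustrative only:

    from __future__ import annotations

    from collections.abc import Callable
    from typing import Any

    def line_search(params: dict[str, Any] | None,
                    bounds: tuple[float, float],
                    accept: Callable[[float], bool]) -> tuple[float, int]:
        # builtin generics are subscriptable at runtime from Python 3.9, and with
        # postponed evaluation they are also safe inside annotations on older versions
        low, high = bounds
        queries = 0 if params is None else len(params)
        return (high if accept(high) else low), queries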
diff --git a/art/attacks/evasion/zoo.py b/art/attacks/evasion/zoo.py index 62b5f3e38a..cc543edc94 100644 --- a/art/attacks/evasion/zoo.py +++ b/art/attacks/evasion/zoo.py @@ -25,7 +25,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple, Any, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np from scipy.ndimage import zoom @@ -165,13 +165,13 @@ def __init__( self._current_noise = np.zeros((batch_size,) + self.estimator.input_shape, dtype=ART_NUMPY_DTYPE) self._sample_prob = np.ones(self._current_noise.size, dtype=ART_NUMPY_DTYPE) / self._current_noise.size - self.adam_mean: Optional[np.ndarray] = None - self.adam_var: Optional[np.ndarray] = None - self.adam_epochs: Optional[np.ndarray] = None + self.adam_mean: np.ndarray | None = None + self.adam_var: np.ndarray | None = None + self.adam_epochs: np.ndarray | None = None def _loss( self, x: np.ndarray, x_adv: np.ndarray, target: np.ndarray, c_weight: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Compute the loss function values. @@ -201,7 +201,7 @@ def _loss( return preds, l2dist, c_weight * loss + l2dist - def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Generate adversarial samples and return them in an array. @@ -263,7 +263,7 @@ def _generate_batch(self, x_batch: np.ndarray, y_batch: np.ndarray) -> np.ndarra c_lower_bound = np.zeros(x_batch.shape[0]) c_upper_bound = 1e10 * np.ones(x_batch.shape[0]) - # Initialize best distortions and best attacks globally + # Initialize the best distortions and best attacks globally o_best_dist = np.inf * np.ones(x_batch.shape[0]) o_best_attack = x_batch.copy() @@ -297,7 +297,7 @@ def _update_const( c_batch: np.ndarray, c_lower_bound: np.ndarray, c_upper_bound: np.ndarray, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Update constant `c_batch` from the ZOO objective. This characterizes the trade-off between attack strength and amount of noise introduced. @@ -341,14 +341,14 @@ def _compare(self, object1: Any, object2: Any) -> bool: def _generate_bss( self, x_batch: np.ndarray, y_batch: np.ndarray, c_batch: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Generate adversarial examples for a batch of inputs with a specific batch of constants. :param x_batch: A batch of original examples. :param y_batch: A batch of targets (0-1 hot). :param c_batch: A batch of constants. - :return: A tuple of best elastic distances, best labels, best attacks. + :return: A tuple of the best elastic distances, best labels, best attacks. 
""" x_orig = x_batch.astype(ART_NUMPY_DTYPE) @@ -370,7 +370,7 @@ def _generate_bss( self._current_noise = np.zeros(x_batch.shape, dtype=ART_NUMPY_DTYPE) x_adv = x_orig.copy() - # Initialize best distortions, best changed labels and best attacks + # Initialize the best distortions, best changed labels and best attacks best_dist = np.inf * np.ones(x_adv.shape[0]) best_label = -np.inf * np.ones(x_adv.shape[0]) best_attack = np.array([x_adv[i] for i in range(x_adv.shape[0])]) @@ -567,7 +567,7 @@ def _optimizer_adam_coordinate( return current_noise.reshape(orig_shape) - def _reset_adam(self, nb_vars: int, indices: Optional[np.ndarray] = None) -> None: + def _reset_adam(self, nb_vars: int, indices: np.ndarray | None = None) -> None: # If variables are already there and at the right size, reset values if self.adam_mean is not None and self.adam_mean.size == nb_vars: if indices is None: diff --git a/art/attacks/extraction/copycat_cnn.py b/art/attacks/extraction/copycat_cnn.py index 9d853743dc..28fa392ab4 100644 --- a/art/attacks/extraction/copycat_cnn.py +++ b/art/attacks/extraction/copycat_cnn.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -82,7 +82,7 @@ def __init__( self.use_probability = use_probability self._check_params() - def extract(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> "CLASSIFIER_TYPE": + def extract(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> "CLASSIFIER_TYPE": """ Extract a thieved classifier. diff --git a/art/attacks/extraction/functionally_equivalent_extraction.py b/art/attacks/extraction/functionally_equivalent_extraction.py index f929fe853b..ac28cae248 100644 --- a/art/attacks/extraction/functionally_equivalent_extraction.py +++ b/art/attacks/extraction/functionally_equivalent_extraction.py @@ -27,9 +27,11 @@ | Paper link: https://arxiv.org/abs/1909.01838 """ +from __future__ import annotations + import logging import os -from typing import List, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from scipy.optimize import least_squares @@ -43,7 +45,7 @@ if TYPE_CHECKING: from art.utils import CLASSIFIER_TYPE -NUMPY_DTYPE = np.float64 # pylint: disable=C0103 +NUMPY_DTYPE = np.float64 # pylint: disable=invalid-name logger = logging.getLogger(__name__) @@ -58,7 +60,7 @@ class FunctionallyEquivalentExtraction(ExtractionAttack): _estimator_requirements = (BaseEstimator, NeuralNetworkMixin, ClassifierMixin) - def __init__(self, classifier: "CLASSIFIER_TYPE", num_neurons: Optional[int] = None) -> None: + def __init__(self, classifier: "CLASSIFIER_TYPE", num_neurons: int | None = None) -> None: """ Create a `FunctionallyEquivalentExtraction` instance. 
@@ -73,17 +75,17 @@ def __init__(self, classifier: "CLASSIFIER_TYPE", num_neurons: Optional[int] = N self.vector_u = np.random.normal(0, 1, (1, self.num_features)).astype(dtype=NUMPY_DTYPE) self.vector_v = np.random.normal(0, 1, (1, self.num_features)).astype(dtype=NUMPY_DTYPE) - self.critical_points: List[np.ndarray] = [] + self.critical_points: list[np.ndarray] = [] - self.w_0: Optional[np.ndarray] = None # Weight matrix of first dense layer - self.b_0: Optional[np.ndarray] = None # Bias vector of first dense layer - self.w_1: Optional[np.ndarray] = None # Weight matrix of second dense layer - self.b_1: Optional[np.ndarray] = None # Bias vector of second dense layer + self.w_0: np.ndarray | None = None # Weight matrix of first dense layer + self.b_0: np.ndarray | None = None # Bias vector of first dense layer + self.w_1: np.ndarray | None = None # Weight matrix of second dense layer + self.b_1: np.ndarray | None = None # Bias vector of second dense layer - def extract( # pylint: disable=W0221 + def extract( self, x: np.ndarray, - y: Optional[np.ndarray] = None, + y: np.ndarray | None = None, delta_0: float = 0.05, fraction_true: float = 0.3, rel_diff_slope: float = 0.00001, @@ -154,7 +156,7 @@ def predict(x: np.ndarray) -> np.ndarray: return extracted_classifier - def _o_l(self, x: np.ndarray, e_j: Optional[np.ndarray] = None) -> np.ndarray: + def _o_l(self, x: np.ndarray, e_j: np.ndarray | None = None) -> np.ndarray: """ Predict the target model. @@ -369,7 +371,7 @@ def f_z(z_i): e_i[i, 0] = unit_vector_scale def f_v(v_i): - # pylint: disable=W0640 + # pylint: disable=cell-var-from-loop return np.squeeze(np.matmul(-a0_pairwise_ratios_inverse.T, np.expand_dims(v_i, axis=0).T) - e_i) v_0 = np.random.normal(0, 1, self.num_features) @@ -424,16 +426,16 @@ def f_w_1_b_1(w_1_b_1_i): self.b_1 = result_a1_b1.x[self.num_neurons * self.num_classes :].reshape(self.num_classes, 1) -# pylint: disable=C0103, E0401 +# pylint: disable=invalid-name if __name__ == "__main__": import tensorflow as tf tf.compat.v1.disable_eager_execution() tf.keras.backend.set_floatx("float64") - from tensorflow.keras.datasets import mnist # pylint: disable=E0611 - from tensorflow.keras.models import Sequential # pylint: disable=E0611 - from tensorflow.keras.layers import Dense # pylint: disable=E0611 + from tensorflow.keras.datasets import mnist + from tensorflow.keras.models import Sequential + from tensorflow.keras.layers import Dense np.random.seed(1) number_neurons = 16 diff --git a/art/attacks/extraction/knockoff_nets.py b/art/attacks/extraction/knockoff_nets.py index c630777b7f..8ba20bb668 100644 --- a/art/attacks/extraction/knockoff_nets.py +++ b/art/attacks/extraction/knockoff_nets.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING, Union +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -97,7 +97,7 @@ def __init__( self.use_probability = use_probability self._check_params() - def extract(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> "CLASSIFIER_TYPE": + def extract(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> "CLASSIFIER_TYPE": """ Extract a thieved classifier. 
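Throughout these files the pylint pragmas are also rewritten from the legacy numeric codes to their symbolic names (`C0103` becomes `invalid-name`, `W0640` becomes `cell-var-from-loop`, `R1721` becomes `unnecessary-comprehension`), and suppressions the patch treats as obsolete, such as `E0611` on the `tensorflow.keras` imports, are dropped outright. A small illustrative snippet, not taken from ART, showing the symbolic form:

    # module-level name that does not follow pylint's UPPER_CASE constant convention
    numpy_dtype = "float64"  # pylint: disable=invalid-name

    factories = []
    for scale in (0.5, 1.0, 2.0):
        # the lambda closes over the loop variable, which pylint would flag
        factories.append(lambda x: x * scale)  # pylint: disable=cell-var-from-loop

The symbolic names behave exactly like the numeric codes but are self-documenting and easier to look up in the pylint message reference.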
@@ -302,7 +302,7 @@ def _sample_data(x: np.ndarray, y: np.ndarray, action: int) -> np.ndarray: return x_index[rnd_idx] - def _reward(self, y_output: np.ndarray, y_hat: np.ndarray, n: int) -> Union[float, np.ndarray]: + def _reward(self, y_output: np.ndarray, y_hat: np.ndarray, n: int) -> float | np.ndarray: """ Compute reward value. diff --git a/art/attacks/inference/attribute_inference/baseline.py b/art/attacks/inference/attribute_inference/baseline.py index bf137d499e..e02e1d9764 100644 --- a/art/attacks/inference/attribute_inference/baseline.py +++ b/art/attacks/inference/attribute_inference/baseline.py @@ -18,10 +18,10 @@ """ This module implements attribute inference attacks. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Union, List, Any, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor @@ -69,11 +69,11 @@ class AttributeInferenceBaseline(AttributeInferenceAttack): def __init__( self, attack_model_type: str = "nn", - attack_model: Optional[Union["CLASSIFIER_TYPE", "REGRESSOR_TYPE"]] = None, - attack_feature: Union[int, slice] = 0, - is_continuous: Optional[bool] = False, - non_numerical_features: Optional[List[int]] = None, - encoder: Optional[Union[OrdinalEncoder, OneHotEncoder, ColumnTransformer]] = None, + attack_model: "CLASSIFIER_TYPE" | "REGRESSOR_TYPE" | None = None, + attack_feature: int | slice = 0, + is_continuous: bool | None = False, + non_numerical_features: list[int] | None = None, + encoder: OrdinalEncoder | OneHotEncoder | ColumnTransformer = None, nn_model_epochs: int = 100, nn_model_batch_size: int = 100, nn_model_learning_rate: float = 0.0001, @@ -110,8 +110,8 @@ def __init__( self._encoder = encoder self._non_numerical_features = non_numerical_features self._is_continuous = is_continuous - self._attack_model_type: Optional[str] = attack_model_type - self.attack_model: Optional[Any] = None + self._attack_model_type: str | None = attack_model_type + self.attack_model: Any | None = None self.epochs = nn_model_epochs self.batch_size = nn_model_batch_size self.learning_rate = nn_model_learning_rate @@ -307,7 +307,7 @@ def forward(self, x): elif self.attack_model is not None: self.attack_model.fit(x_train, y_ready) - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer the attacked feature. diff --git a/art/attacks/inference/attribute_inference/black_box.py b/art/attacks/inference/attribute_inference/black_box.py index edce93d74d..746a26026e 100644 --- a/art/attacks/inference/attribute_inference/black_box.py +++ b/art/attacks/inference/attribute_inference/black_box.py @@ -18,10 +18,10 @@ """ This module implements attribute inference attacks. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Union, Tuple, List, Any, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor @@ -74,16 +74,16 @@ class AttributeInferenceBlackBox(AttributeInferenceAttack): def __init__( self, - estimator: Union["CLASSIFIER_TYPE", "REGRESSOR_TYPE"], + estimator: "CLASSIFIER_TYPE" | "REGRESSOR_TYPE", attack_model_type: str = "nn", - attack_model: Optional[Union["CLASSIFIER_TYPE", "REGRESSOR_TYPE"]] = None, - attack_feature: Union[int, slice] = 0, - is_continuous: Optional[bool] = False, - scale_range: Optional[Tuple[float, float]] = None, - prediction_normal_factor: Optional[float] = 1, - scaler_type: Optional[str] = "standard", - non_numerical_features: Optional[List[int]] = None, - encoder: Optional[Union[OrdinalEncoder, OneHotEncoder, ColumnTransformer]] = None, + attack_model: "CLASSIFIER_TYPE" | "REGRESSOR_TYPE" | None = None, + attack_feature: int | slice = 0, + is_continuous: bool | None = False, + scale_range: tuple[float, float] | None = None, + prediction_normal_factor: float | None = 1, + scaler_type: str | None = "standard", + non_numerical_features: list[int] | None = None, + encoder: OrdinalEncoder | OneHotEncoder | ColumnTransformer | None = None, nn_model_epochs: int = 100, nn_model_batch_size: int = 100, nn_model_learning_rate: float = 0.0001, @@ -129,15 +129,15 @@ def __init__( super().__init__(estimator=estimator, attack_feature=attack_feature) self._values: list = [] - self._attack_model_type: Optional[str] = attack_model_type + self._attack_model_type: str | None = attack_model_type self._encoder = encoder self._non_numerical_features = non_numerical_features self._is_continuous = is_continuous - self.attack_model: Optional[Any] = None + self.attack_model: Any | None = None self.prediction_normal_factor = prediction_normal_factor self.scale_range = scale_range self.scaler_type = scaler_type - self.scaler: Optional[Any] = None + self.scaler: Any | None = None self.epochs = nn_model_epochs self.batch_size = nn_model_batch_size self.learning_rate = nn_model_learning_rate @@ -187,7 +187,7 @@ def __init__( remove_attacked_feature(self.attack_feature, self._non_numerical_features) - def fit(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> None: + def fit(self, x: np.ndarray, y: np.ndarray | None = None) -> None: """ Train the attack model. @@ -369,7 +369,7 @@ def forward(self, x): elif self.attack_model is not None: self.attack_model.fit(x_train, y_attack_ready) - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer the attacked feature. @@ -386,13 +386,13 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n :type values: list, optional :return: The inferred feature values. 
""" - values: Optional[list] = kwargs.get("values") + values: list | None = kwargs.get("values") # if provided, override the values computed in fit() if values is not None: self._values = values - pred: Optional[np.ndarray] = kwargs.get("pred") + pred: np.ndarray | None = kwargs.get("pred") if pred is None: raise ValueError("Please provide param `pred` of model predictions.") diff --git a/art/attacks/inference/attribute_inference/meminf_based.py b/art/attacks/inference/attribute_inference/meminf_based.py index df886847c2..f380f48f4c 100644 --- a/art/attacks/inference/attribute_inference/meminf_based.py +++ b/art/attacks/inference/attribute_inference/meminf_based.py @@ -21,7 +21,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Union, List, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -49,9 +49,9 @@ class AttributeInferenceMembership(AttributeInferenceAttack): def __init__( self, - estimator: Union["CLASSIFIER_TYPE", "REGRESSOR_TYPE"], + estimator: "CLASSIFIER_TYPE" | "REGRESSOR_TYPE", membership_attack: MembershipInferenceAttack, - attack_feature: Union[int, slice] = 0, + attack_feature: int | slice = 0, ): """ Create an AttributeInferenceMembership attack instance. @@ -69,7 +69,7 @@ def __init__( self.membership_attack = membership_attack self._check_params() - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer the attacked feature. @@ -89,7 +89,7 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n if "values" not in kwargs: raise ValueError("Missing parameter `values`.") - values: Optional[List] = kwargs.get("values") + values: list | None = kwargs.get("values") if not values: raise ValueError("`values` cannot be None or empty") diff --git a/art/attacks/inference/attribute_inference/true_label_baseline.py b/art/attacks/inference/attribute_inference/true_label_baseline.py index 9430e4dfb4..61e68770b6 100644 --- a/art/attacks/inference/attribute_inference/true_label_baseline.py +++ b/art/attacks/inference/attribute_inference/true_label_baseline.py @@ -18,10 +18,10 @@ """ This module implements attribute inference attacks. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Union, Tuple, List, Any, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor @@ -72,14 +72,14 @@ class AttributeInferenceBaselineTrueLabel(AttributeInferenceAttack): def __init__( self, attack_model_type: str = "nn", - attack_model: Optional[Union["CLASSIFIER_TYPE", "REGRESSOR_TYPE"]] = None, - attack_feature: Union[int, slice] = 0, - is_continuous: Optional[bool] = False, - is_regression: Optional[bool] = False, - scale_range: Optional[Tuple[float, float]] = None, + attack_model: "CLASSIFIER_TYPE" | "REGRESSOR_TYPE" | None = None, + attack_feature: int | slice = 0, + is_continuous: bool | None = False, + is_regression: bool | None = False, + scale_range: tuple[float, float] | None = None, prediction_normal_factor: float = 1, - non_numerical_features: Optional[List[int]] = None, - encoder: Optional[Union[OrdinalEncoder, OneHotEncoder, ColumnTransformer]] = None, + non_numerical_features: list[int] | None = None, + encoder: OrdinalEncoder | OneHotEncoder | ColumnTransformer | None = None, nn_model_epochs: int = 100, nn_model_batch_size: int = 100, nn_model_learning_rate: float = 0.0001, @@ -122,8 +122,8 @@ def __init__( self._encoder = encoder self._non_numerical_features = non_numerical_features self._is_continuous = is_continuous - self._attack_model_type: Optional[str] = attack_model_type - self.attack_model: Optional[Any] = None + self._attack_model_type: str | None = attack_model_type + self.attack_model: Any | None = None self.epochs = nn_model_epochs self.batch_size = nn_model_batch_size self.learning_rate = nn_model_learning_rate @@ -335,7 +335,7 @@ def forward(self, x): elif self.attack_model is not None: self.attack_model.fit(x_train, y_ready) - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer the attacked feature. diff --git a/art/attacks/inference/attribute_inference/white_box_decision_tree.py b/art/attacks/inference/attribute_inference/white_box_decision_tree.py index 7647a34375..1db23c5234 100644 --- a/art/attacks/inference/attribute_inference/white_box_decision_tree.py +++ b/art/attacks/inference/attribute_inference/white_box_decision_tree.py @@ -18,10 +18,10 @@ """ This module implements attribute inference attacks. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional + import numpy as np @@ -57,7 +57,7 @@ def __init__(self, classifier: ScikitlearnDecisionTreeClassifier, attack_feature self.attack_feature: int self._check_params() - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer the attacked feature. 
@@ -76,8 +76,8 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n raise ValueError("Missing parameter `priors`.") if "values" not in kwargs: # pragma: no cover raise ValueError("Missing parameter `values`.") - priors: Optional[list] = kwargs.get("priors") - values: Optional[list] = kwargs.get("values") + priors: list | None = kwargs.get("priors") + values: list | None = kwargs.get("values") if self.estimator.input_shape[0] != x.shape[1] + 1: # pragma: no cover raise ValueError("Number of features in x + 1 does not match input_shape of classifier") @@ -141,6 +141,3 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n for index, value in enumerate(predicted_pred) ] ) - - def _check_params(self) -> None: - super()._check_params() diff --git a/art/attacks/inference/attribute_inference/white_box_lifestyle_decision_tree.py b/art/attacks/inference/attribute_inference/white_box_lifestyle_decision_tree.py index b0e18236c8..4cbc673210 100644 --- a/art/attacks/inference/attribute_inference/white_box_lifestyle_decision_tree.py +++ b/art/attacks/inference/attribute_inference/white_box_lifestyle_decision_tree.py @@ -18,10 +18,10 @@ """ This module implements attribute inference attacks. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -47,7 +47,7 @@ class AttributeInferenceWhiteBoxLifestyleDecisionTree(AttributeInferenceAttack): _estimator_requirements = ((ScikitlearnDecisionTreeClassifier, ScikitlearnDecisionTreeRegressor),) - def __init__(self, estimator: Union["CLASSIFIER_TYPE", "REGRESSOR_TYPE"], attack_feature: int = 0): + def __init__(self, estimator: "CLASSIFIER_TYPE" | "REGRESSOR_TYPE", attack_feature: int = 0): """ Create an AttributeInferenceWhiteBoxLifestyle attack instance. @@ -58,7 +58,7 @@ def __init__(self, estimator: Union["CLASSIFIER_TYPE", "REGRESSOR_TYPE"], attack self.attack_feature: int self._check_params() - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer the attacked feature. @@ -71,8 +71,8 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n :return: The inferred feature values. 
:rtype: `np.ndarray` """ - priors: Optional[list] = kwargs.get("priors") - values: Optional[list] = kwargs.get("values") + priors: list | None = kwargs.get("priors") + values: list | None = kwargs.get("values") # Checks: if self.estimator.input_shape[0] != x.shape[1] + 1: # pragma: no cover @@ -110,7 +110,7 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n ] prob_values.append(prob_value) - # Choose the value with highest probability for each sample + # Choose the value with the highest probability for each sample return np.array([values[np.argmax(list(prob))] for prob in zip(*prob_values)]) def _calculate_phi(self, x, values, n_samples): @@ -130,6 +130,3 @@ def _calculate_phi(self, x, values, n_samples): phi.append(num_value) return phi - - def _check_params(self) -> None: - super()._check_params() diff --git a/art/attacks/inference/membership_inference/black_box.py b/art/attacks/inference/membership_inference/black_box.py index 1704346c44..6de9e520b8 100644 --- a/art/attacks/inference/membership_inference/black_box.py +++ b/art/attacks/inference/membership_inference/black_box.py @@ -20,10 +20,10 @@ This module implements membership inference attacks. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Any, Optional, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier @@ -66,11 +66,11 @@ class MembershipInferenceBlackBox(MembershipInferenceAttack): def __init__( self, - estimator: Union["CLASSIFIER_TYPE", "REGRESSOR_TYPE"], + estimator: "CLASSIFIER_TYPE" | "REGRESSOR_TYPE", input_type: str = "prediction", attack_model_type: str = "nn", - attack_model: Optional[Any] = None, - scaler_type: Optional[str] = "standard", + attack_model: Any | None = None, + scaler_type: str | None = "standard", nn_model_epochs: int = 100, nn_model_batch_size: int = 100, nn_model_learning_rate: float = 0.0001, @@ -105,7 +105,7 @@ def __init__( self.attack_model_type = attack_model_type self.attack_model = attack_model self.scaler_type = scaler_type - self.scaler: Optional[Any] = None + self.scaler: Any | None = None self.epochs = nn_model_epochs self.batch_size = nn_model_batch_size self.learning_rate = nn_model_learning_rate @@ -136,15 +136,15 @@ def __init__( elif attack_model_type != "nn": raise ValueError("Illegal value for parameter `attack_model_type`.") - def fit( # pylint: disable=W0613 + def fit( self, - x: Optional[np.ndarray] = None, - y: Optional[np.ndarray] = None, - test_x: Optional[np.ndarray] = None, - test_y: Optional[np.ndarray] = None, - pred: Optional[np.ndarray] = None, - test_pred: Optional[np.ndarray] = None, - **kwargs + x: np.ndarray | None = None, + y: np.ndarray | None = None, + test_x: np.ndarray | None = None, + test_y: np.ndarray | None = None, + pred: np.ndarray | None = None, + test_pred: np.ndarray | None = None, + **kwargs, ): """ Train the attack model. 
@@ -247,7 +247,7 @@ def fit( # pylint: disable=W0613 test_labels = np.zeros(test_len) x_1 = np.concatenate((features, test_features)) - x_2: Optional[np.ndarray] = None + x_2: np.ndarray | None = None if y is not None and test_y is not None: x_2 = np.concatenate((y, test_y)) if self._regressor_model and x_2 is not None: @@ -429,7 +429,7 @@ def forward(self, x_1): x_1 = self.scaler.transform(x_1) self.attack_model.fit(x_1, y_ready.ravel()) # type: ignore - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer membership in the training set of the target estimator. @@ -503,7 +503,7 @@ def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.n features = self.scaler.transform(features) self.attack_model.eval() # type: ignore - predictions: Optional[np.ndarray] = None + predictions: np.ndarray | None = None if y is not None and self.use_label: test_set = self._get_attack_dataset(f_1=features, f_2=y) diff --git a/art/attacks/inference/membership_inference/black_box_rule_based.py b/art/attacks/inference/membership_inference/black_box_rule_based.py index ac4450c28b..a703accf67 100644 --- a/art/attacks/inference/membership_inference/black_box_rule_based.py +++ b/art/attacks/inference/membership_inference/black_box_rule_based.py @@ -22,7 +22,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -56,7 +56,7 @@ def __init__(self, classifier: "CLASSIFIER_TYPE"): """ super().__init__(estimator=classifier) - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer membership in the training set of the target estimator. diff --git a/art/attacks/inference/membership_inference/label_only_boundary_distance.py b/art/attacks/inference/membership_inference/label_only_boundary_distance.py index d902768cc3..e2b8468e6c 100644 --- a/art/attacks/inference/membership_inference/label_only_boundary_distance.py +++ b/art/attacks/inference/membership_inference/label_only_boundary_distance.py @@ -21,8 +21,10 @@ | Paper link: https://arxiv.org/abs/2007.14321 (Choquette-Choo et al.) | Paper link: https://arxiv.org/abs/2007.15528 (Li and Zhang) """ +from __future__ import annotations + import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -52,7 +54,7 @@ class LabelOnlyDecisionBoundary(MembershipInferenceAttack): ] _estimator_requirements = (BaseEstimator, ClassifierMixin) - def __init__(self, estimator: "CLASSIFIER_TYPE", distance_threshold_tau: Optional[float] = None): + def __init__(self, estimator: "CLASSIFIER_TYPE", distance_threshold_tau: float | None = None): """ Create a `LabelOnlyDecisionBoundary` instance for Label-Only Inference Attack based on Decision Boundary. @@ -65,7 +67,7 @@ def __init__(self, estimator: "CLASSIFIER_TYPE", distance_threshold_tau: Optiona self.threshold_bins: list = [] self._check_params() - def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Infer membership of input `x` in estimator's training data. 
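The attribute- and membership-inference modules above combine the new `|` syntax with quoted forward references such as `"CLASSIFIER_TYPE" | "REGRESSOR_TYPE" | None`, where the referenced aliases are only imported under `TYPE_CHECKING`. This is only well-formed because the same files postpone annotation evaluation: the whole expression stays text for the type checker, and the string-`|`-string operation is never executed. A minimal sketch of the idiom with a hypothetical class; the alias names mirror those used in the hunks above:

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # resolved by the type checker only; never imported at runtime
        from art.utils import CLASSIFIER_TYPE, REGRESSOR_TYPE

    class InferenceAttackSketch:
        def __init__(self, estimator: "CLASSIFIER_TYPE" | "REGRESSOR_TYPE" | None = None) -> None:
            # without postponed evaluation this annotation would raise
            # TypeError: unsupported operand type(s) for |: 'str' and 'str'
            self.estimator = estimator

Without the `annotations` future import (or a fully quoted annotation string), mixing quoted names with `|` fails at import time, so the two changes in these hunks have to land together.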
diff --git a/art/attacks/inference/membership_inference/shadow_models.py b/art/attacks/inference/membership_inference/shadow_models.py index 92b37668bd..70619f9c99 100644 --- a/art/attacks/inference/membership_inference/shadow_models.py +++ b/art/attacks/inference/membership_inference/shadow_models.py @@ -20,11 +20,12 @@ This module implements membership inference attacks. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable, Sequence import math from functools import reduce -from typing import Callable, Tuple, TYPE_CHECKING, List, Optional, Sequence +from typing import TYPE_CHECKING import numpy as np @@ -58,7 +59,7 @@ def __init__( """ self._shadow_models = [shadow_model_template.clone_for_refitting() for _ in range(num_shadow_models)] - self._shadow_models_train_sets: List[Optional[Tuple[np.ndarray, np.ndarray]]] = [None] * num_shadow_models + self._shadow_models_train_sets: list[tuple[np.ndarray, np.ndarray] | None] = [None] * num_shadow_models self._input_shape = shadow_model_template.input_shape self._rng = np.random.default_rng(seed=random_state) self._disjoint_datasets = disjoint_datasets @@ -68,7 +69,7 @@ def generate_shadow_dataset( x: np.ndarray, y: np.ndarray, member_ratio: float = 0.5, - ) -> Tuple[Tuple[np.ndarray, np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray]]: + ) -> tuple[tuple[np.ndarray, np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray, np.ndarray]]: """ Generates a shadow dataset (member and nonmember samples and their corresponding model predictions) by splitting the dataset into training and testing samples, and then training the shadow models on the result. @@ -160,12 +161,12 @@ def _hill_climbing_synthesis( target_classifier: "CLASSIFIER_TYPE", target_class: int, min_confidence: float, - max_features_randomized: Optional[int], + max_features_randomized: int | None, max_iterations: int = 40, max_rejections: int = 3, min_features_randomized: int = 1, - random_record_fn: Optional[Callable[[], np.ndarray]] = None, - randomize_features_fn: Optional[Callable[[np.ndarray, int], np.ndarray]] = None, + random_record_fn: Callable[[], np.ndarray] | None = None, + randomize_features_fn: Callable[[np.ndarray, int], np.ndarray] | None = None, ) -> np.ndarray: """ This method implements the hill climbing algorithm from R. Shokri et al. 
(2017) @@ -243,13 +244,13 @@ def generate_synthetic_shadow_dataset( self, target_classifier: "CLASSIFIER_TYPE", dataset_size: int, - max_features_randomized: Optional[int], + max_features_randomized: int | None, member_ratio: float = 0.5, min_confidence: float = 0.4, max_retries: int = 6, - random_record_fn: Optional[Callable[[], np.ndarray]] = None, - randomize_features_fn: Optional[Callable[[np.ndarray, int], np.ndarray]] = None, - ) -> Tuple[Tuple[np.ndarray, np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray]]: + random_record_fn: Callable[[], np.ndarray] | None = None, + randomize_features_fn: Callable[[np.ndarray, int], np.ndarray] | None = None, + ) -> tuple[tuple[np.ndarray, np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray, np.ndarray]]: """ Generates a shadow dataset (member and nonmember samples and their corresponding model predictions) by training the shadow models on a synthetic dataset generated from the target classifier using the hill climbing algorithm @@ -323,7 +324,7 @@ def get_shadow_models( """ return self._shadow_models - def get_shadow_models_train_sets(self) -> List[Optional[Tuple[np.ndarray, np.ndarray]]]: + def get_shadow_models_train_sets(self) -> list[tuple[np.ndarray, np.ndarray] | None]: """ Returns a list of tuples the form (shadow_x_train, shadow_y_train) for each shadow model. `generate_shadow_dataset` or `generate_synthetic_shadow_dataset` must be called before, or a list of Nones will diff --git a/art/attacks/inference/model_inversion/mi_face.py b/art/attacks/inference/model_inversion/mi_face.py index 24c7809158..26a478e268 100644 --- a/art/attacks/inference/model_inversion/mi_face.py +++ b/art/attacks/inference/model_inversion/mi_face.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -90,7 +90,7 @@ def __init__( self.verbose = verbose self._check_params() - def infer(self, x: Optional[np.ndarray], y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray: + def infer(self, x: np.ndarray | None, y: np.ndarray | None = None, **kwargs) -> np.ndarray: """ Extract a thieved classifier. diff --git a/art/attacks/inference/reconstruction/white_box.py b/art/attacks/inference/reconstruction/white_box.py index 3ed3aab5d9..e15c663feb 100644 --- a/art/attacks/inference/reconstruction/white_box.py +++ b/art/attacks/inference/reconstruction/white_box.py @@ -21,7 +21,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple import numpy as np import sklearn @@ -68,7 +67,7 @@ def objective(x, y, x_train, y_train, private_estimator, parent_model, params): return residual - def reconstruct(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> Tuple[np.ndarray, np.ndarray]: + def reconstruct(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Infer the missing row from x, y with which `estimator` was trained with. 
@@ -83,7 +82,7 @@ def reconstruct(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) - tol = float("inf") x_0 = x[0, :] - x_guess: Optional[np.ndarray] = None + x_guess: np.ndarray | None = None y_guess: int for _y in range(self.estimator.nb_classes): diff --git a/art/attacks/poisoning/adversarial_embedding_attack.py b/art/attacks/poisoning/adversarial_embedding_attack.py index 7ae533311a..d4c4c6aef5 100644 --- a/art/attacks/poisoning/adversarial_embedding_attack.py +++ b/art/attacks/poisoning/adversarial_embedding_attack.py @@ -18,10 +18,10 @@ """ This module implements clean-label attacks on Neural Networks. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Tuple, Union, List, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -63,9 +63,9 @@ def __init__( self, classifier: "CLASSIFIER_TYPE", backdoor: PoisoningAttackBackdoor, - feature_layer: Union[int, str], - target: Union[np.ndarray, List[Tuple[np.ndarray, np.ndarray]]], - pp_poison: Union[float, List[float]] = 0.05, + feature_layer: int | str, + target: np.ndarray | list[tuple[np.ndarray, np.ndarray]], + pp_poison: float | list[float] = 0.05, discriminator_layer_1: int = 256, discriminator_layer_2: int = 128, regularization: float = 10, @@ -97,23 +97,23 @@ def __init__( self.discriminator_layer_1 = discriminator_layer_1 self.discriminator_layer_2 = discriminator_layer_2 self.regularization = regularization - self.train_data: Optional[np.ndarray] = None - self.train_labels: Optional[np.ndarray] = None - self.is_backdoor: Optional[np.ndarray] = None + self.train_data: np.ndarray | None = None + self.train_labels: np.ndarray | None = None + self.is_backdoor: np.ndarray | None = None self.learning_rate = learning_rate self._check_params() if isinstance(self.estimator, KerasClassifier): using_tf_keras = "tensorflow.python.keras" in str(type(self.estimator.model)) if using_tf_keras: # pragma: no cover - from tensorflow.keras.models import Model, clone_model # pylint: disable=E0611 - from tensorflow.keras.layers import ( # pylint: disable=E0611 + from tensorflow.keras.models import Model, clone_model + from tensorflow.keras.layers import ( GaussianNoise, Dense, BatchNormalization, LeakyReLU, ) - from tensorflow.keras.optimizers.legacy import Adam # pylint: disable=E0611 + from tensorflow.keras.optimizers.legacy import Adam opt = Adam(lr=self.learning_rate) @@ -179,9 +179,9 @@ def __init__( else: raise NotImplementedError("This attack currently only supports Keras.") - def poison( # pylint: disable=W0221 - self, x: np.ndarray, y: Optional[np.ndarray] = None, broadcast=False, **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: + def poison( + self, x: np.ndarray, y: np.ndarray | None = None, broadcast=False, **kwargs + ) -> tuple[np.ndarray, np.ndarray]: """ Calls perturbation function on input x and target labels y @@ -192,7 +192,7 @@ def poison( # pylint: disable=W0221 """ return self.backdoor.poison(x, y, broadcast=broadcast) - def poison_estimator( # pylint: disable=W0221 + def poison_estimator( self, x: np.ndarray, y: np.ndarray, batch_size: int = 64, nb_epochs: int = 10, **kwargs ) -> "CLASSIFIER_TYPE": """ @@ -262,7 +262,7 @@ def poison_estimator( # pylint: disable=W0221 raise NotImplementedError("Currently only Keras is supported") - def get_training_data(self) -> Optional[Tuple[np.ndarray, Optional[np.ndarray], 
Optional[np.ndarray]]]: + def get_training_data(self) -> tuple[np.ndarray, np.ndarray | None, np.ndarray | None] | None: """ Returns the training data generated from the last call to fit diff --git a/art/attacks/poisoning/backdoor_attack.py b/art/attacks/poisoning/backdoor_attack.py index 34223cb8ca..0f829589fe 100644 --- a/art/attacks/poisoning/backdoor_attack.py +++ b/art/attacks/poisoning/backdoor_attack.py @@ -18,10 +18,10 @@ """ This module implements Backdoor Attacks to poison data used in ML models. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging -from typing import Callable, List, Optional, Tuple, Union import numpy as np @@ -43,7 +43,7 @@ class PoisoningAttackBackdoor(PoisoningAttackBlackBox): attack_params = PoisoningAttackBlackBox.attack_params + ["perturbation"] _estimator_requirements = () - def __init__(self, perturbation: Union[Callable, List[Callable]]) -> None: + def __init__(self, perturbation: Callable | list[Callable]) -> None: """ Initialize a backdoor poisoning attack. @@ -53,9 +53,9 @@ def __init__(self, perturbation: Union[Callable, List[Callable]]) -> None: self.perturbation = perturbation self._check_params() - def poison( # pylint: disable=W0221 - self, x: np.ndarray, y: Optional[np.ndarray] = None, broadcast=False, **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: + def poison( + self, x: np.ndarray, y: np.ndarray | None = None, broadcast=False, **kwargs + ) -> tuple[np.ndarray, np.ndarray]: """ Calls perturbation function on input x and returns the perturbed input and poison labels for the data. diff --git a/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_red.py b/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_red.py index 9a10b28a93..b792cbc22a 100644 --- a/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_red.py +++ b/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_red.py @@ -53,7 +53,6 @@ def __init__(self, generator: "TensorFlowV2Generator") -> None: """ import tensorflow as tf - # pylint: disable=W0212 super().__init__(generator=generator) self._model_clone = tf.keras.models.clone_model(self.estimator.model) diff --git a/art/attacks/poisoning/bad_det/bad_det_gma.py b/art/attacks/poisoning/bad_det/bad_det_gma.py index 14a7b5ba3d..1501094e81 100644 --- a/art/attacks/poisoning/bad_det/bad_det_gma.py +++ b/art/attacks/poisoning/bad_det/bad_det_gma.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Dict, List, Tuple, Union + import numpy as np from tqdm.auto import tqdm @@ -75,18 +75,18 @@ def __init__( self.verbose = verbose self._check_params() - def poison( # pylint: disable=W0221 + def poison( self, - x: Union[np.ndarray, List[np.ndarray]], - y: List[Dict[str, np.ndarray]], + x: np.ndarray | list[np.ndarray], + y: list[dict[str, np.ndarray]], **kwargs, - ) -> Tuple[Union[np.ndarray, List[np.ndarray]], List[Dict[str, np.ndarray]]]: + ) -> tuple[np.ndarray | list[np.ndarray], list[dict[str, np.ndarray]]]: """ Generate poisoning examples by inserting the backdoor onto the input `x` and changing the classification for labels `y`. :param x: Sample images of shape `NCHW` or `NHWC` or a list of sample images of any size. - :param y: True labels of type `List[Dict[np.ndarray]]`, one dictionary per input image. 
The keys and values + :param y: True labels of type `list[dict[np.ndarray]]`, one dictionary per input image. The keys and values of the dictionary are: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -102,14 +102,14 @@ def poison( # pylint: disable=W0221 raise ValueError("Unrecognized input dimension. BadDet GMA can only be applied to image data.") # copy images - x_poison: Union[np.ndarray, List[np.ndarray]] + x_poison: np.ndarray | list[np.ndarray] if isinstance(x, np.ndarray): x_poison = x.copy() else: x_poison = [x_i.copy() for x_i in x] # copy labels - y_poison: List[Dict[str, np.ndarray]] = [] + y_poison: list[dict[str, np.ndarray]] = [] for y_i in y: target_dict = {k: v.copy() for k, v in y_i.items()} y_poison.append(target_dict) diff --git a/art/attacks/poisoning/bad_det/bad_det_oda.py b/art/attacks/poisoning/bad_det/bad_det_oda.py index 3b99fd35b7..1d144f14bb 100644 --- a/art/attacks/poisoning/bad_det/bad_det_oda.py +++ b/art/attacks/poisoning/bad_det/bad_det_oda.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Dict, List, Tuple, Union + import numpy as np from tqdm.auto import tqdm @@ -75,18 +75,18 @@ def __init__( self.verbose = verbose self._check_params() - def poison( # pylint: disable=W0221 + def poison( self, - x: Union[np.ndarray, List[np.ndarray]], - y: List[Dict[str, np.ndarray]], + x: np.ndarray | list[np.ndarray], + y: list[dict[str, np.ndarray]], **kwargs, - ) -> Tuple[Union[np.ndarray, List[np.ndarray]], List[Dict[str, np.ndarray]]]: + ) -> tuple[np.ndarray | list[np.ndarray], list[dict[str, np.ndarray]]]: """ Generate poisoning examples by inserting the backdoor onto the input `x` and changing the classification for labels `y`. :param x: Sample images of shape `NCHW` or `NHWC` or a list of sample images of any size. - :param y: True labels of type `List[Dict[np.ndarray]]`, one dictionary per input image. The keys and values + :param y: True labels of type `list[dict[np.ndarray]]`, one dictionary per input image. The keys and values of the dictionary are: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -102,14 +102,14 @@ def poison( # pylint: disable=W0221 raise ValueError("Unrecognized input dimension. 
BadDet ODA can only be applied to image data.") # copy images - x_poison: Union[np.ndarray, List[np.ndarray]] + x_poison: np.ndarray | list[np.ndarray] if isinstance(x, np.ndarray): x_poison = x.copy() else: x_poison = [x_i.copy() for x_i in x] # copy labels and find indices of the source class - y_poison: List[Dict[str, np.ndarray]] = [] + y_poison: list[dict[str, np.ndarray]] = [] source_indices = [] for i, y_i in enumerate(y): target_dict = {k: v.copy() for k, v in y_i.items()} diff --git a/art/attacks/poisoning/bad_det/bad_det_oga.py b/art/attacks/poisoning/bad_det/bad_det_oga.py index 042e195de7..ddb6385d06 100644 --- a/art/attacks/poisoning/bad_det/bad_det_oga.py +++ b/art/attacks/poisoning/bad_det/bad_det_oga.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Dict, List, Tuple, Union + import numpy as np from tqdm.auto import tqdm @@ -83,18 +83,18 @@ def __init__( self.verbose = verbose self._check_params() - def poison( # pylint: disable=W0221 + def poison( self, - x: Union[np.ndarray, List[np.ndarray]], - y: List[Dict[str, np.ndarray]], + x: np.ndarray | list[np.ndarray], + y: list[dict[str, np.ndarray]], **kwargs, - ) -> Tuple[Union[np.ndarray, List[np.ndarray]], List[Dict[str, np.ndarray]]]: + ) -> tuple[np.ndarray | list[np.ndarray], list[dict[str, np.ndarray]]]: """ Generate poisoning examples by inserting the backdoor onto the input `x` and changing the classification for labels `y`. :param x: Sample images of shape `NCHW` or `NHWC` or a list of sample images of any size. - :param y: True labels of type `List[Dict[np.ndarray]]`, one dictionary per input image. The keys and values + :param y: True labels of type `list[dict[np.ndarray]]`, one dictionary per input image. The keys and values of the dictionary are: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -110,14 +110,14 @@ def poison( # pylint: disable=W0221 raise ValueError("Unrecognized input dimension. 
BadDet OGA can only be applied to image data.") # copy images - x_poison: Union[np.ndarray, List[np.ndarray]] + x_poison: np.ndarray | list[np.ndarray] if isinstance(x, np.ndarray): x_poison = x.copy() else: x_poison = [x_i.copy() for x_i in x] # copy labels - y_poison: List[Dict[str, np.ndarray]] = [] + y_poison: list[dict[str, np.ndarray]] = [] for y_i in y: target_dict = {k: v.copy() for k, v in y_i.items()} y_poison.append(target_dict) diff --git a/art/attacks/poisoning/bad_det/bad_det_rma.py b/art/attacks/poisoning/bad_det/bad_det_rma.py index 68ec7aeb92..1e11deb847 100644 --- a/art/attacks/poisoning/bad_det/bad_det_rma.py +++ b/art/attacks/poisoning/bad_det/bad_det_rma.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Dict, List, Tuple, Union, Optional + import numpy as np from tqdm.auto import tqdm @@ -54,7 +54,7 @@ class BadDetRegionalMisclassificationAttack(PoisoningAttackObjectDetector): def __init__( self, backdoor: PoisoningAttackBackdoor, - class_source: Optional[int] = None, + class_source: int | None = None, class_target: int = 1, percent_poison: float = 0.3, channels_first: bool = False, @@ -80,18 +80,18 @@ def __init__( self.verbose = verbose self._check_params() - def poison( # pylint: disable=W0221 + def poison( self, - x: Union[np.ndarray, List[np.ndarray]], - y: List[Dict[str, np.ndarray]], + x: np.ndarray | list[np.ndarray], + y: list[dict[str, np.ndarray]], **kwargs, - ) -> Tuple[Union[np.ndarray, List[np.ndarray]], List[Dict[str, np.ndarray]]]: + ) -> tuple[np.ndarray | list[np.ndarray], list[dict[str, np.ndarray]]]: """ Generate poisoning examples by inserting the backdoor onto the input `x` and changing the classification for labels `y`. :param x: Sample images of shape `NCHW` or `NHWC` or a list of sample images of any size. - :param y: True labels of type `List[Dict[np.ndarray]]`, one dictionary per input image. The keys and values + :param y: True labels of type `list[dict[np.ndarray]]`, one dictionary per input image. The keys and values of the dictionary are: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -107,14 +107,14 @@ def poison( # pylint: disable=W0221 raise ValueError("Unrecognized input dimension. BadDet RMA can only be applied to image data.") # copy images - x_poison: Union[np.ndarray, List[np.ndarray]] + x_poison: np.ndarray | list[np.ndarray] if isinstance(x, np.ndarray): x_poison = x.copy() else: x_poison = [x_i.copy() for x_i in x] # copy labels and find indices of the source class - y_poison: List[Dict[str, np.ndarray]] = [] + y_poison: list[dict[str, np.ndarray]] = [] source_indices = [] for i, y_i in enumerate(y): target_dict = {k: v.copy() for k, v in y_i.items()} diff --git a/art/attacks/poisoning/bullseye_polytope_attack.py b/art/attacks/poisoning/bullseye_polytope_attack.py index 543f6b4aaa..46e2eff100 100644 --- a/art/attacks/poisoning/bullseye_polytope_attack.py +++ b/art/attacks/poisoning/bullseye_polytope_attack.py @@ -18,11 +18,11 @@ """ This module implements Bullseye Polytope clean-label attacks on Neural Networks. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import time -from typing import Optional, Tuple, Union, TYPE_CHECKING, List +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -33,7 +33,7 @@ from art.estimators.classification.pytorch import PyTorchClassifier if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLASSIFIER_NEURALNETWORK_TYPE @@ -71,14 +71,14 @@ class BullseyePolytopeAttackPyTorch(PoisoningAttackWhiteBox): def __init__( self, - classifier: Union["CLASSIFIER_NEURALNETWORK_TYPE", List["CLASSIFIER_NEURALNETWORK_TYPE"]], + classifier: "CLASSIFIER_NEURALNETWORK_TYPE" | list["CLASSIFIER_NEURALNETWORK_TYPE"], target: np.ndarray, - feature_layer: Union[Union[str, int], List[Union[str, int]]], + feature_layer: str | int | list[str | int], opt: str = "adam", max_iter: int = 4000, learning_rate: float = 4e-2, momentum: float = 0.9, - decay_iter: Union[int, List[int]] = 10000, + decay_iter: int | list[int] = 10000, decay_coeff: float = 0.5, epsilon: float = 0.1, dropout: float = 0.3, @@ -109,7 +109,7 @@ def __init__( :param batch_size: Batch size. :param verbose: Show progress bars. """ - self.subsistute_networks: List["CLASSIFIER_NEURALNETWORK_TYPE"] = ( + self.subsistute_networks: list["CLASSIFIER_NEURALNETWORK_TYPE"] = ( [classifier] if not isinstance(classifier, list) else classifier ) @@ -130,7 +130,7 @@ def __init__( self.verbose = verbose self._check_params() - def poison(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> Tuple[np.ndarray, np.ndarray]: + def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Iteratively finds optimal attack points starting at values at x @@ -179,7 +179,7 @@ def forward(self): # Initializing from the coefficients of last step gives faster convergence. 
s_init_coeff_list = [] n_poisons = len(x) - s_coeff: Union["torch.Tensor", List["torch.Tensor"]] + s_coeff: "torch.Tensor" | list["torch.Tensor"] for _, net in enumerate(self.subsistute_networks): # End to end training if self.endtoend: @@ -192,7 +192,7 @@ def forward(self): else: # pragma: no cover raise ValueError("Activations are None.") else: - layer_2: Union[int, str] = self.feature_layer + layer_2: int | str = self.feature_layer activations = net.get_activations(x, layer=layer_2, batch_size=self.batch_size, framework=True) if activations is not None: block_feats = [feat.detach() for feat in activations] @@ -205,7 +205,7 @@ def forward(self): else: # pragma: no cover if isinstance(self.feature_layer, list): raise NotImplementedError - layer_3: Union[int, str] = self.feature_layer + layer_3: int | str = self.feature_layer activations = net.get_activations(x, layer=layer_3, batch_size=self.batch_size, framework=True) if activations is not None: target_feat_list.append(activations.detach()) # type: ignore @@ -321,7 +321,7 @@ def loss_from_center( if end2end: loss = torch.tensor(0.0) for net, center_feats in zip(subs_net_list, target_feat_list): - poisons_feats: Union[List[float], "torch.Tensor", np.ndarray] + poisons_feats: list[float] | "torch.Tensor" | np.ndarray if net_repeat > 1: poisons_feats_repeats = [ net.get_activations(poison_batch(), layer=feature_layer, framework=True) for _ in range(net_repeat) diff --git a/art/attacks/poisoning/clean_label_backdoor_attack.py b/art/attacks/poisoning/clean_label_backdoor_attack.py index bcece4b70b..3b68e85e17 100644 --- a/art/attacks/poisoning/clean_label_backdoor_attack.py +++ b/art/attacks/poisoning/clean_label_backdoor_attack.py @@ -18,10 +18,10 @@ """ This module implements Clean Label Backdoor Attacks to poison data used in ML models. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING, Union +from typing import TYPE_CHECKING import numpy as np @@ -64,7 +64,7 @@ def __init__( proxy_classifier: "CLASSIFIER_LOSS_GRADIENTS_TYPE", target: np.ndarray, pp_poison: float = 0.33, - norm: Union[int, float, str] = np.inf, + norm: int | float | str = np.inf, eps: float = 0.3, eps_step: float = 0.1, max_iter: int = 100, @@ -101,9 +101,9 @@ def __init__( ) self._check_params() - def poison( # pylint: disable=W0221 - self, x: np.ndarray, y: Optional[np.ndarray] = None, broadcast: bool = True, **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: + def poison( + self, x: np.ndarray, y: np.ndarray | None = None, broadcast: bool = True, **kwargs + ) -> tuple[np.ndarray, np.ndarray]: """ Calls perturbation function on input x and returns the perturbed input and poison labels for the data. diff --git a/art/attacks/poisoning/feature_collision_attack.py b/art/attacks/poisoning/feature_collision_attack.py index 4f2aeeb5cb..996aad434c 100644 --- a/art/attacks/poisoning/feature_collision_attack.py +++ b/art/attacks/poisoning/feature_collision_attack.py @@ -18,11 +18,11 @@ """ This module implements clean-label attacks on Neural Networks. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations from functools import reduce import logging -from typing import Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -71,15 +71,15 @@ def __init__( self, classifier: "CLASSIFIER_NEURALNETWORK_TYPE", target: np.ndarray, - feature_layer: Union[str, int], + feature_layer: str | int, learning_rate: float = 500 * 255.0, decay_coeff: float = 0.5, stopping_tol: float = 1e-10, - obj_threshold: Optional[float] = None, + obj_threshold: float | None = None, num_old_obj: int = 40, max_iter: int = 120, similarity_coeff: float = 256.0, - watermark: Optional[float] = None, + watermark: float | None = None, verbose: bool = True, ): """ @@ -126,7 +126,7 @@ def __init__( raise ValueError("Type of estimator currently not supported.") self.attack_loss = tensor_norm(self.poison_feature_rep - self.target_feature_rep) - def poison(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> Tuple[np.ndarray, np.ndarray]: + def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Iteratively finds optimal attack points starting at values at x @@ -287,7 +287,7 @@ def get_class_name(obj: object) -> str: return module + "." + obj.__class__.__name__ -def tensor_norm(tensor, norm_type: Union[int, float, str] = 2): # pylint: disable=R1710 +def tensor_norm(tensor, norm_type: int | float | str = 2): # pylint: disable=inconsistent-return-statements """ Compute the norm of a tensor. diff --git a/art/attacks/poisoning/gradient_matching_attack.py b/art/attacks/poisoning/gradient_matching_attack.py index 3f89c347f2..a90c4dff97 100644 --- a/art/attacks/poisoning/gradient_matching_attack.py +++ b/art/attacks/poisoning/gradient_matching_attack.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Any, Dict, Tuple, TYPE_CHECKING, List +from typing import Any, TYPE_CHECKING import numpy as np from tqdm.auto import trange, tqdm @@ -33,7 +33,7 @@ from art.estimators.classification.classifier import ClassifierMixin if TYPE_CHECKING: - # pylint: disable=C0412 + from art.utils import CLASSIFIER_NEURALNETWORK_TYPE logger = logging.getLogger(__name__) @@ -67,9 +67,9 @@ def __init__( epsilon: float = 0.1, max_trials: int = 8, max_epochs: int = 250, - learning_rate_schedule: Tuple[List[float], List[int]] = ([1e-1, 1e-2, 1e-3, 1e-4], [100, 150, 200, 220]), + learning_rate_schedule: tuple[list[float], list[int]] = ([1e-1, 1e-2, 1e-3, 1e-4], [100, 150, 200, 220]), batch_size: int = 128, - clip_values: Tuple[float, float] = (0, 1.0), + clip_values: tuple[float, float] = (0, 1.0), verbose: int = 1, ): """ @@ -81,7 +81,7 @@ def __init__( :param max_trials: The maximum number of restarts to optimize the poison. :param max_epochs: The maximum number of epochs to optimize the train per trial. :param learning_rate_schedule: The learning rate schedule to optimize the poison. - A List of (learning rate, epoch) pairs. The learning rate is used + A list of (learning rate, epoch) pairs. The learning rate is used if the current epoch is less than the specified epoch. :param batch_size: Batch size. :param clip_values: The range of the input features to the classifier. 
@@ -155,7 +155,6 @@ def _initialize_poison_tensorflow( :param x_poison: A list of training data to poison a portion of. :param y_poison: A list of true labels for x_poison. """ - # pylint: disable=no-name-in-module from tensorflow.keras import backend as K import tensorflow as tf from tensorflow.keras.layers import Input, Embedding, Add, Lambda @@ -173,7 +172,7 @@ def _weight_grad(classifier: TensorFlowV2Classifier, x: tf.Tensor, target: tf.Te # Get the target gradient vector. import tensorflow as tf - with tf.GradientTape() as t: # pylint: disable=C0103 + with tf.GradientTape() as t: # pylint: disable=invalid-name t.watch(classifier.model.weights) output = classifier.model(x, training=False) loss = classifier.loss_object(target, output) @@ -204,10 +203,10 @@ def _weight_grad(classifier: TensorFlowV2Classifier, x: tf.Tensor, target: tf.Te def loss_fn(input_noised: tf.Tensor, target: tf.Tensor, grad_ws_norm: tf.Tensor): d_w2_norm = _weight_grad(classifier, input_noised, target) - B = 1 - tf.reduce_sum(grad_ws_norm * d_w2_norm) # pylint: disable=C0103 + B = 1 - tf.reduce_sum(grad_ws_norm * d_w2_norm) # pylint: disable=invalid-name return B - B = tf.keras.layers.Lambda(lambda x: loss_fn(x[0], x[1], x[2]))( # pylint: disable=C0103 + B = tf.keras.layers.Lambda(lambda x: loss_fn(x[0], x[1], x[2]))( # pylint: disable=invalid-name [input_noised, y_true_poison, self.grad_ws_norm] ) @@ -220,7 +219,7 @@ class PredefinedLRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): Use a preset learning rate based on the current training epoch. """ - def __init__(self, learning_rates: List[float], milestones: List[int]): + def __init__(self, learning_rates: list[float], milestones: list[int]): self.schedule = list(zip(milestones, learning_rates)) def __call__(self, step: int) -> float: @@ -231,7 +230,7 @@ def __call__(self, step: int) -> float: lr_prev = learning_rate return lr_prev - def get_config(self) -> Dict: + def get_config(self) -> dict: """ Returns the parameters. """ @@ -281,7 +280,7 @@ class NoiseEmbedding(nn.Module): Gradient matching noise layer. """ - def __init__(self, num_poison: int, len_noise: int, epsilon: float, clip_values: Tuple[float, float]): + def __init__(self, num_poison: int, len_noise: int, epsilon: float, clip_values: tuple[float, float]): super().__init__() self.embedding_layer = nn.Embedding(num_poison, len_noise) @@ -328,14 +327,14 @@ def __init__( def forward( self, x: torch.Tensor, indices_poison: torch.Tensor, y: torch.Tensor, grad_ws_norm: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: + ) -> tuple[torch.Tensor, torch.Tensor]: """ Applies the poison noise and compute the loss with respect to the target gradient. """ poisoned_samples = self.noise_embedding(x, indices_poison) d_w2_norm = _weight_grad(self.classifier, poisoned_samples, y) d_w2_norm.requires_grad_(True) - B_score = 1 - self.cos(grad_ws_norm, d_w2_norm) # pylint: disable=C0103 + B_score = 1 - self.cos(grad_ws_norm, d_w2_norm) # pylint: disable=invalid-name return B_score, poisoned_samples self.grad_ws_norm = _weight_grad( @@ -360,7 +359,7 @@ class PredefinedLRSchedule: Use a preset learning rate based on the current training epoch. 
""" - def __init__(self, learning_rates: List[float], milestones: List[int]): + def __init__(self, learning_rates: list[float], milestones: list[int]): self.schedule = list(zip(milestones, learning_rates)) def __call__(self, step: int) -> float: @@ -371,7 +370,7 @@ def __call__(self, step: int) -> float: lr_prev = learning_rate return lr_prev - def get_config(self) -> Dict: + def get_config(self) -> dict: """ returns a dictionary of parameters. """ @@ -383,7 +382,7 @@ def get_config(self) -> Dict: def poison( self, x_trigger: np.ndarray, y_trigger: np.ndarray, x_train: np.ndarray, y_train: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray]: """ Optimizes a portion of poisoned samples from x_train to make a model classify x_target as y_target by matching the gradients. @@ -418,7 +417,7 @@ def poison( num_poison_samples = int(self.percent_poison * len(x_train)) # Try poisoning num_trials times and choose the best one. - best_B = np.finfo(np.float32).max # pylint: disable=C0103 + best_B = np.finfo(np.float32).max # pylint: disable=invalid-name best_x_poisoned = None best_indices_poison = None @@ -433,11 +432,11 @@ def poison( x_poison = x_train[indices_poison] y_poison = y_train[indices_poison] self._initialize_poison(x_trigger, y_trigger, x_poison, y_poison) - x_poisoned, B_ = poisoner(x_poison, y_poison) # pylint: disable=C0103 + x_poisoned, B_ = poisoner(x_poison, y_poison) # pylint: disable=invalid-name finish_poisoning() - B_ = np.mean(B_) # Averaging B losses from multiple batches. # pylint: disable=C0103 + B_ = np.mean(B_) # Averaging B losses from multiple batches. # pylint: disable=invalid-name if B_ < best_B: - best_B = B_ # pylint: disable=C0103 + best_B = B_ # pylint: disable=invalid-name best_x_poisoned = x_poisoned best_indices_poison = indices_poison @@ -446,7 +445,7 @@ def poison( x_train[best_indices_poison] = best_x_poisoned return x_train, y_train # y_train has not been modified. - def _poison__pytorch(self, x_poison: np.ndarray, y_poison: np.ndarray) -> Tuple[Any, Any]: + def _poison__pytorch(self, x_poison: np.ndarray, y_poison: np.ndarray) -> tuple[Any, Any]: """ Optimize the poison by matching the gradient within the perturbation budget. @@ -503,7 +502,7 @@ def __len__(self): epoch_iterator.set_postfix(loss=sum_loss / count) self.lr_schedule.step() - B_sum = 0 # pylint: disable=C0103 + B_sum = 0 # pylint: disable=invalid-name count = 0 all_poisoned_samples = [] self.backdoor_model.eval() @@ -514,13 +513,13 @@ def __len__(self): x = x.to(device) y = y.to(device) indices = indices.to(device) - B, poisoned_samples = self.backdoor_model(x, indices, y, self.grad_ws_norm) # pylint: disable=C0103 + B, poisoned_samples = self.backdoor_model(x, indices, y, self.grad_ws_norm) # pylint: disable=invalid-name all_poisoned_samples.append(poisoned_samples.detach().cpu().numpy()) - B_sum += B.detach().cpu().numpy() # pylint: disable=C0103 + B_sum += B.detach().cpu().numpy() # pylint: disable=invalid-name count += 1 return np.concatenate(all_poisoned_samples, axis=0), B_sum / count - def _poison__tensorflow(self, x_poison: np.ndarray, y_poison: np.ndarray) -> Tuple[Any, Any]: + def _poison__tensorflow(self, x_poison: np.ndarray, y_poison: np.ndarray) -> tuple[Any, Any]: """ Optimize the poison by matching the gradient within the perturbation budget. 
@@ -545,7 +544,7 @@ def _poison__tensorflow(self, x_poison: np.ndarray, y_poison: np.ndarray) -> Tup epochs=self.max_epochs, verbose=0, ) - [input_noised_, B_] = self.backdoor_model.predict( # pylint: disable=C0103 + [input_noised_, B_] = self.backdoor_model.predict( # pylint: disable=invalid-name [x_poison, y_poison, np.arange(len(y_poison))], batch_size=self.batch_size ) diff --git a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor.py b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor.py index 7b5a7e791a..2b5204f150 100644 --- a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor.py +++ b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor.py @@ -20,10 +20,10 @@ | Paper link: https://arxiv.org/abs/1910.00033 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -80,12 +80,12 @@ def __init__( classifier: "CLASSIFIER_NEURALNETWORK_TYPE", target: np.ndarray, source: np.ndarray, - feature_layer: Union[str, int], + feature_layer: str | int, backdoor: PoisoningAttackBackdoor, eps: float = 0.1, learning_rate: float = 0.001, decay_coeff: float = 0.95, - decay_iter: Union[int, List[int]] = 2000, + decay_iter: int | list[int] = 2000, stopping_threshold: float = 10, max_iter: int = 5000, batch_size: float = 100, @@ -179,9 +179,7 @@ def __init__( else: raise ValueError("Only Pytorch, Keras, and TensorFlowV2 classifiers are supported") - def poison( # pylint: disable=W0221 - self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: + def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Calls perturbation function on the dataset x and returns only the perturbed inputs and their indices in the dataset. 
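A small self-contained sketch of how the returned (perturbed samples, indices) pair is typically consumed; `attack` in the usage comment is an assumed, already configured instance rather than code from this module.

import numpy as np

def splice_poison(x_train, poison_samples, poison_indices):
    # Insert the perturbed samples back into the training set at the reported indices;
    # labels are left untouched, which is what makes the trigger "hidden" at training time.
    x_poisoned = np.copy(x_train)
    x_poisoned[poison_indices] = poison_samples
    return x_poisoned

# e.g. x_train_poisoned = splice_poison(x_train, *attack.poison(x_train, y_train))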
diff --git a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py index cd91c54e88..e863a158db 100644 --- a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py +++ b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py @@ -20,10 +20,10 @@ | Paper link: https://arxiv.org/abs/1910.00033 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np import six @@ -38,7 +38,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + from art.estimators.classification.tensorflow import TensorFlowV2Classifier logger = logging.getLogger(__name__) @@ -58,15 +58,15 @@ class HiddenTriggerBackdoorKeras(PoisoningAttackWhiteBox): def __init__( self, - classifier: Union["KerasClassifier", "TensorFlowV2Classifier"], + classifier: "KerasClassifier" | "TensorFlowV2Classifier", target: np.ndarray, source: np.ndarray, - feature_layer: Union[str, int], + feature_layer: str | int, backdoor: PoisoningAttackBackdoor, eps: float = 0.1, learning_rate: float = 0.001, decay_coeff: float = 0.95, - decay_iter: Union[int, List[int]] = 2000, + decay_iter: int | list[int] = 2000, stopping_threshold: float = 10, max_iter: int = 5000, batch_size: float = 100, @@ -116,9 +116,7 @@ def __init__( self.verbose = verbose self.print_iter = print_iter - def poison( # pylint: disable=W0221 - self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: + def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Calls perturbation function on the dataset x and returns only the perturbed input and their indices in the dataset. 
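A plain NumPy sketch of the feature-space objective this attack minimizes (the Keras/TensorFlow code below computes the same quantity on tensors); the function and argument names are illustrative.

import numpy as np

def feature_space_loss(feat_poison, feat_source_triggered):
    # Squared L2 distance between the two feature representations; minimizing it moves the
    # clean-looking poison images close to the triggered source images in feature space.
    diff = np.ravel(feat_poison) - np.ravel(feat_source_triggered)
    return float(np.sum(diff ** 2))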
@@ -135,11 +133,11 @@ def poison( # pylint: disable=W0221 from scipy.spatial import distance if isinstance(self.estimator, KerasClassifier): - # pylint: disable=E0401 + if not self.estimator.is_tensorflow: import keras.backend as k else: - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras.backend as k data = np.copy(x) if y is None: @@ -248,9 +246,10 @@ def poison( # pylint: disable=W0221 output_tensor = self._get_keras_tensor() attack_loss = tf.math.square(tf.norm(feat1_var - output_tensor)) - attack_grad_f = k.gradients(attack_loss, self.estimator._input)[0] # pylint: disable=W0212 + attack_grad_f = k.gradients(attack_loss, self.estimator._input)[0] self._custom_loss["loss_function"] = k.function( - [self.estimator._input, k.learning_phase()], [attack_grad_f] # pylint: disable=W0212 + [self.estimator._input, k.learning_phase()], + [attack_grad_f], ) else: feat1_var = self._custom_loss["feat_var"] @@ -296,26 +295,24 @@ def _get_keras_tensor(self): Helper function to get the feature layer output tensor in the keras graph :return: Output tensor """ - if self.estimator._layer_names is None: # pylint: disable=W0212 + if self.estimator._layer_names is None: raise ValueError("No layer names identified.") if isinstance(self.feature_layer, six.string_types): - if self.feature_layer not in self.estimator._layer_names: # pylint: disable=W0212 + if self.feature_layer not in self.estimator._layer_names: raise ValueError(f"Layer name {self.feature_layer} is not part of the graph.") layer_name = self.feature_layer elif isinstance(self.feature_layer, int): - if self.feature_layer < 0 or self.feature_layer >= len( - self.estimator._layer_names # pylint: disable=W0212 - ): + if self.feature_layer < 0 or self.feature_layer >= len(self.estimator._layer_names): raise ValueError( - f"Layer index {self.feature_layer} is outside of range [0 to " # pylint: disable=W0212 - f"{len(self.estimator._layer_names) - 1}])." # pylint: disable=W0212 + f"Layer index {self.feature_layer} is outside of range [0 to " + f"{len(self.estimator._layer_names) - 1}])." 
) - layer_name = self.estimator._layer_names[self.feature_layer] # pylint: disable=W0212 + layer_name = self.estimator._layer_names[self.feature_layer] else: raise TypeError("Layer must be of type `str` or `int`.") - keras_layer = self.estimator._model.get_layer(layer_name) # pylint: disable=W0212 + keras_layer = self.estimator._model.get_layer(layer_name) num_inbound_nodes = len(getattr(keras_layer, "_inbound_nodes", [])) if num_inbound_nodes > 1: layer_output = keras_layer.get_output_at(0) @@ -336,7 +333,5 @@ def _apply_preprocessing(self, x: np.ndarray) -> np.ndarray: x_expanded = x # Apply preprocessing - x_preprocessed, _ = self.estimator._apply_preprocessing( # pylint: disable=W0212 - x=x_expanded, y=None, fit=False - ) + x_preprocessed, _ = self.estimator._apply_preprocessing(x=x_expanded, y=None, fit=False) return x_preprocessed diff --git a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py index e5466f5510..10ed919e84 100644 --- a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py +++ b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py @@ -38,10 +38,10 @@ | Paper link: https://arxiv.org/abs/1910.00033 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -54,7 +54,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + from art.estimators.classification.pytorch import PyTorchClassifier logger = logging.getLogger(__name__) @@ -77,12 +77,12 @@ def __init__( classifier: "PyTorchClassifier", target: np.ndarray, source: np.ndarray, - feature_layer: Union[str, int], + feature_layer: str | int, backdoor: PoisoningAttackBackdoor, eps: float = 0.1, learning_rate: float = 0.001, decay_coeff: float = 0.95, - decay_iter: Union[int, List[int]] = 2000, + decay_iter: int | list[int] = 2000, stopping_threshold: float = 10, max_iter: int = 5000, batch_size: float = 100, @@ -132,9 +132,7 @@ def __init__( self.verbose = verbose self.print_iter = print_iter - def poison( # pylint: disable=W0221 - self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs - ) -> Tuple[np.ndarray, np.ndarray]: + def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Calls perturbation function on the dataset x and returns only the perturbed input and their indices in the dataset. diff --git a/art/attacks/poisoning/perturbations/audio_perturbations.py b/art/attacks/poisoning/perturbations/audio_perturbations.py index 1c8c723fe9..9b0ab67cb7 100644 --- a/art/attacks/poisoning/perturbations/audio_perturbations.py +++ b/art/attacks/poisoning/perturbations/audio_perturbations.py @@ -21,7 +21,7 @@ because loading the audio trigger from disk (librosa.load()) is very slow and should be done only once. 
""" -from typing import Optional + import librosa import numpy as np @@ -91,7 +91,7 @@ def __init__( self, sampling_rate: int = 16000, backdoor_path: str = "../../../utils/data/backdoors/cough_trigger.wav", - duration: Optional[float] = None, + duration: float | None = None, **kwargs, ): """ diff --git a/art/attacks/poisoning/perturbations/image_perturbations.py b/art/attacks/poisoning/perturbations/image_perturbations.py index b48985e810..26061dfcb4 100644 --- a/art/attacks/poisoning/perturbations/image_perturbations.py +++ b/art/attacks/poisoning/perturbations/image_perturbations.py @@ -18,7 +18,7 @@ """ Adversarial perturbations designed to work for images. """ -from typing import Optional, Tuple +from __future__ import annotations import numpy as np @@ -110,7 +110,7 @@ def insert_image( random: bool = True, x_shift: int = 0, y_shift: int = 0, - size: Optional[Tuple[int, int]] = None, + size: tuple[int, int] | None = None, mode: str = "L", blend=0.8, ) -> np.ndarray: @@ -121,7 +121,7 @@ def insert_image( :param x: A single image or batch of images of shape NHWC, NCHW, or HWC. Input is in range [0,1]. :param backdoor_path: The path to the image to insert as a trigger. :param channels_first: Whether the channels axis is in the first or last dimension - :param random: Whether or not the image should be randomly placed somewhere on the image. + :param random: Whether the image should be randomly placed somewhere on the image. :param x_shift: Number of pixels from the left to shift the trigger (when not using random placement). :param y_shift: Number of pixels from the right to shift the trigger (when not using random placement). :param size: The size the trigger image should be (height, width). Default `None` if no resizing necessary. diff --git a/art/attacks/poisoning/poisoning_attack_svm.py b/art/attacks/poisoning/poisoning_attack_svm.py index 220f10da89..2b4fed5a6b 100644 --- a/art/attacks/poisoning/poisoning_attack_svm.py +++ b/art/attacks/poisoning/poisoning_attack_svm.py @@ -21,7 +21,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple import numpy as np from tqdm.auto import tqdm @@ -80,7 +79,7 @@ def __init__( :raises `NotImplementedError`, `TypeError`: If the argument classifier has the wrong type. :param verbose: Show progress bars. """ - # pylint: disable=W0212 + from sklearn.svm import LinearSVC, SVC super().__init__(classifier=classifier) @@ -104,7 +103,7 @@ def __init__( self.verbose = verbose self._check_params() - def poison(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> Tuple[np.ndarray, np.ndarray]: + def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray]: """ Iteratively finds optimal attack points starting at values at `x`. @@ -152,7 +151,7 @@ def generate_attack_point(self, x_attack: np.ndarray, y_attack: np.ndarray) -> n :param y_attack: The initial attack label. :return: A tuple containing the final attack point and the poisoned model. """ - # pylint: disable=W0212 + from sklearn.preprocessing import normalize if self.y_train is None or self.x_train is None: @@ -197,7 +196,7 @@ def predict_sign(self, vec: np.ndarray) -> np.ndarray: :param vec: An input array. :return: An array of -1/1 predictions. 
""" - # pylint: disable=W0212 + preds = self.estimator.model.predict(vec) return 2 * preds - 1 @@ -210,7 +209,7 @@ def attack_gradient(self, attack_point: np.ndarray, tol: float = 0.0001) -> np.n :param tol: Tolerance level. :return: The attack gradient. """ - # pylint: disable=W0212 + if self.x_val is None or self.y_val is None: # pragma: no cover raise ValueError("The values of `x_val` and `y_val` are required for computing the gradients.") diff --git a/art/attacks/poisoning/sleeper_agent_attack.py b/art/attacks/poisoning/sleeper_agent_attack.py index 3202cf013f..2de9821ab9 100644 --- a/art/attacks/poisoning/sleeper_agent_attack.py +++ b/art/attacks/poisoning/sleeper_agent_attack.py @@ -20,10 +20,10 @@ | Paper link: https://arxiv.org/abs/2106.08970 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Tuple, TYPE_CHECKING, List, Union +from typing import TYPE_CHECKING import random import numpy as np @@ -37,7 +37,7 @@ if TYPE_CHECKING: - # pylint: disable=C0412 + from art.utils import CLASSIFIER_NEURALNETWORK_TYPE logger = logging.getLogger(__name__) @@ -55,13 +55,13 @@ def __init__( classifier: "CLASSIFIER_NEURALNETWORK_TYPE", percent_poison: float, patch: np.ndarray, - indices_target: List[int], + indices_target: list[int], epsilon: float = 0.1, max_trials: int = 8, max_epochs: int = 250, - learning_rate_schedule: Tuple[List[float], List[int]] = ([1e-1, 1e-2, 1e-3, 1e-4], [100, 150, 200, 220]), + learning_rate_schedule: tuple[list[float], list[int]] = ([1e-1, 1e-2, 1e-3, 1e-4], [100, 150, 200, 220]), batch_size: int = 128, - clip_values: Tuple[float, float] = (0, 1.0), + clip_values: tuple[float, float] = (0, 1.0), verbose: int = 1, patching_strategy: str = "random", selection_strategy: str = "random", @@ -84,7 +84,7 @@ def __init__( :param max_trials: The maximum number of restarts to optimize the poison. :param max_epochs: The maximum number of epochs to optimize the train per trial. :param learning_rate_schedule: The learning rate schedule to optimize the poison. - A List of (learning rate, epoch) pairs. The learning rate is used + A list of (learning rate, epoch) pairs. The learning rate is used if the current epoch is less than the specified epoch. :param batch_size: Batch size. :param clip_values: The range of the input features to the classifier. @@ -134,7 +134,6 @@ def __init__( self.initial_epoch = 0 self.retrain_batch_size = retrain_batch_size - # pylint: disable=W0221 def poison( # type: ignore self, x_trigger: np.ndarray, @@ -143,7 +142,7 @@ def poison( # type: ignore y_train: np.ndarray, x_test: np.ndarray, y_test: np.ndarray, - ) -> Tuple[np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray]: """ Optimizes a portion of poisoned samples from x_train to make a model classify x_target as y_target by matching the gradients. @@ -189,7 +188,7 @@ def poison( # type: ignore num_poison_samples = int(self.percent_poison * len(x_train_target_samples)) # Try poisoning num_trials times and choose the best one. 
- best_B = np.finfo(np.float32).max # pylint: disable=C0103 + best_B = np.finfo(np.float32).max # pylint: disable=invalid-name best_x_poisoned: np.ndarray best_indices_poison: np.ndarray @@ -214,24 +213,24 @@ def poison( # type: ignore self.max_epochs = retrain_epochs for i in range(self.retraining_factor): if i == self.retraining_factor - 1: - x_poisoned, B_ = poisoner(x_poison, y_poison) # pylint: disable=C0103 + x_poisoned, B_ = poisoner(x_poison, y_poison) # pylint: disable=invalid-name else: - x_poisoned, B_ = poisoner(x_poison, y_poison) # pylint: disable=C0103 + x_poisoned, B_ = poisoner(x_poison, y_poison) # pylint: disable=invalid-name self._model_retraining(x_poisoned, x_train, y_train, x_test, y_test) self.initial_epoch = self.max_epochs self.max_epochs = self.max_epochs + retrain_epochs else: - x_poisoned, B_ = poisoner(x_poison, y_poison) # pylint: disable=C0103 + x_poisoned, B_ = poisoner(x_poison, y_poison) # pylint: disable=invalid-name finish_poisoning() - B_ = np.mean(B_) # Averaging B losses from multiple batches. # pylint: disable=C0103 + B_ = np.mean(B_) # Averaging B losses from multiple batches. # pylint: disable=invalid-name if B_ < best_B: - best_B = B_ # pylint: disable=C0103 + best_B = B_ # pylint: disable=invalid-name best_x_poisoned = x_poisoned best_indices_poison = self.indices_poison if best_B == np.finfo(np.float32).max: logger.warning("Attack unsuccessful: all loss values were non-finite. Defaulting to final trial.") - best_B = B_ # pylint: disable=C0103 + best_B = B_ # pylint: disable=invalid-name best_x_poisoned = x_poisoned best_indices_poison = self.indices_poison # set indices_poison to be the best indices after all trials @@ -258,7 +257,7 @@ def poison( # type: ignore raise NotImplementedError("SleeperAgentAttack is currently implemented only for PyTorch and TensorFlowV2.") return x_train, y_train - def _select_target_train_samples(self, x_train: np.ndarray, y_train: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def _select_target_train_samples(self, x_train: np.ndarray, y_train: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Used for selecting train samples from target class :param x_train: clean training data @@ -341,7 +340,7 @@ def _create_model( y_test: np.ndarray, batch_size: int = 128, epochs: int = 80, - ) -> Union["TensorFlowV2Classifier", "PyTorchClassifier"]: + ) -> "TensorFlowV2Classifier" | "PyTorchClassifier": """ Creates a new model. 
@@ -418,7 +417,7 @@ def _select_poison_indices( for i in range(len(x_samples) - 1): image = tf.constant(x_samples[i : i + 1]) label = tf.constant(y_samples[i : i + 1]) - with tf.GradientTape() as t: # pylint: disable=C0103 + with tf.GradientTape() as t: # pylint: disable=invalid-name t.watch(classifier.model.weights) output = classifier.model(image, training=False) loss_tf = classifier.loss_object(label, output) # type: ignore diff --git a/art/config.py b/art/config.py index b0740387a6..b56a8ee9c2 100644 --- a/art/config.py +++ b/art/config.py @@ -28,14 +28,14 @@ # ------------------------------------------------------------------------------------------------- CONSTANTS AND TYPES -ART_NUMPY_DTYPE = np.float32 # pylint: disable=C0103 +ART_NUMPY_DTYPE = np.float32 # pylint: disable=invalid-name ART_DATA_PATH: str # --------------------------------------------------------------------------------------------- DEFAULT PACKAGE CONFIGS _folder = os.path.expanduser("~") if not os.access(_folder, os.W_OK): # pragma: no cover - _folder = "/tmp" # pylint: disable=C0103 + _folder = "/tmp" # pylint: disable=invalid-name _folder = os.path.join(_folder, ".art") @@ -50,7 +50,7 @@ def set_data_path(path): if not os.access(expanded_path, os.W_OK): # pragma: no cover logger.warning("path %s is read only", expanded_path) - global ART_DATA_PATH # pylint: disable=W0603 + global ART_DATA_PATH # pylint: disable=global-statement ART_DATA_PATH = expanded_path logger.info("set ART_DATA_PATH to %s", expanded_path) diff --git a/art/data_generators.py b/art/data_generators.py index dacc06eafe..7be00cb2cc 100644 --- a/art/data_generators.py +++ b/art/data_generators.py @@ -22,12 +22,12 @@ their own generators following the :class:`.DataGenerator` interface. For large, numpy array-based datasets, the :class:`.NumpyDataGenerator` class can be flexibly used with `fit_generator` on framework-specific classifiers. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc import inspect import logging -from typing import Any, Dict, Generator, Iterator, Optional, Tuple, Union, TYPE_CHECKING +from typing import Any, Generator, Iterator, TYPE_CHECKING import numpy as np @@ -45,7 +45,7 @@ class DataGenerator(abc.ABC): Base class for data generators. """ - def __init__(self, size: Optional[int], batch_size: int) -> None: + def __init__(self, size: int | None, batch_size: int) -> None: """ Base initializer for data generators. @@ -63,7 +63,7 @@ def __init__(self, size: Optional[int], batch_size: int) -> None: if size is not None and batch_size > size: raise ValueError("The batch size must be smaller than the dataset size.") - self._iterator: Optional[Any] = None + self._iterator: Any | None = None @abc.abstractmethod def get_batch(self) -> tuple: @@ -90,7 +90,7 @@ def batch_size(self) -> int: return self._batch_size @property - def size(self) -> Optional[int]: + def size(self) -> int | None: """ :return: Return the dataset size. 
""" @@ -180,14 +180,14 @@ class KerasDataGenerator(DataGenerator): def __init__( self, - iterator: Union[ - "keras.utils.Sequence", - "tf.keras.utils.Sequence", - "keras.preprocessing.image.ImageDataGenerator", - "tf.keras.preprocessing.image.ImageDataGenerator", - Generator, - ], - size: Optional[int], + iterator: ( + "keras.utils.Sequence" + | "tf.keras.utils.Sequence" + | "keras.preprocessing.image.ImageDataGenerator" + | "tf.keras.preprocessing.image.ImageDataGenerator" + | Generator + ), + size: int | None, batch_size: int, ) -> None: """ @@ -309,7 +309,7 @@ def __init__( sess: "tf.Session", iterator: "tf.data.Iterator", iterator_type: str, - iterator_arg: Union[Dict, Tuple, "tf.Operation"], + iterator_arg: dict | tuple | "tf.Operation", size: int, batch_size: int, ) -> None: @@ -325,7 +325,7 @@ def __init__( :param batch_size: Size of the minibatches. :raises `TypeError`, `ValueError`: If input parameters are not valid. """ - # pylint: disable=E0401 + import tensorflow.compat.v1 as tf super().__init__(size=size, batch_size=batch_size) @@ -394,7 +394,7 @@ def __init__(self, iterator: "tf.data.Dataset", size: int, batch_size: int) -> N :param batch_size: Size of the minibatches. :raises `TypeError`, `ValueError`: If input parameters are not valid. """ - # pylint: disable=E0401 + import tensorflow as tf super().__init__(size=size, batch_size=batch_size) diff --git a/art/defences/detector/evasion/binary_activation_detector.py b/art/defences/detector/evasion/binary_activation_detector.py index 9c3201f49d..27a6e4457d 100644 --- a/art/defences/detector/evasion/binary_activation_detector.py +++ b/art/defences/detector/evasion/binary_activation_detector.py @@ -19,10 +19,10 @@ Module containing different methods for the detection of adversarial examples. All models are considered to be binary detectors. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -46,7 +46,7 @@ def __init__( self, classifier: "CLASSIFIER_NEURALNETWORK_TYPE", detector: "CLASSIFIER_NEURALNETWORK_TYPE", - layer: Union[int, str], + layer: int | str, ) -> None: """ Create a `BinaryActivationDetector` instance which performs binary classification on activation information. @@ -75,9 +75,7 @@ def __init__( raise ValueError(f"Layer name {layer} is not part of the graph.") self._layer_name = layer - def _get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def _get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: x_activations = self.classifier.get_activations(x, layer, batch_size, framework) if x_activations is None: raise ValueError("Classifier activations are null.") @@ -100,7 +98,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in x_activations: np.ndarray = self._get_activations(x, self._layer_name, batch_size) self.detector.fit(x_activations, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) - def detect(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> Tuple[dict, np.ndarray]: + def detect(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> tuple[dict, np.ndarray]: """ Perform detection of adversarial data and return prediction as tuple. 
diff --git a/art/defences/detector/evasion/binary_input_detector.py b/art/defences/detector/evasion/binary_input_detector.py index 8933f11050..f1990238ca 100644 --- a/art/defences/detector/evasion/binary_input_detector.py +++ b/art/defences/detector/evasion/binary_input_detector.py @@ -22,7 +22,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -63,7 +63,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in """ self.detector.fit(x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) - def detect(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> Tuple[dict, np.ndarray]: + def detect(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> tuple[dict, np.ndarray]: """ Perform detection of adversarial data and return prediction as tuple. diff --git a/art/defences/detector/evasion/evasion_detector.py b/art/defences/detector/evasion/evasion_detector.py index aceb5e7c05..db6b82c970 100644 --- a/art/defences/detector/evasion/evasion_detector.py +++ b/art/defences/detector/evasion/evasion_detector.py @@ -21,7 +21,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import abc -from typing import Any, Dict, List, Tuple +from typing import Any import numpy as np @@ -31,7 +31,7 @@ class EvasionDetector(abc.ABC): Abstract base class for all evasion detectors. """ - defence_params: List[str] = [] + defence_params: list[str] = [] def __init__(self) -> None: """ @@ -53,7 +53,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in raise NotImplementedError @abc.abstractmethod - def detect(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> Tuple[dict, np.ndarray]: + def detect(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> tuple[dict, np.ndarray]: """ Perform detection of adversarial data and return prediction as tuple. @@ -77,7 +77,7 @@ def set_params(self, **kwargs) -> None: setattr(self, key, value) self._check_params() - def get_params(self) -> Dict[str, Any]: + def get_params(self) -> dict[str, Any]: """ Returns dictionary of parameters used to run defence. diff --git a/art/defences/detector/evasion/subsetscanning/detector.py b/art/defences/detector/evasion/subsetscanning/detector.py index 54a46ec6f6..da1e64f324 100644 --- a/art/defences/detector/evasion/subsetscanning/detector.py +++ b/art/defences/detector/evasion/subsetscanning/detector.py @@ -19,11 +19,11 @@ This module implements the fast generalized subset scan based detector. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import sys -from typing import Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from sklearn import metrics @@ -57,7 +57,7 @@ def __init__( self, classifier: "CLASSIFIER_NEURALNETWORK_TYPE", bgd_data: np.ndarray, - layer: Union[int, str], + layer: int | str, scoring_function: Literal["BerkJones", "HigherCriticism", "KolmarovSmirnov"] = "BerkJones", verbose: bool = True, ) -> None: @@ -114,9 +114,7 @@ def __init__( bgd_scores.append(best_score) self.bgd_scores = np.asarray(bgd_scores) - def _get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def _get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: x_activations = self.classifier.get_activations(x, layer, batch_size, framework) if x_activations is None: raise ValueError("Classifier activations are null.") @@ -161,10 +159,10 @@ def scan( self, clean_x: np.ndarray, adv_x: np.ndarray, - clean_size: Optional[int] = None, - adv_size: Optional[int] = None, + clean_size: int | None = None, + adv_size: int | None = None, run: int = 10, - ) -> Tuple[np.ndarray, np.ndarray, float]: + ) -> tuple[np.ndarray, np.ndarray, float]: """ Returns scores of highest scoring subsets. @@ -226,7 +224,7 @@ def scan( return clean_scores_array, adv_scores_array, detection_power - def detect(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> Tuple[dict, np.ndarray]: + def detect(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> tuple[dict, np.ndarray]: """ Perform detection of adversarial data and return prediction as tuple. diff --git a/art/defences/detector/evasion/subsetscanning/scanner.py b/art/defences/detector/evasion/subsetscanning/scanner.py index 5d70ee099d..0481a9069b 100644 --- a/art/defences/detector/evasion/subsetscanning/scanner.py +++ b/art/defences/detector/evasion/subsetscanning/scanner.py @@ -18,7 +18,7 @@ """ Subset scanning based on FGSS """ -from typing import Callable, Tuple +from collections.abc import Callable import numpy as np @@ -38,7 +38,7 @@ def fgss_individ_for_nets( pvalues: np.ndarray, a_max: float = 0.5, score_function: Callable[[np.ndarray, np.ndarray, np.ndarray], np.ndarray] = ScoringFunctions.get_score_bj_fast, - ) -> Tuple[float, np.ndarray, np.ndarray, float]: + ) -> tuple[float, np.ndarray, np.ndarray, float]: """ Finds the highest scoring subset of records and attribute. Return the subsets, the score, and the alpha that maximizes the score. @@ -91,7 +91,7 @@ def fgss_for_nets( restarts: int = 10, image_to_node_init: bool = False, score_function: Callable[[np.ndarray, np.ndarray, np.ndarray], np.ndarray] = ScoringFunctions.get_score_bj_fast, - ) -> Tuple[float, np.ndarray, np.ndarray, float]: + ) -> tuple[float, np.ndarray, np.ndarray, float]: """ Finds the highest scoring subset of records and attribute. Return the subsets, the score, and the alpha that maximizes the score iterates between images and nodes, each time performing NPSS efficient maximization. 
diff --git a/art/defences/detector/evasion/subsetscanning/scanningops.py b/art/defences/detector/evasion/subsetscanning/scanningops.py index e8d770ba85..e47bec0a70 100644 --- a/art/defences/detector/evasion/subsetscanning/scanningops.py +++ b/art/defences/detector/evasion/subsetscanning/scanningops.py @@ -18,7 +18,7 @@ """ Scanning operations """ -from typing import Callable, Tuple +from collections.abc import Callable import numpy as np @@ -34,7 +34,7 @@ def optimize_in_single_dimension( a_max: float, image_to_node: bool, score_function: Callable[[np.ndarray, np.ndarray, np.ndarray], np.ndarray], - ) -> Tuple[float, np.ndarray, float]: + ) -> tuple[float, np.ndarray, float]: """ Optimizes over all subsets of nodes for a given subset of images or over all subsets of images for a given subset of nodes. @@ -140,7 +140,7 @@ def single_restart( indices_of_seeds: np.ndarray, image_to_node: bool, score_function: Callable[[np.ndarray, np.ndarray, np.ndarray], np.ndarray], - ) -> Tuple[float, np.ndarray, np.ndarray, float]: + ) -> tuple[float, np.ndarray, np.ndarray, float]: """ Here we control the iteration between images->nodes and nodes->images. It starts with a fixed subset of nodes by default. diff --git a/art/defences/detector/poison/activation_defence.py b/art/defences/detector/poison/activation_defence.py index cf99dd30c2..17a17ef00f 100644 --- a/art/defences/detector/poison/activation_defence.py +++ b/art/defences/detector/poison/activation_defence.py @@ -24,13 +24,13 @@ defence, see https://arxiv.org/abs/1905.13409 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import os import pickle import time -from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np @@ -81,8 +81,8 @@ def __init__( classifier: "CLASSIFIER_NEURALNETWORK_TYPE", x_train: np.ndarray, y_train: np.ndarray, - generator: Optional[DataGenerator] = None, - ex_re_threshold: Optional[float] = None, + generator: DataGenerator | None = None, + ex_re_threshold: float | None = None, ) -> None: """ Create an :class:`.ActivationDefence` object with the provided classifier. 
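A minimal scikit-learn sketch of the per-class activation clustering idea, assuming the activations have already been extracted and dimensionality-reduced (this is not the `ActivationDefence` API itself):

import numpy as np
from sklearn.cluster import KMeans

def flag_suspicious(activations_for_class, nb_clusters=2):
    # Cluster one class's (reduced) activations and mark members of the smallest cluster
    # as suspected poison, mirroring the "smaller" cluster analysis.
    assignments = KMeans(n_clusters=nb_clusters, n_init=10).fit_predict(activations_for_class)
    sizes = np.bincount(assignments, minlength=nb_clusters)
    return assignments == int(np.argmin(sizes))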
@@ -101,15 +101,15 @@ def __init__( self.reduce = "PCA" self.cluster_analysis = "smaller" self.generator = generator - self.activations_by_class: List[np.ndarray] = [] - self.clusters_by_class: List[np.ndarray] = [] + self.activations_by_class: list[np.ndarray] = [] + self.clusters_by_class: list[np.ndarray] = [] self.assigned_clean_by_class: np.ndarray - self.is_clean_by_class: List[np.ndarray] = [] + self.is_clean_by_class: list[np.ndarray] = [] self.errors_by_class: np.ndarray - self.red_activations_by_class: List[np.ndarray] = [] # Activations reduced by class + self.red_activations_by_class: list[np.ndarray] = [] # Activations reduced by class self.evaluator = GroundTruthEvaluator() - self.is_clean_lst: List[int] = [] - self.confidence_level: List[float] = [] + self.is_clean_lst: list[int] = [] + self.confidence_level: list[float] = [] self.poisonous_clusters: np.ndarray self.clusterer = MiniBatchKMeans(n_clusters=self.nb_clusters) self.ex_re_threshold = ex_re_threshold @@ -163,8 +163,7 @@ def evaluate_defence(self, is_clean: np.ndarray, **kwargs) -> str: ) return conf_matrix_json - # pylint: disable=W0221 - def detect_poison(self, **kwargs) -> Tuple[Dict[str, Any], List[int]]: + def detect_poison(self, **kwargs) -> tuple[dict[str, Any], list[int]]: """ Returns poison detected and a report. @@ -242,7 +241,7 @@ def detect_poison(self, **kwargs) -> Tuple[Dict[str, Any], List[int]]: return report, self.is_clean_lst - def cluster_activations(self, **kwargs) -> Tuple[List[np.ndarray], List[np.ndarray]]: + def cluster_activations(self, **kwargs) -> tuple[list[np.ndarray], list[np.ndarray]]: """ Clusters activations and returns cluster_by_class and red_activations_by_class, where cluster_by_class[i][j] is the cluster to which the j-th data point in the ith class belongs and the correspondent activations reduced by @@ -306,7 +305,7 @@ class red_activations_by_class[i][j]. return self.clusters_by_class, self.red_activations_by_class - def analyze_clusters(self, **kwargs) -> Tuple[Dict[str, Any], np.ndarray]: + def analyze_clusters(self, **kwargs) -> tuple[dict[str, Any], np.ndarray]: """ This function analyzes the clusters according to the provided method. @@ -358,7 +357,7 @@ def analyze_clusters(self, **kwargs) -> Tuple[Dict[str, Any], np.ndarray]: return report, self.assigned_clean_by_class - def exclusionary_reclassification(self, report: Dict[str, Any]): + def exclusionary_reclassification(self, report: dict[str, Any]): """ This function perform exclusionary reclassification. Based on the ex_re_threshold, suspicious clusters will be rechecked. If they remain suspicious, the suspected source @@ -392,7 +391,7 @@ class will be added to the report and the data will be relabelled. The new label # Test on the suspicious clusters n_train = len(self.x_train) indices_by_class = self._segment_by_class(np.arange(n_train), self.y_train) - indicies_by_cluster: List[List[List]] = [ + indicies_by_cluster: list[list[list]] = [ [[] for _ in range(self.nb_clusters)] for _ in range(self.classifier.nb_classes) ] @@ -447,7 +446,7 @@ def relabel_poison_ground_truth( tolerable_backdoor: float = 0.01, max_epochs: int = 50, batch_epochs: int = 10, - ) -> Tuple[float, "CLASSIFIER_NEURALNETWORK_TYPE"]: + ) -> tuple[float, "CLASSIFIER_NEURALNETWORK_TYPE"]: """ Revert poison attack by continue training the current classifier with `x`, `y_fix`. 
`test_set_split` determines the percentage in x that will be used as training set, while `1-test_set_split` determines how many data points @@ -501,7 +500,7 @@ def relabel_poison_cross_validation( tolerable_backdoor: float = 0.01, max_epochs: int = 50, batch_epochs: int = 10, - ) -> Tuple[float, "CLASSIFIER_NEURALNETWORK_TYPE"]: + ) -> tuple[float, "CLASSIFIER_NEURALNETWORK_TYPE"]: """ Revert poison attack by continue training the current classifier with `x`, `y_fix`. `n_splits` determines the number of cross validation splits. @@ -515,7 +514,7 @@ def relabel_poison_cross_validation( :param batch_epochs: Number of epochs to be trained before checking current state of model. :return: (improve_factor, classifier) """ - # pylint: disable=E0001 + from sklearn.model_selection import KFold # Train using cross validation @@ -594,7 +593,7 @@ def _remove_pickle(file_name: str) -> None: def visualize_clusters( self, x_raw: np.ndarray, save: bool = True, folder: str = ".", **kwargs - ) -> List[List[np.ndarray]]: + ) -> list[list[np.ndarray]]: """ This function creates the sprite/mosaic visualization for clusters. When save=True, it also stores a sprite (mosaic) per cluster in art.config.ART_DATA_PATH. @@ -612,7 +611,7 @@ def visualize_clusters( self.cluster_activations() x_raw_by_class = self._segment_by_class(x_raw, self.y_train) - x_raw_by_cluster: List[List[np.ndarray]] = [ # type: ignore + x_raw_by_cluster: list[list[np.ndarray]] = [ # type: ignore [[] for _ in range(self.nb_clusters)] for _ in range(self.classifier.nb_classes) # type: ignore ] @@ -622,7 +621,7 @@ def visualize_clusters( x_raw_by_cluster[n_class][assigned_cluster].append(x_raw_by_class[n_class][j]) # Now create sprites: - sprites_by_class: List[List[np.ndarray]] = [ # type: ignore + sprites_by_class: list[list[np.ndarray]] = [ # type: ignore [[] for _ in range(self.nb_clusters)] for _ in range(self.classifier.nb_classes) # type: ignore ] for i, class_i in enumerate(x_raw_by_cluster): @@ -682,7 +681,7 @@ def _check_params(self): if self.ex_re_threshold is not None and self.ex_re_threshold <= 0: raise ValueError("Exclusionary reclassification threshold must be positive") - def _get_activations(self, x_train: Optional[np.ndarray] = None) -> np.ndarray: + def _get_activations(self, x_train: np.ndarray | None = None) -> np.ndarray: """ Find activations from :class:`.Classifier`. """ @@ -716,7 +715,7 @@ def _get_activations(self, x_train: Optional[np.ndarray] = None) -> np.ndarray: ) return activations - def _segment_by_class(self, data: np.ndarray, features: np.ndarray) -> List[np.ndarray]: + def _segment_by_class(self, data: np.ndarray, features: np.ndarray) -> list[np.ndarray]: """ Returns segmented data according to specified features. @@ -783,14 +782,14 @@ def train_remove_backdoor( def cluster_activations( - separated_activations: List[np.ndarray], + separated_activations: list[np.ndarray], nb_clusters: int = 2, nb_dims: int = 10, reduce: str = "FastICA", clustering_method: str = "KMeans", - generator: Optional[DataGenerator] = None, - clusterer_new: Optional[MiniBatchKMeans] = None, -) -> Tuple[List[np.ndarray], List[np.ndarray]]: + generator: DataGenerator | None = None, + clusterer_new: MiniBatchKMeans | None = None, +) -> tuple[list[np.ndarray], list[np.ndarray]]: """ Clusters activations and returns two arrays. 
1) separated_clusters: where separated_clusters[i] is a 1D array indicating which cluster each data point @@ -852,7 +851,7 @@ def reduce_dimensionality(activations: np.ndarray, nb_dims: int = 10, reduce: st :param reduce: Method to perform dimensionality reduction, default is FastICA. :return: Array with the reduced activations. """ - # pylint: disable=E0001 + from sklearn.decomposition import FastICA, PCA if reduce == "FastICA": diff --git a/art/defences/detector/poison/clustering_analyzer.py b/art/defences/detector/poison/clustering_analyzer.py index 27b6855d94..ddef6184a2 100644 --- a/art/defences/detector/poison/clustering_analyzer.py +++ b/art/defences/detector/poison/clustering_analyzer.py @@ -21,7 +21,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Any, Dict, List, Tuple +from typing import Any import numpy as np @@ -48,11 +48,11 @@ def assign_class(clusters: np.ndarray, clean_clusters: np.ndarray, poison_cluste assigned_clean[np.isin(clusters, poison_clusters)] = 0 return assigned_clean - def analyze_by_size(self, separated_clusters: List[np.ndarray]) -> Tuple[np.ndarray, np.ndarray, Dict[str, int]]: + def analyze_by_size(self, separated_clusters: list[np.ndarray]) -> tuple[np.ndarray, np.ndarray, dict[str, int]]: """ - Designates as poisonous the cluster with less number of items on it. + Designates as poisonous the cluster with less items on it. - :param separated_clusters: list where separated_clusters[i] is the cluster assignments for the ith class. + :param separated_clusters: List where separated_clusters[i] is the cluster assignments for the ith class. :return: all_assigned_clean, summary_poison_clusters, report: where all_assigned_clean[i] is a 1D boolean array indicating whether a given data point was determined to be clean (as opposed to poisonous) and @@ -60,7 +60,7 @@ def analyze_by_size(self, separated_clusters: List[np.ndarray]) -> Tuple[np.ndar classified as poison, otherwise 0 report: Dictionary with summary of the analysis """ - report: Dict[str, Any] = { + report: dict[str, Any] = { "cluster_analysis": "smaller", "suspicious_clusters": 0, } @@ -92,7 +92,7 @@ def analyze_by_size(self, separated_clusters: List[np.ndarray]) -> Tuple[np.ndar susp = cluster_id in poison_clusters dict_i = dict(ptc_data_in_cluster=round(ptc, 2), suspicious_cluster=susp) - dict_cluster: Dict[str, Dict[str, int]] = {"cluster_" + str(cluster_id): dict_i} + dict_cluster: dict[str, dict[str, int]] = {"cluster_" + str(cluster_id): dict_i} report_class.update(dict_cluster) report["Class_" + str(i)] = report_class @@ -102,23 +102,23 @@ def analyze_by_size(self, separated_clusters: List[np.ndarray]) -> Tuple[np.ndar def analyze_by_distance( self, - separated_clusters: List[np.ndarray], - separated_activations: List[np.ndarray], - ) -> Tuple[np.ndarray, np.ndarray, Dict[str, int]]: + separated_clusters: list[np.ndarray], + separated_activations: list[np.ndarray], + ) -> tuple[np.ndarray, np.ndarray, dict[str, int]]: """ Assigns a cluster as poisonous if its median activation is closer to the median activation for another class than it is to the median activation of its own class. Currently, this function assumes there are only two clusters per class. - :param separated_clusters: list where separated_clusters[i] is the cluster assignments for the ith class. - :param separated_activations: list where separated_activations[i] is a 1D array of [0,1] for [poison,clean]. 
+ :param separated_clusters: List where separated_clusters[i] is the cluster assignments for the ith class. + :param separated_activations: List where separated_activations[i] is a 1D array of [0,1] for [poison,clean]. :return: all_assigned_clean, summary_poison_clusters, report: where all_assigned_clean[i] is a 1D boolean array indicating whether a given data point was determined to be clean (as opposed to poisonous) and summary_poison_clusters: array, where summary_poison_clusters[i][j]=1 if cluster j of class i was classified as poison, otherwise 0 report: Dictionary with summary of the analysis. """ - report: Dict[str, Any] = {"cluster_analysis": 0.0} + report: dict[str, Any] = {"cluster_analysis": 0.0} all_assigned_clean = [] cluster_centers = [] @@ -191,10 +191,10 @@ def analyze_by_distance( def analyze_by_relative_size( self, - separated_clusters: List[np.ndarray], + separated_clusters: list[np.ndarray], size_threshold: float = 0.35, r_size: int = 2, - ) -> Tuple[np.ndarray, np.ndarray, Dict[str, int]]: + ) -> tuple[np.ndarray, np.ndarray, dict[str, int]]: """ Assigns a cluster as poisonous if the smaller one contains less than threshold of the data. This method assumes only 2 clusters @@ -209,7 +209,7 @@ def analyze_by_relative_size( report: Dictionary with summary of the analysis. """ size_threshold = round(size_threshold, r_size) - report: Dict[str, Any] = { + report: dict[str, Any] = { "cluster_analysis": "relative_size", "suspicious_clusters": 0, "size_threshold": size_threshold, @@ -261,7 +261,7 @@ def analyze_by_silhouette_score( silhouette_threshold: float = 0.1, r_size: int = 2, r_silhouette: int = 4, - ) -> Tuple[np.ndarray, np.ndarray, Dict[str, int]]: + ) -> tuple[np.ndarray, np.ndarray, dict[str, int]]: """ Analyzes clusters to determine level of suspiciousness of poison based on the cluster's relative size and silhouette score. @@ -274,8 +274,8 @@ def analyze_by_silhouette_score( the silhouette score is higher than silhouette_threshold, the cluster is classified as poisonous. If the above thresholds are not provided, the default ones will be used. - :param separated_clusters: list where `separated_clusters[i]` is the cluster assignments for the ith class. - :param reduced_activations_by_class: list where separated_activations[i] is a 1D array of [0,1] for + :param separated_clusters: List where `separated_clusters[i]` is the cluster assignments for the ith class. + :param reduced_activations_by_class: List where separated_activations[i] is a 1D array of [0,1] for [poison,clean]. :param size_threshold: (optional) threshold used to define when a cluster is substantially smaller. A default value is used if the parameter is not provided. @@ -289,12 +289,12 @@ def analyze_by_silhouette_score( summary_poison_clusters[i][j]=1 if cluster j of class j was classified as poison report: Dictionary with summary of the analysis. 
""" - # pylint: disable=E0001 + from sklearn.metrics import silhouette_score size_threshold = round(size_threshold, r_size) silhouette_threshold = round(silhouette_threshold, r_silhouette) - report: Dict[str, Any] = { + report: dict[str, Any] = { "cluster_analysis": "silhouette_score", "size_threshold": str(size_threshold), "silhouette_threshold": str(silhouette_threshold), @@ -314,7 +314,7 @@ def analyze_by_silhouette_score( # Generate report for class silhouette_avg = round(silhouette_score(activations, clusters), r_silhouette) - dict_i: Dict[str, Any] = dict( + dict_i: dict[str, Any] = dict( sizes_clusters=str(bins), ptc_cluster=str(percentages), avg_silhouette_score=str(silhouette_avg), @@ -335,7 +335,7 @@ def analyze_by_silhouette_score( # If relative size of the clusters is Not suspicious, we conclude it's not suspicious. dict_i.update(suspicious=False) - report_class: Dict[str, Dict[str, bool]] = {"class_" + str(i): dict_i} + report_class: dict[str, dict[str, bool]] = {"class_" + str(i): dict_i} for p_id in poison_clusters[0]: summary_poison_clusters[i][p_id] = 1 diff --git a/art/defences/detector/poison/ground_truth_evaluator.py b/art/defences/detector/poison/ground_truth_evaluator.py index 6baaf7331b..095f52b448 100644 --- a/art/defences/detector/poison/ground_truth_evaluator.py +++ b/art/defences/detector/poison/ground_truth_evaluator.py @@ -22,7 +22,7 @@ import json import logging -from typing import Tuple, Union, List + import numpy as np @@ -40,8 +40,8 @@ def __init__(self): """ def analyze_correctness( - self, assigned_clean_by_class: Union[np.ndarray, List[int], List[np.ndarray]], is_clean_by_class: list - ) -> Tuple[np.ndarray, str]: + self, assigned_clean_by_class: np.ndarray | list[int] | list[np.ndarray], is_clean_by_class: list + ) -> tuple[np.ndarray, str]: """ For each training sample, determine whether the activation clustering method was correct. diff --git a/art/defences/detector/poison/poison_filtering_defence.py b/art/defences/detector/poison/poison_filtering_defence.py index 0d10e7619a..685e2d617d 100644 --- a/art/defences/detector/poison/poison_filtering_defence.py +++ b/art/defences/detector/poison/poison_filtering_defence.py @@ -22,7 +22,7 @@ import abc import sys -from typing import Any, Dict, List, Tuple, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np @@ -56,7 +56,7 @@ def __init__(self, classifier: "CLASSIFIER_TYPE", x_train: np.ndarray, y_train: self.y_train = y_train @abc.abstractmethod - def detect_poison(self, **kwargs) -> Tuple[dict, List[int]]: + def detect_poison(self, **kwargs) -> tuple[dict, list[int]]: """ Detect poison. @@ -87,7 +87,7 @@ def set_params(self, **kwargs) -> None: setattr(self, key, value) self._check_params() - def get_params(self) -> Dict[str, Any]: + def get_params(self) -> dict[str, Any]: """ Returns dictionary of parameters used to run defence. 
diff --git a/art/defences/detector/poison/provenance_defense.py b/art/defences/detector/poison/provenance_defense.py index 5fede17b32..41ffd32d85 100644 --- a/art/defences/detector/poison/provenance_defense.py +++ b/art/defences/detector/poison/provenance_defense.py @@ -24,7 +24,7 @@ import logging from copy import deepcopy -from typing import Dict, List, Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from sklearn.model_selection import train_test_split @@ -64,8 +64,8 @@ def __init__( x_train: np.ndarray, y_train: np.ndarray, p_train: np.ndarray, - x_val: Optional[np.ndarray] = None, - y_val: Optional[np.ndarray] = None, + x_val: np.ndarray | None = None, + y_val: np.ndarray | None = None, eps: float = 0.2, perf_func: str = "accuracy", pp_valid: float = 0.2, @@ -91,11 +91,11 @@ def __init__( self.eps = eps self.perf_func = perf_func self.pp_valid = pp_valid - self.assigned_clean_by_device: List[np.ndarray] = [] - self.is_clean_by_device: List[np.ndarray] = [] - self.errors_by_device: Optional[np.ndarray] = None + self.assigned_clean_by_device: list[np.ndarray] = [] + self.is_clean_by_device: list[np.ndarray] = [] + self.errors_by_device: np.ndarray | None = None self.evaluator = GroundTruthEvaluator() - self.is_clean_lst: Optional[np.ndarray] = None + self.is_clean_lst: np.ndarray | None = None self._check_params() def evaluate_defence(self, is_clean: np.ndarray, **kwargs) -> str: @@ -120,7 +120,7 @@ def evaluate_defence(self, is_clean: np.ndarray, **kwargs) -> str: ) return conf_matrix_json - def detect_poison(self, **kwargs) -> Tuple[Dict[int, float], List[int]]: + def detect_poison(self, **kwargs) -> tuple[dict[int, float], list[int]]: """ Returns poison detected and a report. @@ -148,7 +148,7 @@ def detect_poison(self, **kwargs) -> Tuple[Dict[int, float], List[int]]: return report, self.is_clean_lst # type: ignore - def detect_poison_partially_trusted(self, **kwargs) -> Dict[int, float]: + def detect_poison_partially_trusted(self, **kwargs) -> dict[int, float]: """ Detect poison given trusted validation data @@ -187,7 +187,7 @@ def detect_poison_partially_trusted(self, **kwargs) -> Dict[int, float]: return suspected - def detect_poison_untrusted(self, **kwargs) -> Dict[int, float]: + def detect_poison_untrusted(self, **kwargs) -> dict[int, float]: """ Detect poison given no trusted validation data @@ -236,7 +236,7 @@ def detect_poison_untrusted(self, **kwargs) -> Dict[int, float]: return suspected @staticmethod - def filter_input(data: np.ndarray, labels: np.ndarray, segment: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def filter_input(data: np.ndarray, labels: np.ndarray, segment: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Return the data and labels that are not part of a specified segment diff --git a/art/defences/detector/poison/roni.py b/art/defences/detector/poison/roni.py index 52372d0d53..2171c58b60 100644 --- a/art/defences/detector/poison/roni.py +++ b/art/defences/detector/poison/roni.py @@ -21,11 +21,12 @@ | Paper link: https://people.eecs.berkeley.edu/~tygar/papers/SML/misleading.learners.pdf """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging from copy import deepcopy -from typing import Callable, List, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from sklearn.model_selection import train_test_split @@ -66,7 
+67,7 @@ def __init__( y_train: np.ndarray, x_val: np.ndarray, y_val: np.ndarray, - perf_func: Union[str, Callable] = "accuracy", + perf_func: str | Callable = "accuracy", pp_cal: float = 0.2, pp_quiz: float = 0.2, calibrated: bool = True, @@ -99,7 +100,7 @@ def __init__( self.x_val = x_val self.y_val = y_val self.perf_func = perf_func - self.is_clean_lst: List[int] = [] + self.is_clean_lst: list[int] = [] self._check_params() def evaluate_defence(self, is_clean: np.ndarray, **kwargs) -> str: @@ -121,7 +122,7 @@ def evaluate_defence(self, is_clean: np.ndarray, **kwargs) -> str: _, conf_matrix = self.evaluator.analyze_correctness([self.is_clean_lst], [is_clean]) # type: ignore return conf_matrix - def detect_poison(self, **kwargs) -> Tuple[dict, List[int]]: + def detect_poison(self, **kwargs) -> tuple[dict, list[int]]: """ Returns poison detected and a report. @@ -182,7 +183,7 @@ def is_suspicious(self, before_classifier: "CLASSIFIER_TYPE", perf_shift: float) return bool(perf_shift < -self.eps) - def get_calibration_info(self, before_classifier: "CLASSIFIER_TYPE") -> Tuple[float, float]: + def get_calibration_info(self, before_classifier: "CLASSIFIER_TYPE") -> tuple[float, float]: """ Calculate the median and standard deviation of the accuracy shifts caused by the calibration set. diff --git a/art/defences/detector/poison/spectral_signature_defense.py b/art/defences/detector/poison/spectral_signature_defense.py index 8fd44a3200..53fa8f44f8 100644 --- a/art/defences/detector/poison/spectral_signature_defense.py +++ b/art/defences/detector/poison/spectral_signature_defense.py @@ -25,7 +25,7 @@ """ from __future__ import absolute_import, division, print_function, unicode_literals -from typing import List, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -99,7 +99,7 @@ def evaluate_defence(self, is_clean: np.ndarray, **kwargs) -> str: return conf_matrix_json - def detect_poison(self, **kwargs) -> Tuple[dict, List[int]]: + def detect_poison(self, **kwargs) -> tuple[dict, list[int]]: """ Returns poison detected and a report. @@ -131,7 +131,7 @@ def detect_poison(self, **kwargs) -> Tuple[dict, List[int]]: for idx, feature in enumerate(features_split): # Check for empty list - if len(feature): # pylint: disable=C1801 + if len(feature): score = SpectralSignatureDefense.spectral_signature_scores(np.vstack(feature)) # type: ignore score_cutoff = np.quantile(score, max(1 - self.eps_multiplier * self.expected_pp_poison, 0.0)) score_by_class.append(score) diff --git a/art/defences/postprocessor/postprocessor.py b/art/defences/postprocessor/postprocessor.py index 524a9b3a54..9603a5a2cb 100644 --- a/art/defences/postprocessor/postprocessor.py +++ b/art/defences/postprocessor/postprocessor.py @@ -19,7 +19,6 @@ This module implements the abstract base class for defences that post-process classifier output. """ from __future__ import absolute_import, division, print_function, unicode_literals -from typing import List import abc @@ -32,7 +31,7 @@ class Postprocessor(abc.ABC): evaluation for loss gradients or the calculation of class gradients. 
""" - params: List[str] = [] + params: list[str] = [] def __init__(self, is_fitted: bool = False, apply_fit: bool = True, apply_predict: bool = True) -> None: """ diff --git a/art/defences/preprocessor/cutmix/cutmix.py b/art/defences/preprocessor/cutmix/cutmix.py index 5be82444e4..00d8a67f39 100644 --- a/art/defences/preprocessor/cutmix/cutmix.py +++ b/art/defences/preprocessor/cutmix/cutmix.py @@ -27,7 +27,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple + import numpy as np from tqdm.auto import tqdm @@ -80,7 +80,7 @@ def __init__( self.verbose = verbose self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply CutMix data augmentation to sample `x`. diff --git a/art/defences/preprocessor/cutmix/cutmix_pytorch.py b/art/defences/preprocessor/cutmix/cutmix_pytorch.py index 5cf9e82c0d..3f549a90db 100644 --- a/art/defences/preprocessor/cutmix/cutmix_pytorch.py +++ b/art/defences/preprocessor/cutmix/cutmix_pytorch.py @@ -27,7 +27,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -35,7 +35,7 @@ from art.defences.preprocessor.preprocessor import PreprocessorPyTorch if TYPE_CHECKING: - # pylint: disable=C0412 + import torch logger = logging.getLogger(__name__) @@ -91,8 +91,8 @@ def __init__( self._check_params() def forward( - self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None - ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]: + self, x: "torch.Tensor", y: "torch.Tensor" | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Apply CutMix data augmentation to sample `x`. diff --git a/art/defences/preprocessor/cutmix/cutmix_tensorflow.py b/art/defences/preprocessor/cutmix/cutmix_tensorflow.py index 2ebfeb0673..c4c0a9c227 100644 --- a/art/defences/preprocessor/cutmix/cutmix_tensorflow.py +++ b/art/defences/preprocessor/cutmix/cutmix_tensorflow.py @@ -24,10 +24,10 @@ see https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -35,7 +35,7 @@ from art.defences.preprocessor.preprocessor import PreprocessorTensorFlowV2 if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf logger = logging.getLogger(__name__) @@ -83,7 +83,7 @@ def __init__( self.verbose = verbose self._check_params() - def forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def forward(self, x: "tf.Tensor", y: "tf.Tensor" | None = None) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Apply CutMix data augmentation to sample `x`. @@ -148,8 +148,8 @@ def forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> Tuple["tf. 
prob = np.random.rand() if prob < self.probability: # uniform sampling - center_y = tf.random.uniform(shape=[], maxval=height, dtype=tf.int32) # pylint: disable=E1123 - center_x = tf.random.uniform(shape=[], maxval=width, dtype=tf.int32) # pylint: disable=E1123 + center_y = tf.random.uniform(shape=[], maxval=height, dtype=tf.int32) + center_x = tf.random.uniform(shape=[], maxval=width, dtype=tf.int32) bby1 = tf.clip_by_value(center_y - cut_height // 2, 0, height) bbx1 = tf.clip_by_value(center_x - cut_width // 2, 0, width) bby2 = tf.clip_by_value(center_y + cut_height // 2, 0, height) diff --git a/art/defences/preprocessor/cutout/cutout.py b/art/defences/preprocessor/cutout/cutout.py index 796cef184b..d8b9f3fca2 100644 --- a/art/defences/preprocessor/cutout/cutout.py +++ b/art/defences/preprocessor/cutout/cutout.py @@ -27,7 +27,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple + import numpy as np from tqdm.auto import trange @@ -73,7 +73,7 @@ def __init__( self.verbose = verbose self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply Cutout data augmentation to sample `x`. diff --git a/art/defences/preprocessor/cutout/cutout_pytorch.py b/art/defences/preprocessor/cutout/cutout_pytorch.py index 40e16222ed..bbcebc86a8 100644 --- a/art/defences/preprocessor/cutout/cutout_pytorch.py +++ b/art/defences/preprocessor/cutout/cutout_pytorch.py @@ -27,14 +27,14 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING from tqdm.auto import trange from art.defences.preprocessor.preprocessor import PreprocessorPyTorch if TYPE_CHECKING: - # pylint: disable=C0412 + import torch logger = logging.getLogger(__name__) @@ -84,8 +84,8 @@ def __init__( self._check_params() def forward( - self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None - ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]: + self, x: "torch.Tensor", y: "torch.Tensor" | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Apply Cutout data augmentation to sample `x`. diff --git a/art/defences/preprocessor/cutout/cutout_tensorflow.py b/art/defences/preprocessor/cutout/cutout_tensorflow.py index 3b30625406..739477dde7 100644 --- a/art/defences/preprocessor/cutout/cutout_tensorflow.py +++ b/art/defences/preprocessor/cutout/cutout_tensorflow.py @@ -24,15 +24,15 @@ see https://arxiv.org/abs/1803.09868 . 
For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING from art.defences.preprocessor.preprocessor import PreprocessorTensorFlowV2 if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf logger = logging.getLogger(__name__) @@ -74,7 +74,7 @@ def __init__( self.verbose = verbose self._check_params() - def forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def forward(self, x: "tf.Tensor", y: "tf.Tensor" | None = None) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Apply Cutout data augmentation to sample `x`. diff --git a/art/defences/preprocessor/feature_squeezing.py b/art/defences/preprocessor/feature_squeezing.py index 49b2d01394..b4467499ef 100644 --- a/art/defences/preprocessor/feature_squeezing.py +++ b/art/defences/preprocessor/feature_squeezing.py @@ -27,7 +27,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple + import numpy as np @@ -71,7 +71,7 @@ def __init__( self.bit_depth = bit_depth self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply feature squeezing to sample `x`. diff --git a/art/defences/preprocessor/gaussian_augmentation.py b/art/defences/preprocessor/gaussian_augmentation.py index 92b4f78f78..8ad66eed96 100644 --- a/art/defences/preprocessor/gaussian_augmentation.py +++ b/art/defences/preprocessor/gaussian_augmentation.py @@ -18,10 +18,10 @@ """ This module implements the Gaussian augmentation defence in `GaussianAugmentation`. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -56,7 +56,7 @@ def __init__( sigma: float = 1.0, augmentation: bool = True, ratio: float = 1.0, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = True, apply_predict: bool = False, ): @@ -87,7 +87,7 @@ def __init__( self.clip_values = clip_values self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Augment the sample `(x, y)` with Gaussian noise. 
The result is either an extended dataset containing the original sample, as well as the newly created noisy samples (augmentation=True) or just the noisy counterparts diff --git a/art/defences/preprocessor/inverse_gan.py b/art/defences/preprocessor/inverse_gan.py index c26217afac..0f5436a777 100644 --- a/art/defences/preprocessor/inverse_gan.py +++ b/art/defences/preprocessor/inverse_gan.py @@ -20,10 +20,10 @@ | Paper link: https://arxiv.org/abs/1911.10291 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from scipy.optimize import minimize @@ -31,7 +31,7 @@ from art.defences.preprocessor.preprocessor import Preprocessor if TYPE_CHECKING: - # pylint: disable=C0412,R0401 + # pylint: disable=cyclic-import import tensorflow as tf from art.estimators.encoding.tensorflow import TensorFlowEncoder @@ -53,7 +53,7 @@ def __init__( self, sess: "tf.compat.v1.Session", gan: "TensorFlowGenerator", - inverse_gan: Optional["TensorFlowEncoder"], + inverse_gan: "TensorFlowEncoder" | None, apply_fit: bool = False, apply_predict: bool = False, ): @@ -80,9 +80,7 @@ def __init__( self._grad = tf.gradients(self._loss, self.gan.input_ph) self._check_params() - def __call__( - self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[np.ndarray, np.ndarray | None]: """ Applies the :class:`.InverseGAN` defence upon the sample input. diff --git a/art/defences/preprocessor/jpeg_compression.py b/art/defences/preprocessor/jpeg_compression.py index ba894f013b..157de04ca5 100644 --- a/art/defences/preprocessor/jpeg_compression.py +++ b/art/defences/preprocessor/jpeg_compression.py @@ -28,7 +28,7 @@ from io import BytesIO import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -101,7 +101,7 @@ def _compress(self, x: np.ndarray, mode: str) -> np.ndarray: tmp_jpeg.close() return x_jpeg - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply JPEG compression to sample `x`. diff --git a/art/defences/preprocessor/label_smoothing.py b/art/defences/preprocessor/label_smoothing.py index add97946ba..6ceae3de55 100644 --- a/art/defences/preprocessor/label_smoothing.py +++ b/art/defences/preprocessor/label_smoothing.py @@ -27,7 +27,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple + import numpy as np @@ -68,7 +68,7 @@ def __init__( self.max_value = max_value self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply label smoothing. 
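The framework-specific preprocessors in this part of the patch keep their heavy imports behind `TYPE_CHECKING` and annotate with quoted forward references such as `"torch.Tensor" | None`. That combination is only safe at runtime when annotations are left unevaluated, which is what `from __future__ import annotations` (added to several of these modules) guarantees. A small sketch under that assumption; `forward_identity` is hypothetical, not ART code:

from __future__ import annotations  # keeps '"torch.Tensor" | None' as an unevaluated string at runtime

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import torch  # imported only for static type checking, never at runtime


def forward_identity(
    x: "torch.Tensor", y: "torch.Tensor" | None = None
) -> tuple["torch.Tensor", "torch.Tensor" | None]:
    """Hypothetical preprocessor forward pass that returns its inputs unchanged."""
    return x, y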
diff --git a/art/defences/preprocessor/mixup/mixup.py b/art/defences/preprocessor/mixup/mixup.py index e329fc8434..ea4bf9b1d6 100644 --- a/art/defences/preprocessor/mixup/mixup.py +++ b/art/defences/preprocessor/mixup/mixup.py @@ -27,7 +27,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple + import numpy as np @@ -73,7 +73,7 @@ def __init__( self.num_mix = num_mix self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply Mixup data augmentation to feature data `x` and labels `y`. diff --git a/art/defences/preprocessor/mixup/mixup_pytorch.py b/art/defences/preprocessor/mixup/mixup_pytorch.py index 63b5dc4487..feb9ba08f9 100644 --- a/art/defences/preprocessor/mixup/mixup_pytorch.py +++ b/art/defences/preprocessor/mixup/mixup_pytorch.py @@ -27,14 +27,14 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from art.defences.preprocessor.preprocessor import PreprocessorPyTorch if TYPE_CHECKING: - # pylint: disable=C0412 + import torch logger = logging.getLogger(__name__) @@ -84,8 +84,8 @@ def __init__( self._check_params() def forward( - self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None - ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]: + self, x: "torch.Tensor", y: "torch.Tensor" | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Apply Mixup data augmentation to feature data `x` and labels `y`. diff --git a/art/defences/preprocessor/mixup/mixup_tensorflow.py b/art/defences/preprocessor/mixup/mixup_tensorflow.py index ab0a3fe83f..53a1036c5e 100644 --- a/art/defences/preprocessor/mixup/mixup_tensorflow.py +++ b/art/defences/preprocessor/mixup/mixup_tensorflow.py @@ -24,17 +24,17 @@ see https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from art.defences.preprocessor.preprocessor import PreprocessorTensorFlowV2 if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf logger = logging.getLogger(__name__) @@ -77,7 +77,7 @@ def __init__( self.num_mix = num_mix self._check_params() - def forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def forward(self, x: "tf.Tensor", y: "tf.Tensor" | None = None) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Apply Mixup data augmentation to feature data `x` and labels `y`. 
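The `Mp3CompressionPyTorch` and `VideoCompressionPyTorch` hunks below keep wrapping their NumPy preprocessors in a `torch.autograd.Function`, only swapping the numeric pylint codes for symbolic names (W0223 -> abstract-method, W0622 -> redefined-builtin, W0221 -> arguments-differ). A self-contained sketch of that bridge pattern, with an identity defence and a straight-through gradient standing in for the real compression and its `estimate_gradient` (names are illustrative, not ART's):

import numpy as np
import torch
from torch.autograd import Function


def identity_defence(x: np.ndarray) -> np.ndarray:
    """Stand-in for a NumPy preprocessor such as MP3 or video compression."""
    return x


class NumpyDefenceFunction(Function):  # pylint: disable=abstract-method
    """Run a NumPy preprocessor inside the autograd graph."""

    @staticmethod
    def forward(ctx, input):  # pylint: disable=redefined-builtin,arguments-differ
        numpy_input = input.detach().cpu().numpy()
        result = identity_defence(numpy_input)
        return input.new(result)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimate: pass the incoming gradient back unchanged.
        return grad_output.new(grad_output.cpu().numpy())


# Example: NumpyDefenceFunction.apply(torch.rand(2, 3, requires_grad=True))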
diff --git a/art/defences/preprocessor/mp3_compression.py b/art/defences/preprocessor/mp3_compression.py index 50e198c466..7fc5280079 100644 --- a/art/defences/preprocessor/mp3_compression.py +++ b/art/defences/preprocessor/mp3_compression.py @@ -27,7 +27,7 @@ import logging from io import BytesIO -from typing import Optional, Tuple + import numpy as np from tqdm.auto import tqdm @@ -67,7 +67,7 @@ def __init__( self.verbose = verbose self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply MP3 compression to sample `x`. diff --git a/art/defences/preprocessor/mp3_compression_pytorch.py b/art/defences/preprocessor/mp3_compression_pytorch.py index e4c706541f..def6629658 100644 --- a/art/defences/preprocessor/mp3_compression_pytorch.py +++ b/art/defences/preprocessor/mp3_compression_pytorch.py @@ -24,7 +24,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING from art.defences.preprocessor.mp3_compression import Mp3Compression from art.defences.preprocessor.preprocessor import PreprocessorPyTorch @@ -32,7 +32,7 @@ logger = logging.getLogger(__name__) if TYPE_CHECKING: - # pylint: disable=C0412 + import torch @@ -83,19 +83,19 @@ def __init__( verbose=verbose, ) - class CompressionPyTorchNumpy(Function): # pylint: disable=W0223 + class CompressionPyTorchNumpy(Function): # pylint: disable=abstract-method """ Function running Preprocessor. """ @staticmethod - def forward(ctx, input): # pylint: disable=W0622,W0221 + def forward(ctx, input): # pylint: disable=redefined-builtin,arguments-differ numpy_input = input.detach().cpu().numpy() result, _ = self.compression_numpy(numpy_input) return input.new(result) @staticmethod - def backward(ctx, grad_output): # pylint: disable=W0221 + def backward(ctx, grad_output): numpy_go = grad_output.cpu().numpy() # np.expand_dims(input, axis=[0, 2]) result = self.compression_numpy.estimate_gradient(None, numpy_go) @@ -105,8 +105,8 @@ def backward(ctx, grad_output): # pylint: disable=W0221 self._compression_pytorch_numpy = CompressionPyTorchNumpy def forward( - self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None - ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]: + self, x: "torch.Tensor", y: "torch.Tensor" | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Apply MP3 compression to sample `x`. diff --git a/art/defences/preprocessor/pixel_defend.py b/art/defences/preprocessor/pixel_defend.py index 13f9997472..cd8c2d3619 100644 --- a/art/defences/preprocessor/pixel_defend.py +++ b/art/defences/preprocessor/pixel_defend.py @@ -25,10 +25,10 @@ see https://arxiv.org/abs/1802.00420 . 
For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -60,7 +60,7 @@ def __init__( self, clip_values: "CLIP_VALUES_TYPE" = (0.0, 1.0), eps: int = 16, - pixel_cnn: Optional["CLASSIFIER_NEURALNETWORK_TYPE"] = None, + pixel_cnn: "CLASSIFIER_NEURALNETWORK_TYPE" | None = None, batch_size: int = 128, apply_fit: bool = False, apply_predict: bool = True, @@ -83,7 +83,7 @@ def __init__( self.verbose = verbose self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply pixel defence to sample `x`. diff --git a/art/defences/preprocessor/preprocessor.py b/art/defences/preprocessor/preprocessor.py index 79acb5867f..c72e03ba5e 100644 --- a/art/defences/preprocessor/preprocessor.py +++ b/art/defences/preprocessor/preprocessor.py @@ -18,10 +18,10 @@ """ This module implements the abstract base class for defences that pre-process input data. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc -from typing import List, Optional, Tuple, Any, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np @@ -40,7 +40,7 @@ class Preprocessor(abc.ABC): To modify, override `estimate_gradient` """ - params: List[str] = [] + params: list[str] = [] def __init__(self, is_fitted: bool = False, apply_fit: bool = True, apply_predict: bool = True) -> None: """ @@ -80,7 +80,7 @@ def apply_predict(self) -> bool: return self._apply_predict @abc.abstractmethod - def __call__(self, x: np.ndarray, y: Optional[Any] = None) -> Tuple[np.ndarray, Optional[Any]]: + def __call__(self, x: np.ndarray, y: Any | None = None) -> tuple[np.ndarray, Any | None]: """ Perform data preprocessing and return preprocessed data as tuple. @@ -90,7 +90,7 @@ def __call__(self, x: np.ndarray, y: Optional[Any] = None) -> Tuple[np.ndarray, """ raise NotImplementedError - def fit(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> None: + def fit(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> None: """ Fit the parameters of the data preprocessor if it has any. @@ -100,7 +100,7 @@ def fit(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> None: """ pass - def estimate_gradient(self, x: np.ndarray, grad: np.ndarray) -> np.ndarray: # pylint: disable=W0613,R0201 + def estimate_gradient(self, x: np.ndarray, grad: np.ndarray) -> np.ndarray: """ Provide an estimate of the gradients of the defence for the backward pass. If the defence is not differentiable, this is an estimate of the gradient, most often replacing the computation performed by the defence with the @@ -124,7 +124,7 @@ def set_params(self, **kwargs) -> None: # pragma: no cover def _check_params(self) -> None: # pragma: no cover pass - def forward(self, x: Any, y: Any = None) -> Tuple[Any, Any]: + def forward(self, x: Any, y: Any = None) -> tuple[Any, Any]: """ Perform data preprocessing and return preprocessed data. 
@@ -153,7 +153,7 @@ def __init__(self, device_type: str = "gpu", **kwargs): self._device = torch.device(f"cuda:{cuda_idx}") @abc.abstractmethod - def forward(self, x: "torch.Tensor", y: Optional[Any] = None) -> Tuple["torch.Tensor", Optional[Any]]: + def forward(self, x: "torch.Tensor", y: Any | None = None) -> tuple["torch.Tensor", Any | None]: """ Perform data preprocessing in PyTorch and return preprocessed data as tuple. @@ -163,7 +163,7 @@ def forward(self, x: "torch.Tensor", y: Optional[Any] = None) -> Tuple["torch.Te """ raise NotImplementedError - def estimate_forward(self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None) -> "torch.Tensor": + def estimate_forward(self, x: "torch.Tensor", y: "torch.Tensor" | None = None) -> "torch.Tensor": """ Provide a differentiable estimate of the forward function, so that autograd can calculate gradients of the defence for the backward pass. If the defence is differentiable, just call `self.forward()`. @@ -183,7 +183,7 @@ def device(self): """ return self._device - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply preprocessing to input `x` and labels `y`. @@ -195,7 +195,7 @@ def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.nd x_tensor = torch.tensor(x, device=self.device) if y is not None: - y_tensor: Optional[torch.Tensor] = torch.tensor(y, device=self.device) + y_tensor: torch.Tensor | None = torch.tensor(y, device=self.device) else: y_tensor = None @@ -204,7 +204,7 @@ def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.nd x_result = x_tensor.cpu().numpy() if y_tensor is not None: - y_result: Optional[np.ndarray] = y_tensor.cpu().numpy() + y_result: np.ndarray | None = y_tensor.cpu().numpy() else: y_result = None return x_result, y_result @@ -250,7 +250,7 @@ class PreprocessorTensorFlowV2(Preprocessor): """ @abc.abstractmethod - def forward(self, x: "tf.Tensor", y: Optional[Any] = None) -> Tuple["tf.Tensor", Optional[Any]]: + def forward(self, x: "tf.Tensor", y: Any | None = None) -> tuple["tf.Tensor", Any | None]: """ Perform data preprocessing in TensorFlow v2 and return preprocessed data as tuple. @@ -260,7 +260,7 @@ def forward(self, x: "tf.Tensor", y: Optional[Any] = None) -> Tuple["tf.Tensor", """ raise NotImplementedError - def estimate_forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> "tf.Tensor": + def estimate_forward(self, x: "tf.Tensor", y: "tf.Tensor" | None = None) -> "tf.Tensor": """ Provide a differentiable estimate of the forward function, so that autograd can calculate gradients of the defence for the backward pass. If the defence is differentiable, just call `self.forward()`. @@ -273,7 +273,7 @@ def estimate_forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> " """ return self.forward(x, y=y)[0] - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply preprocessing to input `x` and labels `y`. diff --git a/art/defences/preprocessor/resample.py b/art/defences/preprocessor/resample.py index 39b78432a1..0b8b7324cb 100644 --- a/art/defences/preprocessor/resample.py +++ b/art/defences/preprocessor/resample.py @@ -24,7 +24,7 @@ see https://arxiv.org/abs/1902.06705. 
""" import logging -from typing import Optional, Tuple + import numpy as np @@ -66,7 +66,7 @@ def __init__( self.channels_first = channels_first self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Resample `x` to a new sampling rate. diff --git a/art/defences/preprocessor/spatial_smoothing.py b/art/defences/preprocessor/spatial_smoothing.py index 9485571030..7d34444a5f 100644 --- a/art/defences/preprocessor/spatial_smoothing.py +++ b/art/defences/preprocessor/spatial_smoothing.py @@ -24,10 +24,10 @@ see https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple + import numpy as np from scipy.ndimage import median_filter @@ -55,7 +55,7 @@ def __init__( self, window_size: int = 3, channels_first: bool = False, - clip_values: Optional[CLIP_VALUES_TYPE] = None, + clip_values: CLIP_VALUES_TYPE | None = None, apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -76,7 +76,7 @@ def __init__( self.clip_values = clip_values self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply local spatial smoothing to sample `x`. diff --git a/art/defences/preprocessor/spatial_smoothing_pytorch.py b/art/defences/preprocessor/spatial_smoothing_pytorch.py index b4bc737ab0..5437a8c392 100644 --- a/art/defences/preprocessor/spatial_smoothing_pytorch.py +++ b/art/defences/preprocessor/spatial_smoothing_pytorch.py @@ -24,17 +24,17 @@ see https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from art.defences.preprocessor.preprocessor import PreprocessorPyTorch if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE @@ -56,7 +56,7 @@ def __init__( self, window_size: int = 3, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = False, apply_predict: bool = True, device_type: str = "gpu", @@ -91,7 +91,7 @@ class MedianBlurCustom(MedianBlur): An ongoing effort to reproduce the median blur function in SciPy. """ - def __init__(self, kernel_size: Tuple[int, int]) -> None: + def __init__(self, kernel_size: tuple[int, int]) -> None: super().__init__(kernel_size) # Half-pad the input so that the output keeps the same shape. 
@@ -119,7 +119,7 @@ def __init__(self, kernel_size: Tuple[int, int]) -> None: self.kernel = get_binary_kernel2d(kernel_size) - # pylint: disable=W0622 + # pylint: disable=redefined-builtin def forward(self, input: "torch.Tensor"): # type: ignore import torch import torch.nn.functional as F @@ -155,8 +155,8 @@ def forward(self, input: "torch.Tensor"): # type: ignore self.median_blur = MedianBlurCustom(kernel_size=(self.window_size, self.window_size)) def forward( - self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None - ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]: + self, x: "torch.Tensor", y: "torch.Tensor" | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Apply local spatial smoothing to sample `x`. """ diff --git a/art/defences/preprocessor/spatial_smoothing_tensorflow.py b/art/defences/preprocessor/spatial_smoothing_tensorflow.py index 3ad26c0ad7..c1bc898893 100644 --- a/art/defences/preprocessor/spatial_smoothing_tensorflow.py +++ b/art/defences/preprocessor/spatial_smoothing_tensorflow.py @@ -24,17 +24,17 @@ see https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from art.defences.preprocessor.preprocessor import PreprocessorTensorFlowV2 if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf from art.utils import CLIP_VALUES_TYPE @@ -56,7 +56,7 @@ def __init__( self, window_size: int = 3, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -77,7 +77,7 @@ def __init__( self.clip_values = clip_values self._check_params() - def forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def forward(self, x: "tf.Tensor", y: "tf.Tensor" | None = None) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Apply local spatial smoothing to sample `x`. """ diff --git a/art/defences/preprocessor/thermometer_encoding.py b/art/defences/preprocessor/thermometer_encoding.py index b8b10c5675..e0038a42d4 100644 --- a/art/defences/preprocessor/thermometer_encoding.py +++ b/art/defences/preprocessor/thermometer_encoding.py @@ -27,7 +27,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -78,7 +78,7 @@ def __init__( self.channels_first = channels_first self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply thermometer encoding to sample `x`. The new axis with the encoding is added as last dimension. diff --git a/art/defences/preprocessor/variance_minimization.py b/art/defences/preprocessor/variance_minimization.py index 6c779903db..9386c26336 100644 --- a/art/defences/preprocessor/variance_minimization.py +++ b/art/defences/preprocessor/variance_minimization.py @@ -24,10 +24,10 @@ see https://arxiv.org/abs/1802.00420 . 
For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from scipy.optimize import minimize @@ -62,7 +62,7 @@ def __init__( lamb: float = 0.5, solver: str = "L-BFGS-B", max_iter: int = 10, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = False, apply_predict: bool = True, verbose: bool = False, @@ -91,7 +91,7 @@ def __init__( self.verbose = verbose self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply total variance minimization to sample `x`. diff --git a/art/defences/preprocessor/video_compression.py b/art/defences/preprocessor/video_compression.py index f8bfba0c28..f19f29c483 100644 --- a/art/defences/preprocessor/video_compression.py +++ b/art/defences/preprocessor/video_compression.py @@ -26,7 +26,7 @@ import logging import os from tempfile import TemporaryDirectory -from typing import Optional, Tuple + import warnings import numpy as np @@ -75,7 +75,7 @@ def __init__( self.verbose = verbose self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply video compression to sample `x`. diff --git a/art/defences/preprocessor/video_compression_pytorch.py b/art/defences/preprocessor/video_compression_pytorch.py index 19fbee4a5e..53f8a3bed8 100644 --- a/art/defences/preprocessor/video_compression_pytorch.py +++ b/art/defences/preprocessor/video_compression_pytorch.py @@ -24,7 +24,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING from art.defences.preprocessor.preprocessor import PreprocessorPyTorch from art.defences.preprocessor.video_compression import VideoCompression @@ -32,7 +32,7 @@ logger = logging.getLogger(__name__) if TYPE_CHECKING: - # pylint: disable=C0412 + import torch @@ -91,19 +91,19 @@ def __init__( verbose=verbose, ) - class CompressionPyTorchNumpy(Function): # pylint: disable=W0223 + class CompressionPyTorchNumpy(Function): # pylint: disable=abstract-method """ Function running Preprocessor. 
""" @staticmethod - def forward(ctx, input): # pylint: disable=W0622,W0221 + def forward(ctx, input): # pylint: disable=redefined-builtin,arguments-differ numpy_input = input.detach().cpu().numpy() result, _ = self.compression_numpy(numpy_input) return input.new(result) @staticmethod - def backward(ctx, grad_output): # pylint: disable=W0221 + def backward(ctx, grad_output): numpy_go = grad_output.cpu().numpy() result = self.compression_numpy.estimate_gradient(None, numpy_go) return grad_output.new(result) @@ -111,8 +111,8 @@ def backward(ctx, grad_output): # pylint: disable=W0221 self._compression_pytorch_numpy = CompressionPyTorchNumpy def forward( - self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None - ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]: + self, x: "torch.Tensor", y: "torch.Tensor" | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Apply video compression to sample `x`. diff --git a/art/defences/trainer/adversarial_trainer.py b/art/defences/trainer/adversarial_trainer.py index b33a34aa4f..923dfa8fef 100644 --- a/art/defences/trainer/adversarial_trainer.py +++ b/art/defences/trainer/adversarial_trainer.py @@ -31,10 +31,10 @@ principled approach to making classifiers more robust (see https://arxiv.org/abs/1802.00420), very careful evaluations are required to assess its effectiveness case by case (see https://arxiv.org/abs/1902.06705). """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange, tqdm @@ -71,7 +71,7 @@ class AdversarialTrainer(Trainer): def __init__( self, classifier: "CLASSIFIER_LOSS_GRADIENTS_TYPE", - attacks: Union["EvasionAttack", List["EvasionAttack"]], + attacks: "EvasionAttack" | list["EvasionAttack"], ratio: float = 0.5, ) -> None: """ @@ -96,9 +96,9 @@ def __init__( raise ValueError("The `ratio` of adversarial samples in each batch has to be between 0 and 1.") self.ratio = ratio - self._precomputed_adv_samples: List[Optional[np.ndarray]] = [] - self.x_augmented: Optional[np.ndarray] = None - self.y_augmented: Optional[np.ndarray] = None + self._precomputed_adv_samples: list[np.ndarray | None] = [] + self.x_augmented: np.ndarray | None = None + self.y_augmented: np.ndarray | None = None def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None: """ @@ -193,9 +193,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg ) attack_id = (attack_id + 1) % len(self.attacks) - def fit( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 20, **kwargs - ) -> None: + def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 20, **kwargs) -> None: """ Train a model adversarially. See class documentation for more information on the exact procedure. diff --git a/art/defences/trainer/adversarial_trainer_awp.py b/art/defences/trainer/adversarial_trainer_awp.py index 0c3c33d61e..a0c4a0a7c9 100644 --- a/art/defences/trainer/adversarial_trainer_awp.py +++ b/art/defences/trainer/adversarial_trainer_awp.py @@ -23,10 +23,10 @@ | It was noted that this protocol uses double perturbation mechanism i.e, perturbation on the input samples and then perturbation on the model parameters. 
Consequently, framework specific implementations are being provided in ART. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -78,14 +78,14 @@ def __init__( super().__init__(classifier) @abc.abstractmethod - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, batch_size: int = 128, nb_epochs: int = 20, - **kwargs + **kwargs, ): """ Train a model adversarially with AWP. See class documentation for more information on the exact procedure. @@ -101,12 +101,12 @@ def fit( # pylint: disable=W0221 raise NotImplementedError @abc.abstractmethod - def fit_generator( # pylint: disable=W0221 + def fit_generator( self, generator: DataGenerator, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, nb_epochs: int = 20, - **kwargs + **kwargs, ): """ Train a model adversarially with AWP using a data generator. diff --git a/art/defences/trainer/adversarial_trainer_awp_pytorch.py b/art/defences/trainer/adversarial_trainer_awp_pytorch.py index 1b95f0c8bb..13e58ba3f8 100644 --- a/art/defences/trainer/adversarial_trainer_awp_pytorch.py +++ b/art/defences/trainer/adversarial_trainer_awp_pytorch.py @@ -20,11 +20,11 @@ | Paper link: https://proceedings.neurips.cc/paper/2020/file/1ef91c212e30e14bf125e9374262401f-Paper.pdf """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import time -from typing import Optional, Tuple, TYPE_CHECKING, List, Dict +from typing import TYPE_CHECKING from collections import OrderedDict import numpy as np @@ -86,12 +86,12 @@ def fit( self, x: np.ndarray, y: np.ndarray, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, batch_size: int = 128, nb_epochs: int = 20, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, **kwargs, - ): # pylint: disable=W0221 + ): """ Train a model adversarially with AWP protocol. See class documentation for more information on the exact procedure. 
@@ -109,9 +109,7 @@ def fit( logger.info("Performing adversarial training with AWP with %s protocol", self._mode) - if (scheduler is not None) and ( - not isinstance(scheduler, torch.optim.lr_scheduler._LRScheduler) # pylint: disable=W0212 - ): + if (scheduler is not None) and (not isinstance(scheduler, torch.optim.lr_scheduler._LRScheduler)): raise ValueError("Invalid Pytorch scheduler is provided for adversarial training.") best_acc_adv_test = 0 @@ -152,13 +150,13 @@ def fit( if validation_data is not None: (x_test, y_test) = validation_data y_test = check_and_transform_label_format(y_test, nb_classes=self.classifier.nb_classes) - # pylint: disable=W0212 + x_preprocessed_test, y_preprocessed_test = self._classifier._apply_preprocessing( x_test, y_test, fit=True, ) - # pylint: enable=W0212 + # pylint: enable=protected-access output_clean = np.argmax(self.predict(x_preprocessed_test), axis=1) nb_correct_clean = np.sum(output_clean == np.argmax(y_preprocessed_test, axis=1)) x_test_adv = self._attack.generate(x_preprocessed_test, y=y_preprocessed_test) @@ -196,11 +194,11 @@ def fit( def fit_generator( self, generator: DataGenerator, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, nb_epochs: int = 20, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, **kwargs, - ): # pylint: disable=W0221 + ): """ Train a model adversarially with AWP protocol using a data generator. See class documentation for more information on the exact procedure. @@ -216,9 +214,7 @@ def fit_generator( logger.info("Performing adversarial training with AWP with %s protocol", self._mode) - if (scheduler is not None) and ( - not isinstance(scheduler, torch.optim.lr_scheduler._LRScheduler) # pylint: disable=W0212 - ): + if (scheduler is not None) and (not isinstance(scheduler, torch.optim.lr_scheduler._LRScheduler)): raise ValueError("Invalid Pytorch scheduler is provided for adversarial training.") size = generator.size @@ -261,13 +257,13 @@ def fit_generator( if validation_data is not None: (x_test, y_test) = validation_data y_test = check_and_transform_label_format(y_test, nb_classes=self.classifier.nb_classes) - # pylint: disable=W0212 + x_preprocessed_test, y_preprocessed_test = self._classifier._apply_preprocessing( x_test, y_test, fit=True, ) - # pylint: enable=W0212 + # pylint: enable=protected-access output_clean = np.argmax(self.predict(x_preprocessed_test), axis=1) nb_correct_clean = np.sum(output_clean == np.argmax(y_preprocessed_test, axis=1)) x_test_adv = self._attack.generate(x_preprocessed_test, y=y_preprocessed_test) @@ -301,14 +297,14 @@ def fit_generator( train_acc / train_n, ) - def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray) -> Tuple[float, float, float]: + def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray) -> tuple[float, float, float]: """ Perform the operations of AWP for a batch of data. See class documentation for more information on the exact procedure. :param x_batch: batch of x. :param y_batch: batch of y. 
- :return: tuple containing batch data loss, batch data accuracy and number of samples in the batch + :return: Tuple containing batch data loss, batch data accuracy and number of samples in the batch """ import torch from torch import nn @@ -328,15 +324,11 @@ def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray) -> Tuple[floa # Apply preprocessing y_batch = check_and_transform_label_format(y_batch, nb_classes=self.classifier.nb_classes) - x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_batch, y_batch, fit=True - ) - x_preprocessed_pert, _ = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_batch_pert, y_batch, fit=True - ) + x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing(x_batch, y_batch, fit=True) + x_preprocessed_pert, _ = self._classifier._apply_preprocessing(x_batch_pert, y_batch, fit=True) # Check label shape - if self._classifier._reduce_labels: # pylint: disable=W0212 + if self._classifier._reduce_labels: y_preprocessed = np.argmax(y_preprocessed, axis=1) i_batch = torch.from_numpy(x_preprocessed).to(self._classifier.device) @@ -393,7 +385,7 @@ def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray) -> Tuple[floa def _weight_perturbation( self, x_batch: "torch.Tensor", x_batch_pert: "torch.Tensor", y_batch: "torch.Tensor" - ) -> Dict[str, "torch.Tensor"]: + ) -> dict[str, "torch.Tensor"]: """ Calculate wight perturbation for a batch of data. See class documentation for more information on the exact procedure. @@ -450,18 +442,18 @@ def _weight_perturbation( @staticmethod def _calculate_model_params( p_classifier: PyTorchClassifier, - ) -> Tuple[Dict[str, Dict[str, "torch.Tensor"]], "torch.Tensor"]: + ) -> tuple[dict[str, dict[str, "torch.Tensor"]], "torch.Tensor"]: """ Calculates a given model's different layers' parameters' shape and norm, and model parameter norm. :param p_classifier: model for awp protocol. - :return: tuple with first element a dictionary with model parameters' names as keys and a nested dictionary + :return: Tuple with first element a dictionary with model parameters' names as keys and a nested dictionary as value. The nested dictionary contains model parameters, model parameters' size, model parameters' norms. The second element of tuple denotes norm of all model parameters """ import torch - params_dict: Dict[str, Dict[str, "torch.Tensor"]] = OrderedDict() + params_dict: dict[str, dict[str, "torch.Tensor"]] = OrderedDict() list_params = [] for name, param in p_classifier.model.state_dict().items(): if len(param.size()) <= 1: @@ -479,13 +471,13 @@ def _calculate_model_params( return params_dict, model_all_params_norm def _modify_classifier( - self, p_classifier: PyTorchClassifier, list_keys: List[str], w_perturb: Dict[str, "torch.Tensor"], op: str + self, p_classifier: PyTorchClassifier, list_keys: list[str], w_perturb: dict[str, "torch.Tensor"], op: str ) -> None: """ Modify the model's weight parameters according to the weight perturbations. :param p_classifier: model for awp protocol. 
- :param list_keys: list of model parameters' names + :param list_keys: List of model parameters' names :param w_perturb: dictionary containing model parameters' names as keys and model parameters as values :param op: controls whether weight perturbation will be added or subtracted from model parameters """ diff --git a/art/defences/trainer/adversarial_trainer_fbf.py b/art/defences/trainer/adversarial_trainer_fbf.py index ce0d259593..e11b179be0 100644 --- a/art/defences/trainer/adversarial_trainer_fbf.py +++ b/art/defences/trainer/adversarial_trainer_fbf.py @@ -23,10 +23,10 @@ | It was noted that this protocol is sensitive to the use of techniques like data augmentation, gradient clipping, and learning rate schedules. Consequently, framework specific implementations are being provided in ART. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc -from typing import Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -48,7 +48,7 @@ class AdversarialTrainerFBF(Trainer, abc.ABC): def __init__( self, classifier: "CLASSIFIER_LOSS_GRADIENTS_TYPE", - eps: Union[int, float] = 8, + eps: int | float = 8, ): """ Create an :class:`.AdversarialTrainerFBF` instance. @@ -60,14 +60,14 @@ def __init__( super().__init__(classifier) @abc.abstractmethod - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, batch_size: int = 128, nb_epochs: int = 20, - **kwargs + **kwargs, ): """ Train a model adversarially with FBF. See class documentation for more information on the exact procedure. diff --git a/art/defences/trainer/adversarial_trainer_fbf_pytorch.py b/art/defences/trainer/adversarial_trainer_fbf_pytorch.py index b63c50aaa8..ffeabf42b1 100644 --- a/art/defences/trainer/adversarial_trainer_fbf_pytorch.py +++ b/art/defences/trainer/adversarial_trainer_fbf_pytorch.py @@ -20,11 +20,11 @@ | Paper link: https://openreview.net/forum?id=BJx040EFvH """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import time -from typing import Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -52,7 +52,7 @@ class AdversarialTrainerFBFPyTorch(AdversarialTrainerFBF): time making this one of the fastest adversarial training protocol. """ - def __init__(self, classifier: "PyTorchClassifier", eps: Union[int, float] = 8, use_amp: bool = False): + def __init__(self, classifier: "PyTorchClassifier", eps: int | float = 8, use_amp: bool = False): """ Create an :class:`.AdversarialTrainerFBFPyTorch` instance. @@ -68,10 +68,10 @@ def fit( self, x: np.ndarray, y: np.ndarray, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, batch_size: int = 128, nb_epochs: int = 20, - **kwargs + **kwargs, ): """ Train a model adversarially with FBF protocol. 
@@ -194,7 +194,7 @@ def lr_schedule(step_t): train_acc / train_n, ) - def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray, l_r: float) -> Tuple[float, float, float]: + def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray, l_r: float) -> tuple[float, float, float]: """ Perform the operations of FBF for a batch of data. See class documentation for more information on the exact procedure. @@ -205,7 +205,7 @@ def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray, l_r: float) - """ import torch - if self._classifier._optimizer is None: # pylint: disable=W0212 + if self._classifier._optimizer is None: raise ValueError("Optimizer of classifier is currently None, but is required for adversarial training.") n = x_batch.shape[0] @@ -219,40 +219,38 @@ def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray, l_r: float) - x_batch_pert = x_batch + delta # Apply preprocessing - x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_batch_pert, y_batch, fit=True - ) + x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing(x_batch_pert, y_batch, fit=True) # Check label shape - if self._classifier._reduce_labels: # pylint: disable=W0212 + if self._classifier._reduce_labels: y_preprocessed = np.argmax(y_preprocessed, axis=1) - i_batch = torch.from_numpy(x_preprocessed).to(self._classifier._device) # pylint: disable=W0212 - o_batch = torch.from_numpy(y_preprocessed).to(self._classifier._device) # pylint: disable=W0212 + i_batch = torch.from_numpy(x_preprocessed).to(self._classifier._device) + o_batch = torch.from_numpy(y_preprocessed).to(self._classifier._device) # Zero the parameter gradients - self._classifier._optimizer.zero_grad() # pylint: disable=W0212 + self._classifier._optimizer.zero_grad() # Perform prediction - model_outputs = self._classifier._model(i_batch) # pylint: disable=W0212 + model_outputs = self._classifier._model(i_batch) # Form the loss function - loss = self._classifier._loss(model_outputs[-1], o_batch) # pylint: disable=W0212 + loss = self._classifier._loss(model_outputs[-1], o_batch) - self._classifier._optimizer.param_groups[0].update(lr=l_r) # pylint: disable=W0212 + self._classifier._optimizer.param_groups[0].update(lr=l_r) # Actual training if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp - with amp.scale_loss(loss, self._classifier._optimizer) as scaled_loss: # pylint: disable=W0212 + with amp.scale_loss(loss, self._classifier._optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() # clip the gradients - torch.nn.utils.clip_grad_norm_(self._classifier._model.parameters(), 0.5) # pylint: disable=W0212 - self._classifier._optimizer.step() # pylint: disable=W0212 + torch.nn.utils.clip_grad_norm_(self._classifier._model.parameters(), 0.5) + self._classifier._optimizer.step() train_loss = loss.item() * o_batch.size(0) train_acc = (model_outputs[0].max(1)[1] == o_batch).sum().item() diff --git a/art/defences/trainer/adversarial_trainer_madry_pgd.py b/art/defences/trainer/adversarial_trainer_madry_pgd.py index 332d1d8a75..9e132edaed 100644 --- a/art/defences/trainer/adversarial_trainer_madry_pgd.py +++ b/art/defences/trainer/adversarial_trainer_madry_pgd.py @@ -24,8 +24,10 @@ principled approach to making classifiers more robust (see https://arxiv.org/abs/1802.00420), very careful evaluations are required to assess its effectiveness case by case (see https://arxiv.org/abs/1902.06705). 
""" +from __future__ import annotations + import logging -from typing import Optional, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -54,10 +56,10 @@ class AdversarialTrainerMadryPGD(Trainer): def __init__( self, classifier: "CLASSIFIER_LOSS_GRADIENTS_TYPE", - nb_epochs: Optional[int] = 205, - batch_size: Optional[int] = 128, - eps: Union[int, float] = 8, - eps_step: Union[int, float] = 2, + nb_epochs: int | None = 205, + batch_size: int | None = 128, + eps: int | float = 8, + eps_step: int | float = 2, max_iter: int = 7, num_random_init: int = 1, ) -> None: @@ -90,14 +92,14 @@ def __init__( self.trainer = AdversarialTrainer(classifier, self.attack, ratio=1.0) # type: ignore - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, - validation_data: Optional[np.ndarray] = None, - batch_size: Optional[int] = None, - nb_epochs: Optional[int] = None, - **kwargs + validation_data: np.ndarray | None = None, + batch_size: int | None = None, + nb_epochs: int | None = None, + **kwargs, ) -> None: """ Train a model adversarially. See class documentation for more information on the exact procedure. diff --git a/art/defences/trainer/adversarial_trainer_oaat.py b/art/defences/trainer/adversarial_trainer_oaat.py index 0ef9e6b97a..c5e20fed67 100644 --- a/art/defences/trainer/adversarial_trainer_oaat.py +++ b/art/defences/trainer/adversarial_trainer_oaat.py @@ -24,10 +24,11 @@ | It was noted that this protocol uses double perturbation mechanism i.e, perturbation on the input samples and then perturbation on the model parameters. Consequently, framework specific implementations are being provided in ART. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc -from typing import Optional, Tuple, TYPE_CHECKING, Sequence +from collections.abc import Sequence +from typing import TYPE_CHECKING import numpy as np @@ -61,7 +62,7 @@ def __init__( :param classifier: Model to train adversarially. :param proxy_classifier: Model for adversarial weight perturbation. :param lpips_classifier: Weight averaging model for calculating activations. - :param list_avg_models: list of models for weight averaging. + :param list_avg_models: List of models for weight averaging. :param attack: attack to use for data augmentation in adversarial training :param train_params: parameters' dictionary related to adversarial training """ @@ -75,14 +76,14 @@ def __init__( super().__init__(classifier) @abc.abstractmethod - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, batch_size: int = 128, nb_epochs: int = 20, - **kwargs + **kwargs, ): """ Train a model adversarially with OAAT. See class documentation for more information on the exact procedure. @@ -98,12 +99,12 @@ def fit( # pylint: disable=W0221 raise NotImplementedError @abc.abstractmethod - def fit_generator( # pylint: disable=W0221 + def fit_generator( self, generator: DataGenerator, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, nb_epochs: int = 20, - **kwargs + **kwargs, ): """ Train a model adversarially with OAAT using a data generator. 
diff --git a/art/defences/trainer/adversarial_trainer_oaat_pytorch.py b/art/defences/trainer/adversarial_trainer_oaat_pytorch.py index c012d73e89..6a8ba5063d 100644 --- a/art/defences/trainer/adversarial_trainer_oaat_pytorch.py +++ b/art/defences/trainer/adversarial_trainer_oaat_pytorch.py @@ -21,13 +21,13 @@ | Paper link: https://link.springer.com/chapter/10.1007/978-3-031-20065-6_18 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations from collections import OrderedDict import logging import os import time -from typing import Optional, Tuple, TYPE_CHECKING, List, Dict, Union +from typing import TYPE_CHECKING import six import numpy as np @@ -60,7 +60,7 @@ def __init__( classifier: PyTorchClassifier, proxy_classifier: PyTorchClassifier, lpips_classifier: PyTorchClassifier, - list_avg_models: List[PyTorchClassifier], + list_avg_models: list[PyTorchClassifier], attack: EvasionAttack, train_params: dict, ): @@ -70,7 +70,7 @@ def __init__( :param classifier: Model to train adversarially. :param proxy_classifier: Model for adversarial weight perturbation. :param lpips_classifier: Weight averaging model for calculating activations. - :param list_avg_models: list of models for weight averaging. + :param list_avg_models: List of models for weight averaging. :param attack: attack to use for data augmentation in adversarial training. :param train_params: training parameters' dictionary related to adversarial training """ @@ -78,7 +78,7 @@ def __init__( self._classifier: PyTorchClassifier self._proxy_classifier: PyTorchClassifier self._lpips_classifier: PyTorchClassifier - self._list_avg_models: List[PyTorchClassifier] + self._list_avg_models: list[PyTorchClassifier] self._attack: EvasionAttack self._train_params: dict self._apply_wp: bool @@ -88,11 +88,11 @@ def fit( self, x: np.ndarray, y: np.ndarray, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, batch_size: int = 128, nb_epochs: int = 20, **kwargs, - ): # pylint: disable=W0221 + ): """ Train a model adversarially with OAAT protocol. See class documentation for more information on the exact procedure. 
@@ -131,7 +131,7 @@ def fit( self._apply_wp = True if i_epoch == (int(3 * nb_epochs / 4)): - self._classifier._optimizer = torch.optim.SGD( # pylint: disable=W0212 + self._classifier._optimizer = torch.optim.SGD( self._classifier.model.parameters(), lr=self._train_params["lr"], momentum=self._train_params["momentum"], @@ -169,9 +169,7 @@ def fit( else: full_path_load = os.path.join(self._train_params["models_path"], file_name) if os.path.isfile(full_path_load): - self._lpips_classifier._model._model.load_state_dict( # pylint: disable=W0212 - torch.load(full_path_load) - ) + self._lpips_classifier._model._model.load_state_dict(torch.load(full_path_load)) else: raise ValueError("Invalid path/file for weight average model is provided for adversarial training.") @@ -214,13 +212,13 @@ def fit( if validation_data is not None: (x_test, y_test) = validation_data y_test = check_and_transform_label_format(y_test, nb_classes=self.classifier.nb_classes) - # pylint: disable=W0212 + x_preprocessed_test, y_preprocessed_test = self._classifier._apply_preprocessing( x_test, y_test, fit=True, ) - # pylint: enable=W0212 + # pylint: enable=protected-access output_clean = np.argmax(self.predict(x_preprocessed_test), axis=1) nb_correct_clean = np.sum(output_clean == np.argmax(y_preprocessed_test, axis=1)) self._attack.set_params( @@ -229,13 +227,13 @@ def fit( max_iter=self._train_params["max_iter"], ) x_test_adv = self._attack.generate(x_preprocessed_test, y=y_preprocessed_test) - # pylint: disable=W0212 + x_preprocessed_test_adv, y_preprocessed_test = self._classifier._apply_preprocessing( x_test_adv, y_test, fit=True, ) - # pylint: enable=W0212 + # pylint: enable=protected-access output_adv = np.argmax(self.predict(x_preprocessed_test_adv), axis=1) nb_correct_adv = np.sum(output_adv == np.argmax(y_preprocessed_test, axis=1)) @@ -281,17 +279,17 @@ def fit( folder = os.path.split(full_path)[0] if not os.path.exists(folder): os.makedirs(folder) - # pylint: disable=W0212 + # disable pylint because access to _modules required torch.save(p_classifier._model._model.state_dict(), full_path) def fit_generator( self, generator: DataGenerator, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, nb_epochs: int = 20, **kwargs, - ): # pylint: disable=W0221 + ): """ Train a model adversarially with OAAT protocol using a data generator. See class documentation for more information on the exact procedure. 
@@ -330,7 +328,7 @@ def fit_generator( self._apply_wp = True if i_epoch == (int(3 * nb_epochs / 4)): - self._classifier._optimizer = torch.optim.SGD( # pylint: disable=W0212 + self._classifier._optimizer = torch.optim.SGD( self._classifier.model.parameters(), lr=self._train_params["lr"], momentum=self._train_params["momentum"], @@ -368,9 +366,7 @@ def fit_generator( else: full_path_load = os.path.join(self._train_params["models_path"], file_name) if os.path.isfile(full_path_load): - self._lpips_classifier._model._model.load_state_dict( # pylint: disable=W0212 - torch.load(full_path_load) - ) + self._lpips_classifier._model._model.load_state_dict(torch.load(full_path_load)) else: raise ValueError("Invalid path/file for weight average model is provided for adversarial training.") @@ -411,13 +407,13 @@ def fit_generator( if validation_data is not None: (x_test, y_test) = validation_data y_test = check_and_transform_label_format(y_test, nb_classes=self.classifier.nb_classes) - # pylint: disable=W0212 + x_preprocessed_test, y_preprocessed_test = self._classifier._apply_preprocessing( x_test, y_test, fit=True, ) - # pylint: enable=W0212 + # pylint: enable=protected-access output_clean = np.argmax(self.predict(x_preprocessed_test), axis=1) nb_correct_clean = np.sum(output_clean == np.argmax(y_preprocessed_test, axis=1)) self._attack.set_params( @@ -426,13 +422,13 @@ def fit_generator( max_iter=self._train_params["max_iter"], ) x_test_adv = self._attack.generate(x_preprocessed_test, y=y_preprocessed_test) - # pylint: disable=W0212 + x_preprocessed_test_adv, y_preprocessed_test = self._classifier._apply_preprocessing( x_test_adv, y_test, fit=True, ) - # pylint: enable=W0212 + # pylint: enable=protected-access output_adv = np.argmax(self.predict(x_preprocessed_test_adv), axis=1) nb_correct_adv = np.sum(output_adv == np.argmax(y_preprocessed_test, axis=1)) @@ -477,13 +473,13 @@ def fit_generator( folder = os.path.split(full_path)[0] if not os.path.exists(folder): os.makedirs(folder) - # pylint: disable=W0212 + # disable pylint because access to _modules required torch.save(p_classifier._model._model.state_dict(), full_path) def _batch_process( self, i_epoch: int, nb_epochs: int, batch_id: int, x_batch: np.ndarray, y_batch: np.ndarray - ) -> Tuple[float, float, float]: + ) -> tuple[float, float, float]: """ Perform the operations of OAAT for a batch of data. See class documentation for more information on the exact procedure. @@ -493,7 +489,7 @@ def _batch_process( :param batch_id: batch_id of input data. :param x_batch: batch of x. :param y_batch: batch of y. 
- :return: tuple containing batch data loss, batch data accuracy and number of samples in the batch + :return: Tuple containing batch data loss, batch data accuracy and number of samples in the batch """ import torch from torch import nn @@ -544,15 +540,11 @@ def _batch_process( # Apply preprocessing y_batch = check_and_transform_label_format(y_batch, nb_classes=self.classifier.nb_classes) - x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_batch, y_batch, fit=True - ) - x_preprocessed_pert, _ = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_batch_pert, y_batch, fit=True - ) + x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing(x_batch, y_batch, fit=True) + x_preprocessed_pert, _ = self._classifier._apply_preprocessing(x_batch_pert, y_batch, fit=True) # Check label shape - if self._classifier._reduce_labels: # pylint: disable=W0212 + if self._classifier._reduce_labels: y_preprocessed = np.argmax(y_preprocessed, axis=1) i_batch = torch.from_numpy(x_preprocessed).to(self._classifier.device) @@ -633,7 +625,7 @@ def _batch_process( def _weight_perturbation( self, x_batch: "torch.Tensor", x_batch_pert: "torch.Tensor", y_batch: "torch.Tensor" - ) -> Dict[str, "torch.Tensor"]: + ) -> dict[str, "torch.Tensor"]: """ Calculate wight perturbation for a batch of data. See class documentation for more information on the exact procedure. @@ -680,18 +672,18 @@ def _weight_perturbation( @staticmethod def _calculate_model_params( p_classifier: PyTorchClassifier, - ) -> Tuple[Dict[str, Dict[str, "torch.Tensor"]], "torch.Tensor"]: + ) -> tuple[dict[str, dict[str, "torch.Tensor"]], "torch.Tensor"]: """ Calculates a given model's different layers' parameters' shape and norm, and model parameter norm. :param p_classifier: model for awp protocol. - :return: tuple with first element a dictionary with model parameters' names as keys and a nested dictionary + :return: Tuple with first element a dictionary with model parameters' names as keys and a nested dictionary as value. The nested dictionary contains model parameters, model parameters' size, model parameters' norms. The second element of tuple denotes norm of all model parameters """ import torch - params_dict: Dict[str, Dict[str, "torch.Tensor"]] = OrderedDict() + params_dict: dict[str, dict[str, "torch.Tensor"]] = OrderedDict() list_params = [] for name, param in p_classifier.model.state_dict().items(): if len(param.size()) <= 1: @@ -709,7 +701,7 @@ def _calculate_model_params( return params_dict, model_all_params_norm def _modify_classifier( - self, p_classifier: PyTorchClassifier, list_keys: List[str], w_perturb: Dict[str, "torch.Tensor"], op: str + self, p_classifier: PyTorchClassifier, list_keys: list[str], w_perturb: dict[str, "torch.Tensor"], op: str ) -> None: """ Modify the model's weight parameters according to the weight perturbations. @@ -736,8 +728,8 @@ def _modify_classifier( def get_layer_activations( # type: ignore p_classifier: PyTorchClassifier, x: "torch.Tensor", - layers: List[Union[int, str]], - ) -> Tuple[Dict[str, "torch.Tensor"], List[str]]: + layers: list[int | str], + ) -> tuple[dict[str, "torch.Tensor"], list[str]]: """ Return the output of the specified layers for input `x`. `layers` is a list of either layer indices (between 0 and `nb_layers - 1`) or layers' name. 
The number of layers can be determined by counting the results returned by @@ -755,14 +747,14 @@ def get_layer_activations( # type: ignore list_layer_names = [] for layer in layers: if isinstance(layer, six.string_types): - if layer not in p_classifier._layer_names: # pylint: disable=W0212 + if layer not in p_classifier._layer_names: raise ValueError(f"Layer name {layer} not supported") layer_name = layer list_layer_names.append(layer_name) elif isinstance(layer, int): layer_index = layer - layer_name = p_classifier._layer_names[layer_index] # pylint: disable=W0212 + layer_name = p_classifier._layer_names[layer_index] list_layer_names.append(layer_name) else: @@ -770,27 +762,27 @@ def get_layer_activations( # type: ignore def get_feature(name): # the hook signature - def hook(model, input, output): # pylint: disable=W0622,W0613 - p_classifier._features[name] = output # pylint: disable=W0212 + def hook(model, input, output): # pylint: disable=redefined-builtin,unused-argument + p_classifier._features[name] = output return hook if not hasattr(p_classifier, "_features"): - p_classifier._features = {} # pylint: disable=W0212 + p_classifier._features = {} # register forward hooks on the layers of choice for layer_name in list_layer_names: - if layer_name not in p_classifier._features: # pylint: disable=W0212 - interim_layer = dict([*p_classifier._model._model.named_modules()])[layer_name] # pylint: disable=W0212 + if layer_name not in p_classifier._features: + interim_layer = dict([*p_classifier._model._model.named_modules()])[layer_name] interim_layer.register_forward_hook(get_feature(layer_name)) p_classifier.model(x) - return p_classifier._features, list_layer_names # pylint: disable=W0212 + return p_classifier._features, list_layer_names @staticmethod def normalize_concatenate_activations( - activations_dict: Dict[str, "torch.Tensor"], - list_layer_names: List[str], + activations_dict: dict[str, "torch.Tensor"], + list_layer_names: list[str], ) -> "torch.Tensor": """ Takes a dictionary `activations_dict' of activation values of different layers for an input batch and Returns @@ -823,7 +815,7 @@ def calculate_lpips_distance( # type: ignore p_classifier: PyTorchClassifier, input_1: "torch.Tensor", input_2: "torch.Tensor", - layers: List[Union[int, str]], + layers: list[int | str], ) -> "torch.Tensor": """ Return the LPIPS distance between input_1 and input_2. 
`layers` is a list of either layer indices (between 0 and @@ -898,8 +890,8 @@ def _attack_lpips( self, x: np.ndarray, y: np.ndarray, - eps: Union[int, float, np.ndarray], - eps_step: Union[int, float, np.ndarray], + eps: int | float | np.ndarray, + eps_step: int | float | np.ndarray, max_iter: int, training_mode: bool, ) -> np.ndarray: @@ -934,8 +926,8 @@ def _one_step_adv_example( x: "torch.Tensor", x_init: "torch.Tensor", y: "torch.Tensor", - eps: Union[int, float, np.ndarray], - eps_step: Union[int, float, np.ndarray], + eps: int | float | np.ndarray, + eps_step: int | float | np.ndarray, random_init: bool, training_mode: bool, ) -> "torch.Tensor": @@ -1050,22 +1042,14 @@ def _compute_perturbation( x_init = x_init.clone().detach() else: x_init = torch.tensor(x_init).to(self._classifier.device) - inputs_t, y_preprocessed = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_grad, y=y_grad, fit=False, no_grad=False - ) - inputs_init, _ = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_init, y=y_grad, fit=False, no_grad=False - ) + inputs_t, y_preprocessed = self._classifier._apply_preprocessing(x_grad, y=y_grad, fit=False, no_grad=False) + inputs_init, _ = self._classifier._apply_preprocessing(x_init, y=y_grad, fit=False, no_grad=False) elif isinstance(x, np.ndarray): - x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x, y=y, fit=False, no_grad=True - ) + x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing(x, y=y, fit=False, no_grad=True) x_grad = torch.from_numpy(x_preprocessed).to(self._classifier.device) x_grad.requires_grad = True inputs_t = x_grad - x_init_preprocessed, _ = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_init, y=y, fit=False, no_grad=True - ) + x_init_preprocessed, _ = self._classifier._apply_preprocessing(x_init, y=y, fit=False, no_grad=True) x_init = torch.from_numpy(x_init_preprocessed).to(self._classifier.device) x_init.requires_grad = False inputs_init = x_init @@ -1073,7 +1057,7 @@ def _compute_perturbation( raise NotImplementedError("Combination of inputs and preprocessing not supported.") # Check label shape - if self._classifier._reduce_labels: # pylint: disable=W0212 + if self._classifier._reduce_labels: y_preprocessed = self._classifier.reduce_labels(y_preprocessed) if isinstance(y_preprocessed, np.ndarray): @@ -1106,7 +1090,7 @@ def _compute_perturbation( raise ValueError("Gradient term in PyTorch model is `None`.") if not self._classifier.all_framework_preprocessing: - grad = self._classifier._apply_preprocessing_gradient(x, grad) # pylint: disable=W0212 + grad = self._classifier._apply_preprocessing_gradient(x, grad) assert grad.shape == x.shape # Check for nan before normalisation and replace with 0 @@ -1131,7 +1115,7 @@ def _compute_perturbation( return grad def _apply_perturbation( - self, x: "torch.Tensor", perturbation: "torch.Tensor", eps_step: Union[int, float, np.ndarray] + self, x: "torch.Tensor", perturbation: "torch.Tensor", eps_step: int | float | np.ndarray ) -> "torch.Tensor": """ Apply perturbation on examples. @@ -1157,7 +1141,7 @@ def _apply_perturbation( return x def _projection( - self, values: "torch.Tensor", eps: Union[int, float, np.ndarray], norm_p: Union[int, float, str] + self, values: "torch.Tensor", eps: int | float | np.ndarray, norm_p: int | float | str ) -> "torch.Tensor": """ Project `values` on the L_p norm ball of size `eps`. 
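The `_projection` change above (its body continues in the next hunk) only touches the signature; the operation itself, projecting per-sample perturbations onto an L_p ball of radius `eps`, reduces to an element-wise clamp for L-infinity and a rescaling for L2. A small sketch under those assumptions, not ART's implementation (which, per the signature, also accepts string norms and per-feature `eps` arrays):

import numpy as np
import torch

def project(values: torch.Tensor, eps: float, norm_p) -> torch.Tensor:
    # Project each sample in `values` onto the L_p ball of radius eps.
    flat = values.reshape(values.shape[0], -1)
    if norm_p in (np.inf, "inf"):
        # L-infinity: element-wise clamp to [-eps, eps].
        flat = flat.clamp(-eps, eps)
    elif norm_p == 2:
        # L2: rescale any sample whose norm exceeds eps back onto the sphere.
        norms = flat.norm(p=2, dim=1, keepdim=True)
        flat = flat * torch.clamp(eps / (norms + 1e-12), max=1.0)
    else:
        raise ValueError("Only norm_p in {2, np.inf} is handled in this sketch.")
    return flat.reshape(values.shape)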
@@ -1209,4 +1193,4 @@ def _projection( values = values_tmp.reshape(values.shape) - return values # pylint: disable=C0302 + return values diff --git a/art/defences/trainer/adversarial_trainer_trades.py b/art/defences/trainer/adversarial_trainer_trades.py index 13d21babc2..74b21d6f95 100644 --- a/art/defences/trainer/adversarial_trainer_trades.py +++ b/art/defences/trainer/adversarial_trainer_trades.py @@ -24,10 +24,10 @@ loss on clean data and KL divergence loss between clean data and adversarial data. Consequently, framework specific implementations are being provided in ART. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -65,14 +65,14 @@ def __init__( super().__init__(classifier) @abc.abstractmethod - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, batch_size: int = 128, nb_epochs: int = 20, - **kwargs + **kwargs, ): """ Train a model adversarially with TRADES. See class documentation for more information on the exact procedure. diff --git a/art/defences/trainer/adversarial_trainer_trades_pytorch.py b/art/defences/trainer/adversarial_trainer_trades_pytorch.py index 3763d571e8..e2a857f24d 100644 --- a/art/defences/trainer/adversarial_trainer_trades_pytorch.py +++ b/art/defences/trainer/adversarial_trainer_trades_pytorch.py @@ -20,11 +20,11 @@ | Paper link: https://proceedings.mlr.press/v97/zhang19p.html """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import time -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -66,12 +66,12 @@ def fit( self, x: np.ndarray, y: np.ndarray, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, batch_size: int = 128, nb_epochs: int = 20, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, - **kwargs - ): # pylint: disable=W0221 + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, + **kwargs, + ): """ Train a model adversarially with TRADES protocol. See class documentation for more information on the exact procedure. 
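The TRADES hunks that follow leave the objective untouched: a clean cross-entropy term plus `beta` times the KL divergence between the (clamped) softmax on clean inputs and the log-softmax on perturbed inputs. A condensed standalone sketch of that loss; `EPS` and the use of `F.cross_entropy` for the clean term are assumptions of this example:

import torch
import torch.nn.functional as F
from torch import nn

EPS = 1e-8  # small constant to keep the softmax away from zero, as in the diff

def trades_loss(logits_clean: torch.Tensor, logits_adv: torch.Tensor,
                targets: torch.Tensor, beta: float = 6.0) -> torch.Tensor:
    n = logits_clean.shape[0]
    loss_clean = F.cross_entropy(logits_clean, targets)
    loss_kl = (1.0 / n) * nn.KLDivLoss(reduction="sum")(
        F.log_softmax(logits_adv, dim=1),
        torch.clamp(F.softmax(logits_clean, dim=1), min=EPS),
    )
    return loss_clean + beta * loss_kl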
@@ -88,10 +88,10 @@ def fit( import torch logger.info("Performing adversarial training with TRADES protocol") - # pylint: disable=W0212 + if (scheduler is not None) and ( not isinstance(scheduler, torch.optim.lr_scheduler._LRScheduler) - ): # pylint: enable=W0212 + ): # pylint: enable=protected-access raise ValueError("Invalid Pytorch scheduler is provided for adversarial training.") nb_batches = int(np.ceil(len(x) / batch_size)) @@ -104,9 +104,7 @@ def fit( (x_test, y_test) = validation_data y_test = check_and_transform_label_format(y_test, nb_classes=self.classifier.nb_classes) - x_preprocessed_test, y_preprocessed_test = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_test, y_test, fit=True - ) + x_preprocessed_test, y_preprocessed_test = self._classifier._apply_preprocessing(x_test, y_test, fit=True) for i_epoch in trange(nb_epochs, desc="Adversarial Training TRADES - Epochs"): # Shuffle the examples @@ -158,9 +156,9 @@ def fit_generator( self, generator: DataGenerator, nb_epochs: int = 20, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, - **kwargs - ): # pylint: disable=W0221 + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, + **kwargs, + ): """ Train a model adversarially with TRADES protocol using a data generator. See class documentation for more information on the exact procedure. @@ -175,10 +173,9 @@ def fit_generator( logger.info("Performing adversarial training with TRADES protocol") - # pylint: disable=W0212 if (scheduler is not None) and ( not isinstance(scheduler, torch.optim.lr_scheduler._LRScheduler) - ): # pylint: enable=W0212 + ): # pylint: enable=protected-access raise ValueError("Invalid Pytorch scheduler is provided for adversarial training.") size = generator.size @@ -196,7 +193,7 @@ def fit_generator( train_acc = 0.0 train_n = 0.0 - for batch_id in range(nb_batches): # pylint: disable=W0612 + for batch_id in range(nb_batches): # pylint: disable=unused-variable # Create batch data x_batch, y_batch = generator.get_batch() x_batch = x_batch.copy() @@ -219,67 +216,63 @@ def fit_generator( train_acc / train_n, ) - def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray) -> Tuple[float, float, float]: + def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray) -> tuple[float, float, float]: """ Perform the operations of TRADES for a batch of data. See class documentation for more information on the exact procedure. :param x_batch: batch of x. :param y_batch: batch of y. 
- :return: tuple containing batch data loss, batch data accuracy and number of samples in the batch + :return: Tuple containing batch data loss, batch data accuracy and number of samples in the batch """ import torch from torch import nn import torch.nn.functional as F - if self._classifier._optimizer is None: # pylint: disable=W0212 + if self._classifier._optimizer is None: raise ValueError("Optimizer of classifier is currently None, but is required for adversarial training.") n = x_batch.shape[0] - self._classifier._model.train(mode=False) # pylint: disable=W0212 + self._classifier._model.train(mode=False) x_batch_pert = self._attack.generate(x_batch, y=y_batch) # Apply preprocessing y_batch = check_and_transform_label_format(y_batch, nb_classes=self.classifier.nb_classes) - x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_batch, y_batch, fit=True - ) - x_preprocessed_pert, _ = self._classifier._apply_preprocessing( # pylint: disable=W0212 - x_batch_pert, y_batch, fit=True - ) + x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing(x_batch, y_batch, fit=True) + x_preprocessed_pert, _ = self._classifier._apply_preprocessing(x_batch_pert, y_batch, fit=True) # Check label shape - if self._classifier._reduce_labels: # pylint: disable=W0212 + if self._classifier._reduce_labels: y_preprocessed = np.argmax(y_preprocessed, axis=1) - i_batch = torch.from_numpy(x_preprocessed).to(self._classifier._device) # pylint: disable=W0212 - i_batch_pert = torch.from_numpy(x_preprocessed_pert).to(self._classifier._device) # pylint: disable=W0212 - o_batch = torch.from_numpy(y_preprocessed).to(self._classifier._device) # pylint: disable=W0212 + i_batch = torch.from_numpy(x_preprocessed).to(self._classifier._device) + i_batch_pert = torch.from_numpy(x_preprocessed_pert).to(self._classifier._device) + o_batch = torch.from_numpy(y_preprocessed).to(self._classifier._device) - self._classifier._model.train(mode=True) # pylint: disable=W0212 + self._classifier._model.train(mode=True) # Zero the parameter gradients - self._classifier._optimizer.zero_grad() # pylint: disable=W0212 + self._classifier._optimizer.zero_grad() # Perform prediction - model_outputs = self._classifier._model(i_batch) # pylint: disable=W0212 - model_outputs_pert = self._classifier._model(i_batch_pert) # pylint: disable=W0212 + model_outputs = self._classifier._model(i_batch) + model_outputs_pert = self._classifier._model(i_batch_pert) # Form the loss function - loss_clean = self._classifier._loss(model_outputs[-1], o_batch) # pylint: disable=W0212 + loss_clean = self._classifier._loss(model_outputs[-1], o_batch) loss_kl = (1.0 / n) * nn.KLDivLoss(reduction="sum")( F.log_softmax(model_outputs_pert[-1], dim=1), torch.clamp(F.softmax(model_outputs[-1], dim=1), min=EPS) ) loss = loss_clean + self._beta * loss_kl loss.backward() - self._classifier._optimizer.step() # pylint: disable=W0212 + self._classifier._optimizer.step() train_loss = loss.item() * o_batch.size(0) train_acc = (model_outputs_pert[0].max(1)[1] == o_batch).sum().item() train_n = o_batch.size(0) - self._classifier._model.train(mode=False) # pylint: disable=W0212 + self._classifier._model.train(mode=False) return train_loss, train_acc, train_n diff --git a/art/defences/trainer/certified_adversarial_trainer_pytorch.py b/art/defences/trainer/certified_adversarial_trainer_pytorch.py index ab3bac9e36..313f317a5f 100644 --- a/art/defences/trainer/certified_adversarial_trainer_pytorch.py +++ 
b/art/defences/trainer/certified_adversarial_trainer_pytorch.py @@ -21,6 +21,8 @@ | Paper link: http://proceedings.mlr.press/v80/mirman18b/mirman18b.pdf | Paper link: https://arxiv.org/pdf/1810.12715.pdf """ +from __future__ import annotations + import logging import math import random @@ -36,9 +38,9 @@ from art.utils import check_and_transform_label_format if sys.version_info >= (3, 8): - from typing import TypedDict, List, Optional, Any, Tuple, Union, TYPE_CHECKING + from typing import TypedDict, Any, TYPE_CHECKING else: - from typing import Dict, List, Optional, Any, Tuple, Union, TYPE_CHECKING + from typing import Any, TYPE_CHECKING from functools import reduce if TYPE_CHECKING: @@ -59,7 +61,7 @@ class PGDParamDict(TypedDict): batch_size: int else: - PGDParamDict: Dict[str, Union[int, float]] + PGDParamDict: dict[str, int | float] logger = logging.getLogger(__name__) @@ -99,14 +101,14 @@ class AdversarialTrainerCertifiedPytorch(Trainer): def __init__( self, classifier: "CERTIFIER_TYPE", - nb_epochs: Optional[int] = 20, + nb_epochs: int | None = 20, bound: float = 0.1, loss_weighting: float = 0.1, batch_size: int = 10, use_certification_schedule: bool = True, - certification_schedule: Optional[Any] = None, + certification_schedule: Any | None = None, augment_with_pgd: bool = True, - pgd_params: Optional["PGDParamDict"] = None, + pgd_params: "PGDParamDict" | None = None, ) -> None: """ Create an :class:`.AdversarialTrainerCertified` instance. @@ -166,15 +168,15 @@ def __init__( num_random_init=self.pgd_params["num_random_init"], ) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, certification_loss: Any = "interval_loss_cce", - batch_size: Optional[int] = None, - nb_epochs: Optional[int] = None, + batch_size: int | None = None, + nb_epochs: int | None = None, training_mode: bool = True, - scheduler: Optional[Any] = None, + scheduler: Any | None = None, verbose: bool = True, **kwargs, ) -> None: @@ -211,7 +213,7 @@ def fit( # pylint: disable=W0221 raise ValueError("Value of `epochs` not defined.") # Set model mode - self.classifier._model.train(mode=training_mode) # pylint: disable=W0212 + self.classifier._model.train(mode=training_mode) if self.classifier.optimizer is None: # pragma: no cover raise ValueError("An optimizer is needed to train the model, but none is provided.") @@ -360,8 +362,8 @@ def fit( # pylint: disable=W0221 loss = certified_loss * self.loss_weighting + non_cert_loss * (1 - self.loss_weighting) # Do training - if self.classifier._use_amp: # pragma: no cover # pylint: disable=W0212 - from apex import amp # pylint: disable=E0611 + if self.classifier._use_amp: # pragma: no cover + from apex import amp with amp.scale_loss(loss, self.classifier.optimizer) as scaled_loss: scaled_loss.backward() @@ -390,7 +392,7 @@ def predict(self, x: np.ndarray, **kwargs) -> np.ndarray: return self.classifier.predict(x, **kwargs) - def predict_zonotopes(self, cent: np.ndarray, bound, **kwargs) -> Tuple[List[np.ndarray], List[np.ndarray]]: + def predict_zonotopes(self, cent: np.ndarray, bound, **kwargs) -> tuple[list[np.ndarray], list[np.ndarray]]: """ Perform prediction using the adversarially trained classifier using zonotopes diff --git a/art/defences/trainer/dp_instahide_trainer.py b/art/defences/trainer/dp_instahide_trainer.py index bc843a339d..bbcffa4b97 100644 --- a/art/defences/trainer/dp_instahide_trainer.py +++ b/art/defences/trainer/dp_instahide_trainer.py @@ -23,12 +23,12 @@ | This training method is dependent to the choice of data augmentation 
and noise parameters. Consequently, framework specific implementations are being provided in ART. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import sys import time -from typing import List, Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -60,10 +60,10 @@ class DPInstaHideTrainer(Trainer): def __init__( self, classifier: "CLASSIFIER_LOSS_GRADIENTS_TYPE", - augmentations: Union["Preprocessor", List["Preprocessor"]], + augmentations: "Preprocessor" | list["Preprocessor"], noise: Literal["gaussian", "laplacian", "exponential"] = "laplacian", - loc: Union[int, float] = 0.0, - scale: Union[int, float] = 0.03, + loc: int | float = 0.0, + scale: int | float = 0.03, clip_values: "CLIP_VALUES_TYPE" = (0.0, 1.0), ): """ @@ -104,14 +104,14 @@ def _generate_noise(self, x: np.ndarray) -> np.ndarray: return x_noise.astype(x.dtype) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, - validation_data: Optional[Tuple[np.ndarray, np.ndarray]] = None, + validation_data: tuple[np.ndarray, np.ndarray] | None = None, batch_size: int = 128, nb_epochs: int = 20, - **kwargs + **kwargs, ): """ Train a model adversarially with the DP-InstaHide protocol. diff --git a/art/defences/trainer/ibp_certified_trainer_pytorch.py b/art/defences/trainer/ibp_certified_trainer_pytorch.py index b25ffdbdfb..ee0bf3db71 100644 --- a/art/defences/trainer/ibp_certified_trainer_pytorch.py +++ b/art/defences/trainer/ibp_certified_trainer_pytorch.py @@ -21,6 +21,8 @@ | Paper link: http://proceedings.mlr.press/v80/mirman18b/mirman18b.pdf | Paper link: https://arxiv.org/pdf/1810.12715.pdf """ +from __future__ import annotations + import logging import random import sys @@ -34,9 +36,9 @@ from art.utils import check_and_transform_label_format if sys.version_info >= (3, 8): - from typing import TypedDict, List, Optional, Any, Union, TYPE_CHECKING + from typing import TypedDict, Any, TYPE_CHECKING else: - from typing import Dict, List, Optional, Any, Union, TYPE_CHECKING + from typing import Any, TYPE_CHECKING if TYPE_CHECKING: from art.utils import IBP_CERTIFIER_TYPE @@ -56,7 +58,7 @@ class PGDParamDict(TypedDict): batch_size: int else: - PGDParamDict: Dict[str, Union[int, float]] + PGDParamDict: dict[str, int | float] logger = logging.getLogger(__name__) @@ -107,16 +109,16 @@ class AdversarialTrainerCertifiedIBPPyTorch(Trainer): def __init__( self, classifier: "IBP_CERTIFIER_TYPE", - nb_epochs: Optional[int] = 20, + nb_epochs: int | None = 20, bound: float = 0.1, batch_size: int = 32, - loss_weighting: Optional[int] = None, + loss_weighting: int | None = None, use_certification_schedule: bool = True, - certification_schedule: Optional[Any] = None, + certification_schedule: Any | None = None, use_loss_weighting_schedule: bool = True, - loss_weighting_schedule: Optional[Any] = None, + loss_weighting_schedule: Any | None = None, augment_with_pgd: bool = False, - pgd_params: Optional["PGDParamDict"] = None, + pgd_params: "PGDParamDict" | None = None, ) -> None: """ Create an :class:`.AdversarialTrainerCertified` instance. 
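For the `DPInstaHideTrainer` hunks above, the nearby behaviour is `_generate_noise`: draw Gaussian, Laplacian, or exponential noise with the configured `loc`/`scale`, add it to the (augmented) batch, and clip back into `clip_values`. A rough sketch of what such a step amounts to, not the exact ART code:

import numpy as np

def add_dp_noise(x: np.ndarray, noise: str = "laplacian", loc: float = 0.0,
                 scale: float = 0.03, clip_values=(0.0, 1.0)) -> np.ndarray:
    # Add calibrated random noise to a batch and clip back into the valid range.
    if noise == "gaussian":
        eta = np.random.normal(loc=loc, scale=scale, size=x.shape)
    elif noise == "laplacian":
        eta = np.random.laplace(loc=loc, scale=scale, size=x.shape)
    elif noise == "exponential":
        eta = np.random.exponential(scale=scale, size=x.shape)
    else:
        raise ValueError(f"Unsupported noise type: {noise}")
    x_noise = np.clip(x + eta, clip_values[0], clip_values[1])
    return x_noise.astype(x.dtype)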
@@ -221,16 +223,16 @@ def initialise_default_scheduler(initial_val: float, final_val: float, epochs: i warmup=warm_up, ) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, - limits: Optional[Union[List[float], np.ndarray]] = None, + limits: list[float] | np.ndarray | None = None, certification_loss: Any = "interval_loss_cce", - batch_size: Optional[int] = None, - nb_epochs: Optional[int] = None, + batch_size: int | None = None, + nb_epochs: int | None = None, training_mode: bool = True, - scheduler: Optional[Any] = None, + scheduler: Any | None = None, verbose: bool = True, **kwargs, ) -> None: @@ -274,7 +276,7 @@ def fit( # pylint: disable=W0221 ) # Set model mode - self.classifier._model.train(mode=training_mode) # pylint: disable=W0212 + self.classifier._model.train(mode=training_mode) if self.classifier.optimizer is None: # pragma: no cover raise ValueError("An optimizer is needed to train the model, but none is provided.") @@ -395,8 +397,8 @@ def fit( # pylint: disable=W0221 loss = certified_loss * loss_weighting_k + non_cert_loss * (1 - loss_weighting_k) # Do training - if self.classifier._use_amp: # pragma: no cover # pylint: disable=W0212 - from apex import amp # pylint: disable=E0611 + if self.classifier._use_amp: # pragma: no cover + from apex import amp with amp.scale_loss(loss, self.classifier.optimizer) as scaled_loss: scaled_loss.backward() @@ -439,8 +441,8 @@ def predict_intervals( self, x: np.ndarray, is_interval: bool = False, - bounds: Optional[Union[float, List[float], np.ndarray]] = None, - limits: Optional[Union[List[float], np.ndarray]] = None, + bounds: float | list[float] | np.ndarray | None = None, + limits: list[float] | np.ndarray | None = None, batch_size: int = 128, **kwargs, ) -> np.ndarray: diff --git a/art/defences/transformer/evasion/defensive_distillation.py b/art/defences/transformer/evasion/defensive_distillation.py index 9ba95cc916..3c2f8eee26 100644 --- a/art/defences/transformer/evasion/defensive_distillation.py +++ b/art/defences/transformer/evasion/defensive_distillation.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -90,7 +90,7 @@ def __call__(self, x: np.ndarray, transformed_classifier: "CLASSIFIER_TYPE") -> return transformed_classifier - def fit(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> None: + def fit(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> None: """ No parameters to learn for this method; do nothing. 
""" diff --git a/art/defences/transformer/poisoning/neural_cleanse.py b/art/defences/transformer/poisoning/neural_cleanse.py index e98e395573..54367e528e 100644 --- a/art/defences/transformer/poisoning/neural_cleanse.py +++ b/art/defences/transformer/poisoning/neural_cleanse.py @@ -20,10 +20,10 @@ | Paper link: http://people.cs.uchicago.edu/~ravenben/publications/abstracts/backdoor-sp19.html """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, TYPE_CHECKING, Union +from typing import TYPE_CHECKING import numpy as np @@ -74,7 +74,7 @@ def __call__( # type: ignore transformed_classifier: "CLASSIFIER_TYPE", steps: int = 1000, init_cost: float = 1e-3, - norm: Union[int, float] = 2, + norm: int | float = 2, learning_rate: float = 0.1, attack_success_threshold: float = 0.99, patience: int = 5, @@ -122,7 +122,7 @@ def __call__( # type: ignore ) return transformed_classifier - def fit(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> None: + def fit(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> None: """ No parameters to learn for this method; do nothing. """ diff --git a/art/defences/transformer/poisoning/strip.py b/art/defences/transformer/poisoning/strip.py index dcd1463c1d..45c7e60502 100644 --- a/art/defences/transformer/poisoning/strip.py +++ b/art/defences/transformer/poisoning/strip.py @@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -80,7 +80,7 @@ def __call__( # type: ignore return self.classifier # type: ignore - def fit(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> None: + def fit(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> None: """ No parameters to learn for this method; do nothing. """ diff --git a/art/defences/transformer/transformer.py b/art/defences/transformer/transformer.py index 4400ed1ac0..da6419db5a 100644 --- a/art/defences/transformer/transformer.py +++ b/art/defences/transformer/transformer.py @@ -21,7 +21,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import abc -from typing import List, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -34,7 +34,7 @@ class Transformer(abc.ABC): Abstract base class for transformation defences. """ - params: List[str] = [] + params: list[str] = [] def __init__(self, classifier: "CLASSIFIER_TYPE") -> None: """ @@ -74,7 +74,7 @@ def __call__(self, x: np.ndarray, transformed_classifier: "CLASSIFIER_TYPE") -> raise NotImplementedError @abc.abstractmethod - def fit(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> None: + def fit(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> None: """ Fit the parameters of the transformer if it has any. 
diff --git a/art/estimators/certification/abstain.py b/art/estimators/certification/abstain.py index f4fb29db74..78f8b004c6 100644 --- a/art/estimators/certification/abstain.py +++ b/art/estimators/certification/abstain.py @@ -35,13 +35,6 @@ class AbstainPredictorMixin(ClassifierMixin): A mixin class that gives classifiers the ability to abstain """ - def __init__(self, **kwargs): - """ - Creates a predictor that can abstain from predictions - - """ - super().__init__(**kwargs) - def abstain(self) -> np.ndarray: """ Abstain from a prediction diff --git a/art/estimators/certification/deep_z/deep_z.py b/art/estimators/certification/deep_z/deep_z.py index 6efccb2df7..66016a8571 100644 --- a/art/estimators/certification/deep_z/deep_z.py +++ b/art/estimators/certification/deep_z/deep_z.py @@ -20,7 +20,7 @@ | Paper link: https://papers.nips.cc/paper/2018/file/f2f446980d8e971ef3da97af089481c3-Paper.pdf """ -from typing import Tuple, Union +from __future__ import annotations import numpy as np import torch @@ -135,7 +135,7 @@ def certify_via_subtraction( return np.sign(lbs) < 0 and np.sign(ubs) < 0 - def zonotope_get_bounds(self, cent: "torch.Tensor", eps: "torch.Tensor") -> Tuple[list, list]: + def zonotope_get_bounds(self, cent: "torch.Tensor", eps: "torch.Tensor") -> tuple[list, list]: """ Compute the upper and lower bounds for the final zonotopes @@ -157,7 +157,7 @@ def zonotope_get_bounds(self, cent: "torch.Tensor", eps: "torch.Tensor") -> Tupl return upper_bounds_output, lower_bounds_output @staticmethod - def adjust_to_within_bounds(cent: np.ndarray, eps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def adjust_to_within_bounds(cent: np.ndarray, eps: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Simple helper function to pre-process and adjust zonotope values to be within 0 - 1 range. This is written with image data from MNIST and CIFAR10 in mind using L-infty bounds. @@ -180,7 +180,7 @@ def adjust_to_within_bounds(cent: np.ndarray, eps: np.ndarray) -> Tuple[np.ndarr return cent, eps - def pre_process(self, cent: np.ndarray, eps: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def pre_process(self, cent: np.ndarray, eps: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Simple helper function to reshape and adjust the zonotope values before pushing through the neural network. This is written with image data from MNIST and CIFAR10 in mind using L-infty bounds. 
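`zonotope_get_bounds` above concretizes the final zonotopes; the lower and upper bounds are simply the center shifted by the sum of absolute error coefficients. A tiny NumPy sketch of that computation (stacking the error terms along the first axis is an assumption of this example, not necessarily ART's layout):

import numpy as np

def zonotope_bounds(cent: np.ndarray, eps_terms: np.ndarray):
    # Each error term can contribute at most |coefficient| in either direction.
    radius = np.abs(eps_terms).sum(axis=0)
    return cent - radius, cent + radius

cent = np.array([1.0, -0.5])                     # zonotope center (e.g. logits)
eps_terms = np.array([[0.2, 0.1], [0.05, 0.3]])  # two error terms
print(zonotope_bounds(cent, eps_terms))          # lower [0.75, -0.9], upper [1.25, -0.1]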
@@ -213,10 +213,10 @@ def __init__(
         self,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[int, Tuple[int, int]],
-        stride: Union[int, Tuple[int, int]],
-        dilation: Union[int, Tuple[int, int]] = 1,
-        padding: Union[int, Tuple[int, int]] = 0,
+        kernel_size: int | tuple[int, int],
+        stride: int | tuple[int, int],
+        dilation: int | tuple[int, int] = 1,
+        padding: int | tuple[int, int] = 0,
     ):
         super().__init__()
         self.conv = torch.nn.Conv2d(
diff --git a/art/estimators/certification/deep_z/pytorch.py b/art/estimators/certification/deep_z/pytorch.py
index d17d2a1e7f..daeae79157 100644
--- a/art/estimators/certification/deep_z/pytorch.py
+++ b/art/estimators/certification/deep_z/pytorch.py
@@ -20,8 +20,10 @@
 | Paper link: https://papers.nips.cc/paper/2018/file/f2f446980d8e971ef3da97af089481c3-Paper.pdf
 """
+from __future__ import annotations
 
-from typing import List, Optional, Tuple, Union, Callable, Any, TYPE_CHECKING
+from collections.abc import Callable
+from typing import Any, TYPE_CHECKING
 
 import logging
 import math
@@ -49,7 +51,7 @@ class ConvertedModel(torch.nn.Module):
     which uses abstract operations
     """
 
-    def __init__(self, model: "torch.nn.Module", channels_first: bool, input_shape: Tuple[int, ...]):
+    def __init__(self, model: "torch.nn.Module", channels_first: bool, input_shape: tuple[int, ...]):
         super().__init__()
         modules = []
         self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -57,7 +59,6 @@ def __init__(self, model: "torch.nn.Module", channels_first: bool, input_shape:
         self.forward_mode = "abstract"
         self.reshape_op_num = -1
 
-        # pylint: disable=W0613
         def forward_hook(input_module, hook_input, hook_output):
             modules.append(input_module)
 
@@ -111,8 +112,8 @@ def forward_hook(input_module, hook_input, hook_output):
             print("Inferred reshape on op num", op_num)
 
     def forward(
-        self, cent: np.ndarray, eps: Optional[np.ndarray] = None
-    ) -> Union["torch.Tensor", Tuple["torch.Tensor", "torch.Tensor"]]:
+        self, cent: np.ndarray, eps: np.ndarray | None = None
+    ) -> "torch.Tensor" | tuple["torch.Tensor", "torch.Tensor"]:
         """
         Performs the neural network forward pass, either using abstract operations or concrete ones
         depending on the value of self.forward_mode
@@ -130,7 +131,7 @@ def forward(
             raise ValueError("for abstract forward mode, please provide both cent and eps")
         raise ValueError("forward_mode must be set to abstract or concrete")
 
-    def abstract_forward(self, cent: np.ndarray, eps: np.ndarray) -> Tuple["torch.Tensor", "torch.Tensor"]:
+    def abstract_forward(self, cent: np.ndarray, eps: np.ndarray) -> tuple["torch.Tensor", "torch.Tensor"]:
         """
         Do the forward pass through the NN with the given error terms and zonotope center.
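`ConvertedModel` above discovers the concrete layer sequence by registering a forward hook on every module and pushing one sample through the network; the hook simply records the executed module. A self-contained toy version of that pattern (the three-layer model is made up):

import torch

model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
executed = []

def forward_hook(module, hook_input, hook_output):
    # Called after each module's forward pass; records execution order.
    executed.append(type(module).__name__)

handles = [m.register_forward_hook(forward_hook) for m in model.children()]
model(torch.randn(1, 4))   # one dummy pass populates `executed`
print(executed)            # ['Linear', 'ReLU', 'Linear']
for handle in handles:
    handle.remove()        # tidy up the hooks afterwards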
@@ -150,7 +151,7 @@ def abstract_forward(self, cent: np.ndarray, eps: np.ndarray) -> Tuple["torch.Te x = op(x) return x[0, :], x[1:, :] - def concrete_forward(self, in_x: Union[np.ndarray, "torch.Tensor"]) -> "torch.Tensor": + def concrete_forward(self, in_x: np.ndarray | "torch.Tensor") -> "torch.Tensor": """ Do the forward pass using the concrete operations @@ -193,16 +194,16 @@ def __init__( self, model: "torch.nn.Module", loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], + input_shape: tuple[int, ...], nb_classes: int, - optimizer: Optional["torch.optim.Optimizer"] = None, + optimizer: "torch.optim.Optimizer" | None = None, channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", - concrete_to_zonotope: Optional[Callable] = None, + concrete_to_zonotope: Callable | None = None, ): """ Create a certifier based on the zonotope domain. @@ -246,7 +247,7 @@ def __init__( converted_model = ConvertedModel(model, channels_first, input_shape) if TYPE_CHECKING: - converted_optimizer: Union[torch.optim.Adam, torch.optim.SGD, None] + converted_optimizer: torch.optim.Adam | torch.optim.SGD | None if optimizer is not None: opt_state_dict = optimizer.state_dict() @@ -279,9 +280,9 @@ def __init__( device_type=device_type, ) - def predict_zonotopes( # pylint: disable=W0613 + def predict_zonotopes( self, cent: np.ndarray, bound: float, training_mode: bool = True, **kwargs - ) -> Tuple[List[np.ndarray], List[np.ndarray]]: + ) -> tuple[list[np.ndarray], list[np.ndarray]]: """ :param cent: The datapoint, representing the zonotope center. @@ -358,7 +359,7 @@ def concrete_loss(self, output: "torch.Tensor", target: "torch.Tensor") -> "torc """ return self._loss(output, target) - def apply_preprocessing(self, x: np.ndarray, y: np.ndarray, fit: bool) -> Tuple[Any, Any]: + def apply_preprocessing(self, x: np.ndarray, y: np.ndarray, fit: bool) -> tuple[Any, Any]: """ Access function to get preprocessing @@ -371,7 +372,7 @@ def apply_preprocessing(self, x: np.ndarray, y: np.ndarray, fit: bool) -> Tuple[ x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=fit) return x_preprocessed, y_preprocessed - def max_logit_loss(self, prediction: "torch.Tensor", target: "torch.Tensor") -> Union["torch.Tensor", None]: + def max_logit_loss(self, prediction: "torch.Tensor", target: "torch.Tensor") -> "torch.Tensor" | None: """ Computes the loss as the largest logit value amongst the incorrect classes. 
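The `max_logit_loss` docstring above asks for the largest logit amongst the incorrect classes; one compact way to get that is to mask out the labelled class before taking the maximum. An illustrative sketch, not necessarily ART's exact batch reduction:

import torch

def max_incorrect_logit(prediction: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # Per-sample maximum logit over all classes except the labelled one.
    masked = prediction.clone()
    masked[torch.arange(prediction.shape[0]), target] = float("-inf")
    return masked.max(dim=1).values

logits = torch.tensor([[2.0, 0.5, -1.0], [0.1, 3.0, 0.2]])
labels = torch.tensor([0, 1])
print(max_incorrect_logit(logits, labels))   # tensor([0.5000, 0.2000])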
@@ -410,7 +411,7 @@ def interval_loss_cce(prediction: "torch.Tensor", target: "torch.Tensor") -> "to return criterion(ubs, target) @staticmethod - def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: + def get_accuracy(preds: np.ndarray | "torch.Tensor", labels: np.ndarray | "torch.Tensor") -> np.ndarray: """ Helper function to print out the accuracy during training diff --git a/art/estimators/certification/derandomized_smoothing/ablators/ablate.py b/art/estimators/certification/derandomized_smoothing/ablators/ablate.py index 3970b5b862..32cf1c1f79 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/ablate.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/ablate.py @@ -18,15 +18,15 @@ """ This module implements the abstract base class for the ablators. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations from abc import ABC, abstractmethod -from typing import Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf import torch @@ -38,7 +38,7 @@ class BaseAblator(ABC): @abstractmethod def __call__( - self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None + self, x: np.ndarray, column_pos: int | list | None = None, row_pos: int | list | None = None ) -> np.ndarray: """ Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location @@ -52,8 +52,8 @@ def __call__( @abstractmethod def certify( - self, pred_counts: np.ndarray, size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] - ) -> Union[Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"], Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]]: + self, pred_counts: np.ndarray, size_to_certify: int, label: np.ndarray | "tf.Tensor" + ) -> tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"] | tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. @@ -65,7 +65,7 @@ def certify( raise NotImplementedError @abstractmethod - def ablate(self, x: np.ndarray, column_pos: int, row_pos: int) -> Union[np.ndarray, "torch.Tensor"]: + def ablate(self, x: np.ndarray, column_pos: int, row_pos: int) -> np.ndarray | "torch.Tensor": """ Ablate the image x at location specified by "column_pos" for the case of column ablation or at the location specified by "column_pos" and "row_pos" in the case of block ablation. 
@@ -78,8 +78,8 @@ def ablate(self, x: np.ndarray, column_pos: int, row_pos: int) -> Union[np.ndarr @abstractmethod def forward( - self, x: np.ndarray, column_pos: Optional[int] = None, row_pos: Optional[int] = None - ) -> Union[np.ndarray, "torch.Tensor"]: + self, x: np.ndarray, column_pos: int | None = None, row_pos: int | None = None + ) -> np.ndarray | "torch.Tensor": """ Ablate batch of data at locations specified by column_pos and row_pos diff --git a/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py index 1f1ad1aeec..ee57bce8e0 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py @@ -23,8 +23,8 @@ | Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf """ +from __future__ import annotations -from typing import Optional, Union, Tuple import random import numpy as np @@ -71,8 +71,8 @@ def __init__( mode: str, to_reshape: bool, ablation_mode: str = "column", - original_shape: Optional[Tuple] = None, - output_shape: Optional[Tuple] = None, + original_shape: tuple | None = None, + output_shape: tuple | None = None, algorithm: str = "salman2021", device_type: str = "gpu", ): @@ -114,9 +114,7 @@ def __init__( if original_shape is not None and output_shape is not None: self.upsample = UpSamplerPyTorch(input_size=original_shape[1], final_size=output_shape[1]) - def ablate( - self, x: Union[torch.Tensor, np.ndarray], column_pos: int, row_pos: Optional[int] = None - ) -> torch.Tensor: + def ablate(self, x: torch.Tensor | np.ndarray, column_pos: int, row_pos: int | None = None) -> torch.Tensor: """ Ablates the input column wise @@ -138,9 +136,7 @@ def ablate( x[:, :, :, column_pos + k :] = 0.0 return x - def forward( - self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None, row_pos=None - ) -> torch.Tensor: + def forward(self, x: torch.Tensor | np.ndarray, column_pos: int | None = None, row_pos=None) -> torch.Tensor: """ Forward pass though the ablator. We insert a new channel to keep track of the ablation location. 
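The column ablators in these hunks keep a vertical band of `ablation_size` columns starting at `column_pos` and zero everything else, wrapping around the image edge when the band runs past it. A compact NumPy sketch of that operation (NCHW layout and the modulo-based wrap-around are simplifications of the indexing in the diff):

import numpy as np

def ablate_column(x: np.ndarray, column_pos: int, ablation_size: int) -> np.ndarray:
    # Keep only the selected band of columns; zero the rest.
    width = x.shape[-1]
    keep = (np.arange(width) - column_pos) % width < ablation_size
    out = np.zeros_like(x)
    out[..., keep] = x[..., keep]
    return out

batch = np.ones((2, 3, 8, 8), dtype=np.float32)
ablated = ablate_column(batch, column_pos=6, ablation_size=4)  # keeps columns 6, 7, 0, 1
print(ablated[0, 0, 0])                                        # [1. 1. 0. 0. 0. 0. 1. 1.]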
@@ -187,10 +183,10 @@ def forward( def certify( self, - pred_counts: Union[torch.Tensor, np.ndarray], + pred_counts: torch.Tensor | np.ndarray, size_to_certify: int, - label: Union[torch.Tensor, np.ndarray], - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + label: torch.Tensor | np.ndarray, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Performs certification of the predictions @@ -245,8 +241,8 @@ def __init__( channels_first: bool, mode: str, to_reshape: bool, - original_shape: Optional[Tuple] = None, - output_shape: Optional[Tuple] = None, + original_shape: tuple | None = None, + output_shape: tuple | None = None, algorithm: str = "salman2021", device_type: str = "gpu", ): @@ -286,7 +282,7 @@ def __init__( if original_shape is not None and output_shape is not None: self.upsample = UpSamplerPyTorch(input_size=original_shape[1], final_size=output_shape[1]) - def ablate(self, x: Union[torch.Tensor, np.ndarray], column_pos: int, row_pos: int) -> torch.Tensor: + def ablate(self, x: torch.Tensor | np.ndarray, column_pos: int, row_pos: int) -> torch.Tensor: """ Ablates the input block wise @@ -316,7 +312,7 @@ def ablate(self, x: Union[torch.Tensor, np.ndarray], column_pos: int, row_pos: i return x def forward( - self, x: Union[torch.Tensor, np.ndarray], column_pos: Optional[int] = None, row_pos: Optional[int] = None + self, x: torch.Tensor | np.ndarray, column_pos: int | None = None, row_pos: int | None = None ) -> torch.Tensor: """ Forward pass though the ablator. We insert a new channel to keep track of the ablation location. @@ -355,10 +351,10 @@ def forward( def certify( self, - pred_counts: Union[torch.Tensor, np.ndarray], + pred_counts: torch.Tensor | np.ndarray, size_to_certify: int, - label: Union[torch.Tensor, np.ndarray], - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + label: torch.Tensor | np.ndarray, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Performs certification of the predictions diff --git a/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py b/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py index e4b927358e..68a4663cd5 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/tensorflow.py @@ -21,9 +21,9 @@ | Paper link: https://arxiv.org/abs/2002.10733 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations -from typing import Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import random import numpy as np @@ -31,7 +31,7 @@ from art.estimators.certification.derandomized_smoothing.ablators.ablate import BaseAblator if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf @@ -54,7 +54,7 @@ def __init__(self, ablation_size: int, channels_first: bool, row_ablation_mode: self.row_ablation_mode = row_ablation_mode def __call__( - self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None + self, x: np.ndarray, column_pos: int | list | None = None, row_pos: int | list | None = None ) -> np.ndarray: """ Performs ablation on the input x. If no column_pos is specified a random location will be selected. 
@@ -71,8 +71,8 @@ def __call__( return self.forward(x=x, column_pos=column_pos) def certify( - self, pred_counts: "tf.Tensor", size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] - ) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: + self, pred_counts: "tf.Tensor", size_to_certify: int, label: np.ndarray | "tf.Tensor" + ) -> tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. @@ -98,22 +98,17 @@ def certify( ) & (top_predicted_class < second_predicted_class) cert = tf.math.logical_or(certs, tie_break_certs) - # NB, newer versions of pylint do not require the disable. + # NB, newer versions of pylint do not require this disabled. if label.ndim > 1: cert_and_correct = cert & ( - tf.math.argmax(label, axis=1) - == tf.cast( # pylint: disable=E1120, E1123 - top_predicted_class, dtype=tf.math.argmax(label, axis=1).dtype - ) + tf.math.argmax(label, axis=1) == tf.cast(top_predicted_class, dtype=tf.math.argmax(label, axis=1).dtype) ) else: - cert_and_correct = cert & ( - label == tf.cast(top_predicted_class, dtype=label.dtype) # pylint: disable=E1120, E1123 - ) + cert_and_correct = cert & (label == tf.cast(top_predicted_class, dtype=label.dtype)) return cert, cert_and_correct, top_predicted_class - def ablate(self, x: np.ndarray, column_pos: int, row_pos: Optional[int] = None) -> np.ndarray: + def ablate(self, x: np.ndarray, column_pos: int, row_pos: int | None = None) -> np.ndarray: """ Ablates the image only retaining a column starting at "pos" of width "self.ablation_size" @@ -142,7 +137,7 @@ def ablate(self, x: np.ndarray, column_pos: int, row_pos: Optional[int] = None) return x def forward( - self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None + self, x: np.ndarray, column_pos: int | list | None = None, row_pos: int | list | None = None ) -> np.ndarray: """ Performs ablation on the input x. If no column_pos is specified a random location will be selected. @@ -195,7 +190,7 @@ def __init__(self, ablation_size: int, channels_first: bool): self.channels_first = channels_first def __call__( - self, x: np.ndarray, column_pos: Optional[Union[int, list]] = None, row_pos: Optional[Union[int, list]] = None + self, x: np.ndarray, column_pos: int | list | None = None, row_pos: int | list | None = None ) -> np.ndarray: """ Performs ablation on the input x. If no row_pos/column_pos is specified a random location will be selected. @@ -212,8 +207,8 @@ def __call__( return self.forward(x=x, row_pos=row_pos, column_pos=column_pos) def certify( - self, pred_counts: Union["tf.Tensor", np.ndarray], size_to_certify: int, label: Union[np.ndarray, "tf.Tensor"] - ) -> Tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: + self, pred_counts: "tf.Tensor" | np.ndarray, size_to_certify: int, label: np.ndarray | "tf.Tensor" + ) -> tuple["tf.Tensor", "tf.Tensor", "tf.Tensor"]: """ Checks if based on the predictions supplied the classifications over the ablated datapoints result in a certified prediction against a patch attack of size size_to_certify. @@ -238,26 +233,21 @@ def certify( ) & (top_predicted_class < second_predicted_class) cert = tf.math.logical_or(certs, tie_break_certs) - # NB, newer versions of pylint do not require the disable. + # NB, newer versions of pylint do not require this disabled. 
if label.ndim > 1: cert_and_correct = cert & ( - tf.math.argmax(label, axis=1) - == tf.cast( # pylint: disable=E1120, E1123 - top_predicted_class, dtype=tf.math.argmax(label, axis=1).dtype - ) + tf.math.argmax(label, axis=1) == tf.cast(top_predicted_class, dtype=tf.math.argmax(label, axis=1).dtype) ) else: - cert_and_correct = cert & ( - label == tf.cast(top_predicted_class, dtype=label.dtype) # pylint: disable=E1120, E1123 - ) + cert_and_correct = cert & (label == tf.cast(top_predicted_class, dtype=label.dtype)) return cert, cert_and_correct, top_predicted_class def forward( self, x: np.ndarray, - column_pos: Optional[Union[int, list]] = None, - row_pos: Optional[Union[int, list]] = None, + column_pos: int | list | None = None, + row_pos: int | list | None = None, ) -> np.ndarray: """ Performs ablation on the input x. If no column_pos/row_pos are specified a random location will be selected. diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index f47a7cd145..d250d64fbe 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -32,10 +32,10 @@ | Paper link Arxiv version (more detail): https://arxiv.org/pdf/2110.07719.pdf """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import importlib import logging -from typing import List, Optional, Tuple, Union, Any, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import random import numpy as np @@ -46,7 +46,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import torch import torchvision from timm.models.vision_transformer import VisionTransformer @@ -71,24 +71,24 @@ class PyTorchDeRandomizedSmoothing(DeRandomizedSmoothingMixin, PyTorchClassifier def __init__( self, - model: Union[str, "VisionTransformer", "torch.nn.Module"], + model: str | "VisionTransformer" | "torch.nn.Module", loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], + input_shape: tuple[int, ...], nb_classes: int, ablation_size: int, algorithm: str = "salman2021", ablation_type: str = "column", - replace_last_layer: Optional[bool] = None, + replace_last_layer: bool | None = None, drop_tokens: bool = True, load_pretrained: bool = True, - optimizer: Union[type, "torch.optim.Optimizer", None] = None, - optimizer_params: Optional[dict] = None, + optimizer: "torch.optim.Optimizer" | None = None, + optimizer_params: dict | None = None, channels_first: bool = True, - threshold: Optional[float] = None, - logits: Optional[bool] = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + threshold: float | None = None, + logits: bool | None = True, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", verbose: bool = True, @@ -189,7 +189,7 @@ def __init__( if not isinstance(optimizer, torch.optim.Optimizer): raise ValueError("Optimizer error: must be a torch.optim.Optimizer instance") - converted_optimizer: 
Union[torch.optim.Adam, torch.optim.SGD] + converted_optimizer: torch.optim.Adam | torch.optim.SGD opt_state_dict = optimizer.state_dict() if isinstance(optimizer, torch.optim.Adam): logging.info("Converting Adam Optimiser") @@ -276,7 +276,7 @@ def __init__( ) if TYPE_CHECKING: - self.ablator: Union[ColumnAblatorPyTorch, BlockAblatorPyTorch] + self.ablator: ColumnAblatorPyTorch | BlockAblatorPyTorch if self.mode is None: raise ValueError("Model type not recognized.") @@ -308,7 +308,7 @@ def __init__( raise ValueError(f"ablation_type of {ablation_type} not recognized. Must be either column, row, or block") @classmethod - def get_models(cls, generate_from_null: bool = False) -> List[str]: + def get_models(cls, generate_from_null: bool = False) -> list[str]: """ Return the supported model names to the user. @@ -375,7 +375,7 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: pbar = tqdm(models) # store in case not re-assigned in the model creation due to unsuccessful creation - tmp_func = timm.models.vision_transformer._create_vision_transformer # pylint: disable=W0212 + tmp_func = timm.models.vision_transformer._create_vision_transformer for model in pbar: pbar.set_description(f"Testing {model} creation") @@ -395,7 +395,7 @@ def get_models(cls, generate_from_null: bool = False) -> List[str]: supported.append(model) except (TypeError, AttributeError): unsupported.append(model) - timm.models.vision_transformer._create_vision_transformer = tmp_func # pylint: disable=W0212 + timm.models.vision_transformer._create_vision_transformer = tmp_func if supported != supported_models: logger.warning( @@ -429,7 +429,7 @@ def create_vision_transformer(variant: str, pretrained: bool = False, **kwargs) **kwargs, ) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, @@ -437,11 +437,11 @@ def fit( # pylint: disable=W0221 nb_epochs: int = 10, training_mode: bool = True, drop_last: bool = False, - scheduler: Optional[Any] = None, + scheduler: Any | None = None, verbose: bool = False, update_batchnorm: bool = True, batchnorm_update_epochs: int = 1, - transform: Optional["torchvision.transforms.transforms.Compose"] = None, + transform: "torchvision.transforms.transforms.Compose" | None = None, **kwargs, ) -> None: """ @@ -531,7 +531,7 @@ def fit( # pylint: disable=W0221 # Do training if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() @@ -555,7 +555,7 @@ def fit( # pylint: disable=W0221 scheduler.step() @staticmethod - def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: + def get_accuracy(preds: np.ndarray | "torch.Tensor", labels: np.ndarray | "torch.Tensor") -> np.ndarray: """ Helper function to get the accuracy during training. @@ -604,7 +604,7 @@ def eval_and_certify( size_to_certify: int, batch_size: int = 128, verbose: bool = True, - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Evaluates the ViT's normal and certified performance over the supplied data. 
@@ -684,7 +684,7 @@ def eval_and_certify( return (accuracy / n_samples), (cert_sum / n_samples) def _predict_classifier( - self, x: Union[np.ndarray, "torch.Tensor"], batch_size: int, training_mode: bool, **kwargs + self, x: np.ndarray | "torch.Tensor", batch_size: int, training_mode: bool, **kwargs ) -> np.ndarray: import torch diff --git a/art/estimators/certification/derandomized_smoothing/tensorflow.py b/art/estimators/certification/derandomized_smoothing/tensorflow.py index e99154198b..299e45e8a9 100644 --- a/art/estimators/certification/derandomized_smoothing/tensorflow.py +++ b/art/estimators/certification/derandomized_smoothing/tensorflow.py @@ -20,10 +20,11 @@ | Paper link: https://arxiv.org/abs/2002.10733 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging -from typing import Callable, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm import tqdm @@ -33,7 +34,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE, ABLATOR_TYPE from art.defences.preprocessor import Preprocessor @@ -65,14 +66,14 @@ def __init__( ablation_size: int, threshold: float, logits: bool, - input_shape: Tuple[int, ...], - loss_object: Optional["tf.Tensor"] = None, - optimizer: Optional["tf.keras.optimizers.legacy.Optimizer"] = None, - train_step: Optional[Callable] = None, + input_shape: tuple[int, ...], + loss_object: "tf.Tensor" | None = None, + optimizer: "tf.keras.optimizers.legacy.Optimizer" | None = None, + train_step: Callable | None = None, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ): """ @@ -132,7 +133,7 @@ def __init__( ) if TYPE_CHECKING: - self.ablator: ABLATOR_TYPE # pylint: disable=used-before-assignment + self.ablator: ABLATOR_TYPE if self.ablation_type in {"column", "row"}: row_ablation_mode = self.ablation_type == "row" @@ -154,7 +155,7 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo outputs = tf.nn.softmax(outputs) return np.asarray(outputs >= self.threshold).astype(int) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, @@ -303,7 +304,7 @@ def eval_and_certify( size_to_certify: int, batch_size: int = 128, verbose: bool = True, - ) -> Tuple["tf.Tensor", "tf.Tensor"]: + ) -> tuple["tf.Tensor", "tf.Tensor"]: """ Evaluates the normal and certified performance over the supplied data. 
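The `certify` and `eval_and_certify` hunks above compare the vote counts of the top class and the runner-up over all ablations, with ties broken in favour of the smaller class index. A hedged sketch of that majority-vote test; the margin 2 * (size_to_certify + ablation_size - 1) is taken from the paper (https://arxiv.org/abs/2002.10733) rather than from this diff:

    import numpy as np


    def certify_counts(pred_counts: np.ndarray, size_to_certify: int, ablation_size: int) -> np.ndarray:
        """pred_counts: (N, num_classes) votes over all ablations; returns one bool per sample."""
        order = np.argsort(-pred_counts, axis=1)
        top, second = order[:, 0], order[:, 1]
        rows = np.arange(pred_counts.shape[0])
        margin = pred_counts[rows, top] - pred_counts[rows, second]
        delta = 2 * (size_to_certify + ablation_size - 1)
        # ties on the margin are broken in favour of the smaller class index, as in the ablators above
        return (margin > delta) | ((margin == delta) & (top < second))


    counts = np.array([[40, 5, 3], [20, 18, 10]])
    print(certify_counts(counts, size_to_certify=4, ablation_size=4))  # [ True False]
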
diff --git a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py index 48f96eefab..8c03388274 100644 --- a/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/vision_transformers/pytorch.py @@ -45,7 +45,7 @@ """ Implements functionality for running Vision Transformers in ART """ -from typing import Optional +from __future__ import annotations import torch from timm.models.vision_transformer import VisionTransformer @@ -72,16 +72,16 @@ def __init__(self, patch_size: int = 16, in_channels: int = 1, embed_dim: int = self.patch_size = patch_size self.in_channels = in_channels self.embed_dim = embed_dim - self.proj: Optional[torch.nn.Conv2d] = None + self.proj: torch.nn.Conv2d | None = None - def create(self, patch_size=None, embed_dim=None, device="cpu", **kwargs) -> None: # pylint: disable=W0613 + def create(self, patch_size=None, embed_dim=None, device="cpu", **kwargs) -> None: """ Creates a convolution that mimics the embedding layer to be used for the ablation mask to track where the image was ablated. :param patch_size: The patch size used by the ViT. :param embed_dim: The embedding dimension used by the ViT. - :param device: Which device to set the emdedding layer to. + :param device: Which device to set the embedding layer to. :param kwargs: Handles the remaining kwargs from the ViT configuration. """ diff --git a/art/estimators/certification/interval/interval.py b/art/estimators/certification/interval/interval.py index f4309f4bca..15b2d690c4 100644 --- a/art/estimators/certification/interval/interval.py +++ b/art/estimators/certification/interval/interval.py @@ -20,7 +20,7 @@ | Paper link: https://ieeexplore.ieee.org/document/8418593 """ -from typing import List, Union, Tuple, Optional +from __future__ import annotations import torch import numpy as np @@ -79,15 +79,15 @@ def __init__( self, in_channels: int, out_channels: int, - kernel_size: Union[int, Tuple[int, int]], - input_shape: Tuple[int, ...], - device: Union[str, "torch.device"], - stride: Union[int, Tuple[int, int]] = 1, - padding: Union[int, Tuple[int, int]] = 0, - dilation: Union[int, Tuple[int, int]] = 0, + kernel_size: int | tuple[int, int], + input_shape: tuple[int, ...], + device: str | "torch.device", + stride: int | tuple[int, int] = 1, + padding: int | tuple[int, int] = 0, + dilation: int | tuple[int, int] = 0, bias: bool = True, - supplied_input_weights: Union[None, "torch.Tensor"] = None, - supplied_input_bias: Union[None, "torch.Tensor"] = None, + supplied_input_weights: "torch.Tensor" | None = None, + supplied_input_bias: "torch.Tensor" | None = None, to_debug: bool = False, ): """ @@ -114,7 +114,7 @@ def __init__( self.stride = stride self.device = device self.include_bias = bias - self.cnn: Optional["torch.nn.Conv2d"] = None + self.cnn: "torch.nn.Conv2d" | None = None super().__init__() self.conv = torch.nn.Conv2d( @@ -191,7 +191,7 @@ def __init__( if self.bias is not None: self.bias = self.bias.to(device) - def re_convert(self, device: Union[str, "torch.device"]) -> None: + def re_convert(self, device: str | "torch.device") -> None: """ Re converts the weights into a dense equivalent layer. Must be called after every backwards if multiple gradients wish to be taken (like for crafting pgd). 
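`PyTorchIntervalConv2D` and the other abstract layers above propagate a lower and an upper bound instead of a single tensor. A generic sketch of interval (box) propagation through an affine layer, shown for intuition only and not taken from ART's implementation:

    import numpy as np


    def affine_interval(lower: np.ndarray, upper: np.ndarray, weight: np.ndarray, bias: np.ndarray):
        """Propagate elementwise bounds [lower, upper] through y = x @ weight.T + bias."""
        centre = (lower + upper) / 2.0
        radius = (upper - lower) / 2.0
        out_centre = centre @ weight.T + bias
        out_radius = radius @ np.abs(weight).T  # radii only ever grow with |W|
        return out_centre - out_radius, out_centre + out_radius


    w = np.array([[1.0, -2.0]])
    b = np.array([0.5])
    lo, hi = affine_interval(np.array([[0.0, 0.0]]), np.array([[1.0, 1.0]]), w, b)
    print(lo, hi)  # [[-1.5]] [[1.5]]
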
@@ -201,7 +201,7 @@ def re_convert(self, device: Union[str, "torch.device"]) -> None: if self.bias is not None: self.bias = self.bias.to(device) - def convert_to_dense(self, device: Union[str, "torch.device"]) -> Tuple["torch.Tensor", "torch.Tensor"]: + def convert_to_dense(self, device: str | "torch.device") -> tuple["torch.Tensor", "torch.Tensor"]: """ Converts the initialised convolutional layer into an equivalent dense layer. @@ -356,9 +356,6 @@ class PyTorchIntervalFlatten(torch.nn.Module): Layer to handle flattening on both interval and concrete data """ - def __init__(self): - super().__init__() - def __call__(self, x: "torch.Tensor") -> "torch.Tensor": return self.concrete_forward(x) @@ -444,8 +441,8 @@ def certify(preds: np.ndarray, labels: np.ndarray) -> np.ndarray: @staticmethod def concrete_to_interval( x: np.ndarray, - bounds: Union[float, List[float], np.ndarray], - limits: Optional[Union[List[float], np.ndarray]] = None, + bounds: float | list[float] | np.ndarray, + limits: list[float] | np.ndarray | None = None, ) -> np.ndarray: """ Helper function converts a datapoint it into its interval representation diff --git a/art/estimators/certification/interval/pytorch.py b/art/estimators/certification/interval/pytorch.py index 45758684ea..3a7f63b2ae 100644 --- a/art/estimators/certification/interval/pytorch.py +++ b/art/estimators/certification/interval/pytorch.py @@ -20,8 +20,10 @@ | Paper link: https://ieeexplore.ieee.org/document/8418593 """ +from __future__ import annotations -from typing import List, Optional, Tuple, Union, Callable, Any, TYPE_CHECKING +from collections.abc import Callable +from typing import Any, TYPE_CHECKING import logging import warnings @@ -50,16 +52,15 @@ class ConvertedModel(torch.nn.Module): which uses abstract operations """ - def __init__(self, model: "torch.nn.Module", channels_first: bool, input_shape: Tuple[int, ...]): + def __init__(self, model: "torch.nn.Module", channels_first: bool, input_shape: tuple[int, ...]): super().__init__() modules = [] - self.interim_shapes: List[Tuple] = [] + self.interim_shapes: list[tuple] = [] self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.forward_mode: str self.forward_mode = "abstract" self.reshape_op_num = -1 - # pylint: disable=W0613 def forward_hook(input_module, hook_input, hook_output): modules.append(input_module) self.interim_shapes.append(tuple(hook_input[0].shape)) @@ -160,7 +161,7 @@ def abstract_forward(self, x_interval: np.ndarray) -> "torch.Tensor": x = op.forward(x) return x - def concrete_forward(self, in_x: Union[np.ndarray, "torch.Tensor"]) -> "torch.Tensor": + def concrete_forward(self, in_x: np.ndarray | "torch.Tensor") -> "torch.Tensor": """ Do the forward pass using the concrete operations @@ -221,16 +222,16 @@ def __init__( self, model: "torch.nn.Module", loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], + input_shape: tuple[int, ...], nb_classes: int, - optimizer: Optional["torch.optim.Optimizer"] = None, + optimizer: "torch.optim.Optimizer" | None = None, channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: 
"PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", - concrete_to_interval: Optional[Callable] = None, + concrete_to_interval: Callable | None = None, ): """ Create a certifier based on the interval (also called box) domain. @@ -272,7 +273,7 @@ def __init__( converted_model = ConvertedModel(model, channels_first, input_shape) if TYPE_CHECKING: - converted_optimizer: Union[torch.optim.Adam, torch.optim.SGD, None] + converted_optimizer: torch.optim.Adam | torch.optim.SGD | None if optimizer is not None: opt_state_dict = optimizer.state_dict() if isinstance(optimizer, torch.optim.Adam): @@ -304,15 +305,15 @@ def __init__( device_type=device_type, ) - def predict_intervals( # pylint: disable=W0613 + def predict_intervals( self, x: np.ndarray, is_interval: bool = False, - bounds: Optional[Union[float, List[float], np.ndarray]] = None, - limits: Optional[Union[List[float], np.ndarray]] = None, + bounds: float | list[float] | np.ndarray | None = None, + limits: list[float] | np.ndarray | None = None, batch_size: int = 128, training_mode: bool = False, - **kwargs + **kwargs, ) -> np.ndarray: """ Produce interval predictions over the supplied data @@ -365,7 +366,7 @@ def predict_intervals( # pylint: disable=W0613 return torch.concat(interval_predictions, dim=0).cpu().detach().numpy() - def apply_preprocessing(self, x: np.ndarray, y: np.ndarray, fit: bool) -> Tuple[Any, Any]: + def apply_preprocessing(self, x: np.ndarray, y: np.ndarray, fit: bool) -> tuple[Any, Any]: """ Access function to get preprocessing @@ -379,7 +380,7 @@ def apply_preprocessing(self, x: np.ndarray, y: np.ndarray, fit: bool) -> Tuple[ return x_preprocessed, y_preprocessed @staticmethod - def get_accuracy(preds: Union[np.ndarray, "torch.Tensor"], labels: Union[np.ndarray, "torch.Tensor"]) -> np.ndarray: + def get_accuracy(preds: np.ndarray | "torch.Tensor", labels: np.ndarray | "torch.Tensor") -> np.ndarray: """ Helper function to print out the accuracy during training diff --git a/art/estimators/certification/object_seeker/object_seeker.py b/art/estimators/certification/object_seeker/object_seeker.py index 0f63588793..5af59852af 100644 --- a/art/estimators/certification/object_seeker/object_seeker.py +++ b/art/estimators/certification/object_seeker/object_seeker.py @@ -46,7 +46,6 @@ import abc import logging -from typing import Dict, List, Tuple import numpy as np from sklearn.cluster import DBSCAN @@ -95,7 +94,7 @@ def __init__( self.verbose = verbose @abc.abstractmethod - def _image_dimensions(self) -> Tuple[int, int]: + def _image_dimensions(self) -> tuple[int, int]: """ Get the height and width of a sample input image. @@ -106,7 +105,7 @@ def _image_dimensions(self) -> Tuple[int, int]: @abc.abstractmethod def _masked_predictions( self, x_i: np.ndarray, batch_size: int = 128, **kwargs - ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]: + ) -> tuple[dict[str, np.ndarray], dict[str, np.ndarray]]: """ Create masked copies of the image for each of lines following the ObjectSeeker algorithm. Then creates predictions on the base unmasked image and each of the masked image. 
@@ -118,8 +117,8 @@ def _masked_predictions( raise NotImplementedError def _prune_boxes( - self, masked_preds: Dict[str, np.ndarray], base_preds: Dict[str, np.ndarray] - ) -> Dict[str, np.ndarray]: + self, masked_preds: dict[str, np.ndarray], base_preds: dict[str, np.ndarray] + ) -> dict[str, np.ndarray]: """ Remove bounding boxes from the masked predictions of a single image based on the IoA score with the boxes on the base unmasked predictions. @@ -155,7 +154,7 @@ def _prune_boxes( } return pruned_preds - def _unionize_clusters(self, masked_preds: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + def _unionize_clusters(self, masked_preds: dict[str, np.ndarray]) -> dict[str, np.ndarray]: """ Cluster the bounding boxes for the pruned masked predictions. @@ -217,13 +216,13 @@ def _unionize_clusters(self, masked_preds: Dict[str, np.ndarray]) -> Dict[str, n } return unionized_predictions - def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> List[Dict[str, np.ndarray]]: + def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> list[dict[str, np.ndarray]]: """ Perform prediction for a batch of inputs. :param x: Samples of shape NCHW or NHWC. :param batch_size: Batch size. - :return: Predictions of format `List[Dict[str, np.ndarray]]`, one for each input image. The fields of the Dict + :return: Predictions of format `list[dict[str, np.ndarray]]`, one for each input image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -253,7 +252,7 @@ def certify( patch_size: float = 0.01, offset: float = 0.1, batch_size: int = 128, - ) -> List[np.ndarray]: + ) -> list[np.ndarray]: """ Checks if there is certifiable IoA robustness for each predicted bounding box. @@ -272,7 +271,7 @@ def certify( # Get predictions predictions = self.predict(x, batch_size=batch_size) - certifications: List[np.ndarray] = [] + certifications: list[np.ndarray] = [] for pred in tqdm(predictions, desc="ObjectSeeker", disable=not self.verbose): boxes = pred["boxes"] diff --git a/art/estimators/certification/object_seeker/pytorch.py b/art/estimators/certification/object_seeker/pytorch.py index b43def0866..3e049cff6f 100644 --- a/art/estimators/certification/object_seeker/pytorch.py +++ b/art/estimators/certification/object_seeker/pytorch.py @@ -20,11 +20,11 @@ | Paper link: https://arxiv.org/abs/2202.01811 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import sys -from typing import List, Dict, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -43,7 +43,7 @@ from typing_extensions import Literal if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -73,14 +73,14 @@ class PyTorchObjectSeeker(ObjectSeekerMixin, PyTorchObjectDetector): def __init__( self, model: "torch.nn.Module", - input_shape: Tuple[int, ...] = (3, 416, 416), - optimizer: Optional["torch.optim.Optimizer"] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + input_shape: tuple[int, ...] 
= (3, 416, 416), + optimizer: "torch.optim.Optimizer" | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, channels_first: bool = True, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = None, - attack_losses: Tuple[str, ...] = ( + attack_losses: tuple[str, ...] = ( "loss_classifier", "loss_box_reg", "loss_objectness", @@ -98,8 +98,8 @@ def __init__( """ Create an ObjectSeeker classifier. - :param model: Object detection model. The output of the model is `List[Dict[str, torch.Tensor]]`, - one for each input image. The fields of the Dict are as follows: + :param model: Object detection model. The output of the model is `list[dict[str, torch.Tensor]]`, + one for each input image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -174,7 +174,7 @@ def __init__( device_type=device_type, ) - def _image_dimensions(self) -> Tuple[int, int]: + def _image_dimensions(self) -> tuple[int, int]: """ Return the height and width of a sample input image. @@ -189,7 +189,7 @@ def _image_dimensions(self) -> Tuple[int, int]: def _masked_predictions( self, x_i: np.ndarray, batch_size: int = 128, **kwargs - ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]: + ) -> tuple[dict[str, np.ndarray], dict[str, np.ndarray]]: """ Create masked copies of the image for each of lines following the ObjectSeeker algorithm. Then creates predictions on the base unmasked image and each of the masked image. @@ -263,13 +263,13 @@ def _masked_predictions( return base_predictions, masked_predictions - def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> List[Dict[str, np.ndarray]]: + def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> list[dict[str, np.ndarray]]: """ Perform prediction for a batch of inputs. :param x: Samples of shape NCHW or NHWC. :param batch_size: Batch size. - :return: Predictions of format `List[Dict[str, np.ndarray]]`, one for each input image. The fields of the Dict + :return: Predictions of format `list[dict[str, np.ndarray]]`, one for each input image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -278,22 +278,22 @@ def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> List[Dict[s """ return ObjectSeekerMixin.predict(self, x=x, batch_size=batch_size, **kwargs) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, - y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]], + y: list[dict[str, np.ndarray | "torch.Tensor"]], batch_size: int = 128, nb_epochs: int = 10, drop_last: bool = False, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, **kwargs, ) -> None: """ Fit the classifier on the training set `(x, y)`. :param x: Samples of shape NCHW or NHWC. - :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, np.ndarray | torch.Tensor]]`, one for each input image. 
+ The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. @@ -316,9 +316,7 @@ def fit( # pylint: disable=W0221 **kwargs, ) - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: """ Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and `nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by @@ -337,15 +335,15 @@ def get_activations( framework=framework, ) - def loss_gradient( # pylint: disable=W0613 - self, x: Union[np.ndarray, "torch.Tensor"], y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]], **kwargs + def loss_gradient( + self, x: np.ndarray | "torch.Tensor", y: list[dict[str, np.ndarray | "torch.Tensor"]], **kwargs ) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. :param x: Samples of shape NCHW or NHWC. - :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, np.ndarray | torch.Tensor]]`, one for each input image. + The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. @@ -358,14 +356,14 @@ def loss_gradient( # pylint: disable=W0613 ) def compute_losses( - self, x: Union[np.ndarray, "torch.Tensor"], y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]] - ) -> Dict[str, np.ndarray]: + self, x: np.ndarray | "torch.Tensor", y: list[dict[str, np.ndarray | "torch.Tensor"]] + ) -> dict[str, np.ndarray]: """ Compute all loss components. :param x: Samples of shape NCHW or NHWC. - :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, np.ndarray | torch.Tensor]]`, one for each input image. + The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. @@ -377,14 +375,14 @@ def compute_losses( ) def compute_loss( # type: ignore - self, x: Union[np.ndarray, "torch.Tensor"], y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]], **kwargs - ) -> Union[np.ndarray, "torch.Tensor"]: + self, x: np.ndarray | "torch.Tensor", y: list[dict[str, np.ndarray | "torch.Tensor"]], **kwargs + ) -> np.ndarray | "torch.Tensor": """ Compute the loss of the neural network for samples `x`. :param x: Samples of shape NCHW or NHWC. - :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. + The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. 
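`_prune_boxes` above drops a box from the masked predictions when its IoA (intersection over its own area) with a box from the base, unmasked prediction is high. A hedged sketch of that check; the 0.9 threshold and the helper are illustrative assumptions rather than values from the patch:

    import numpy as np


    def ioa(masked_box: np.ndarray, base_box: np.ndarray) -> float:
        """Intersection area divided by the area of the masked box; boxes are [x1, y1, x2, y2]."""
        x1 = max(masked_box[0], base_box[0])
        y1 = max(masked_box[1], base_box[1])
        x2 = min(masked_box[2], base_box[2])
        y2 = min(masked_box[3], base_box[3])
        inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
        area = (masked_box[2] - masked_box[0]) * (masked_box[3] - masked_box[1])
        return inter / area if area > 0 else 0.0


    masked = np.array([[0, 0, 10, 10], [50, 50, 60, 60]], dtype=float)
    base = np.array([[0, 0, 12, 12]], dtype=float)
    keep = [m for m in masked if max(ioa(m, b) for b in base) < 0.9]
    print(len(keep))  # 1 -- the first box is covered by a base detection and gets pruned
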
diff --git a/art/estimators/certification/randomized_smoothing/macer/pytorch.py b/art/estimators/certification/randomized_smoothing/macer/pytorch.py index ac3d1f3dfa..0278101db1 100644 --- a/art/estimators/certification/randomized_smoothing/macer/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/macer/pytorch.py @@ -20,10 +20,10 @@ | Paper link: https://arxiv.org/abs/2001.02378 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING from tqdm.auto import trange import numpy as np @@ -32,7 +32,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor @@ -59,13 +59,13 @@ def __init__( self, model: "torch.nn.Module", loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], + input_shape: tuple[int, ...], nb_classes: int, - optimizer: Optional["torch.optim.Optimizer"] = None, + optimizer: "torch.optim.Optimizer" | None = None, channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", sample_size: int = 32, @@ -126,7 +126,7 @@ def __init__( self.lmbda = lmbda self.gaussian_samples = gaussian_samples - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, @@ -134,7 +134,7 @@ def fit( # pylint: disable=W0221 nb_epochs: int = 10, training_mode: bool = True, drop_last: bool = False, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, verbose: bool = False, **kwargs, ) -> None: diff --git a/art/estimators/certification/randomized_smoothing/macer/tensorflow.py b/art/estimators/certification/randomized_smoothing/macer/tensorflow.py index cf0c921a7b..7228555f97 100644 --- a/art/estimators/certification/randomized_smoothing/macer/tensorflow.py +++ b/art/estimators/certification/randomized_smoothing/macer/tensorflow.py @@ -20,10 +20,11 @@ | Paper link: https://arxiv.org/abs/2001.02378 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging -from typing import Callable, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING from tqdm.auto import trange import numpy as np @@ -32,7 +33,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor @@ -59,14 +60,14 @@ def __init__( self, model, nb_classes: int, - input_shape: Tuple[int, ...], - loss_object: Optional["tf.Tensor"] = None, - optimizer: 
Optional["tf.keras.optimizers.Optimizer"] = None, - train_step: Optional[Callable] = None, + input_shape: tuple[int, ...], + loss_object: "tf.Tensor" | None = None, + optimizer: "tf.keras.optimizers.Optimizer" | None = None, + train_step: Callable | None = None, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), sample_size: int = 32, scale: float = 0.1, @@ -180,8 +181,8 @@ def train_step(model, images, labels): icdf_out0 = tf.math.erfinv(2 * out0 - 1) * np.sqrt(2) robustness_loss = icdf_out1 - icdf_out0 indices = ( - ~tf.math.is_nan(robustness_loss) # pylint: disable=E1130 - & ~tf.math.is_inf(robustness_loss) # pylint: disable=E1130 + ~tf.math.is_nan(robustness_loss) + & ~tf.math.is_inf(robustness_loss) & (tf.abs(robustness_loss) <= self.gamma) ) out0, out1 = out0[indices], out1[indices] diff --git a/art/estimators/certification/randomized_smoothing/numpy.py b/art/estimators/certification/randomized_smoothing/numpy.py index bb27876c41..2663fc7c1c 100644 --- a/art/estimators/certification/randomized_smoothing/numpy.py +++ b/art/estimators/certification/randomized_smoothing/numpy.py @@ -20,10 +20,10 @@ | Paper link: https://arxiv.org/abs/1902.02918 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Union, TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING import warnings import numpy as np @@ -95,7 +95,7 @@ def __init__( self.classifier = classifier @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: return self._input_shape def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: bool, **kwargs) -> np.ndarray: @@ -128,9 +128,7 @@ def _fit_classifier(self, x: np.ndarray, y: np.ndarray, batch_size: int, nb_epoc x_rs = x_rs.astype(ART_NUMPY_DTYPE) self.classifier.fit(x_rs, y, batch_size=batch_size, nb_epochs=1, **kwargs) - def loss_gradient( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs) -> np.ndarray: """ Compute the gradient of the given classifier's loss function w.r.t. `x` of the original classifier. :param x: Sample input with shape as expected by the model. @@ -140,12 +138,8 @@ def loss_gradient( # pylint: disable=W0221 """ return self.classifier.loss_gradient(x=x, y=y, training_mode=training_mode, **kwargs) # type: ignore - def class_gradient( # pylint: disable=W0221 - self, - x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, - training_mode: bool = False, - **kwargs + def class_gradient( + self, x: np.ndarray, label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, **kwargs ) -> np.ndarray: """ Compute per-class derivatives of the given classifier w.r.t. `x` of original classifier. 
@@ -164,9 +158,7 @@ def class_gradient( # pylint: disable=W0221 def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: return self.classifier.compute_loss(x=x, y=y, **kwargs) # type: ignore - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: return self.classifier.get_activations( # type: ignore x=x, layer=layer, batch_size=batch_size, framework=framework ) diff --git a/art/estimators/certification/randomized_smoothing/pytorch.py b/art/estimators/certification/randomized_smoothing/pytorch.py index 57ec55a3ee..0c64c58faf 100644 --- a/art/estimators/certification/randomized_smoothing/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/pytorch.py @@ -20,10 +20,10 @@ | Paper link: https://arxiv.org/abs/1902.02918 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import warnings from tqdm.auto import trange @@ -35,7 +35,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -59,13 +59,13 @@ def __init__( self, model: "torch.nn.Module", loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], + input_shape: tuple[int, ...], nb_classes: int, - optimizer: Optional["torch.optim.Optimizer"] = None, + optimizer: "torch.optim.Optimizer" | None = None, channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", sample_size: int = 32, @@ -128,7 +128,7 @@ def _fit_classifier(self, x: np.ndarray, y: np.ndarray, batch_size: int, nb_epoc x = x.astype(ART_NUMPY_DTYPE) return PyTorchClassifier.fit(self, x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, @@ -136,7 +136,7 @@ def fit( # pylint: disable=W0221 nb_epochs: int = 10, training_mode: bool = True, drop_last: bool = False, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, verbose: bool = False, **kwargs, ) -> None: @@ -209,7 +209,7 @@ def fit( # pylint: disable=W0221 # Do training if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() @@ -308,7 +308,7 @@ def loss_gradient( # type: ignore def class_gradient( self, x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, + label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, **kwargs, ) -> np.ndarray: diff --git 
a/art/estimators/certification/randomized_smoothing/randomized_smoothing.py b/art/estimators/certification/randomized_smoothing/randomized_smoothing.py index 6027bd4d5e..0c19f1a4b5 100644 --- a/art/estimators/certification/randomized_smoothing/randomized_smoothing.py +++ b/art/estimators/certification/randomized_smoothing/randomized_smoothing.py @@ -20,11 +20,11 @@ | Paper link: https://arxiv.org/abs/1902.02918 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations from abc import ABC import logging -from typing import Optional, Tuple + import numpy as np from scipy.stats import norm @@ -143,7 +143,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in """ self._fit_classifier(x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) - def certify(self, x: np.ndarray, n: int, batch_size: int = 32) -> Tuple[np.ndarray, np.ndarray]: + def certify(self, x: np.ndarray, n: int, batch_size: int = 32) -> tuple[np.ndarray, np.ndarray]: """ Computes certifiable radius around input `x` and returns radius `r` and prediction. @@ -176,7 +176,7 @@ def certify(self, x: np.ndarray, n: int, batch_size: int = 32) -> Tuple[np.ndarr return np.array(prediction), np.array(radius) - def _noisy_samples(self, x: np.ndarray, n: Optional[int] = None) -> np.ndarray: + def _noisy_samples(self, x: np.ndarray, n: int | None = None) -> np.ndarray: """ Adds Gaussian noise to `x` to generate samples. Optionally augments `y` similarly. @@ -195,7 +195,7 @@ def _noisy_samples(self, x: np.ndarray, n: Optional[int] = None) -> np.ndarray: return x - def _prediction_counts(self, x: np.ndarray, n: Optional[int] = None, batch_size: int = 128) -> np.ndarray: + def _prediction_counts(self, x: np.ndarray, n: int | None = None, batch_size: int = 128) -> np.ndarray: """ Makes predictions and then converts probability distribution to counts. 
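`certify` in the hunk above returns a per-sample prediction and certified radius. A hedged sketch of the Cohen et al. radius it builds on (https://arxiv.org/abs/1902.02918); the lower confidence bound on the top-class probability is simplified here to a plain empirical frequency, so this is an illustration rather than ART's code:

    import numpy as np
    from scipy.stats import norm

    sigma = 0.25
    counts = np.array([188, 7, 5])   # votes per class over n noisy copies of one input
    n = counts.sum()
    top = int(counts.argmax())
    p_a = counts[top] / n            # ART/Cohen et al. use a Clopper-Pearson lower bound here
    radius = sigma * norm.ppf(p_a) if p_a > 0.5 else 0.0   # abstain without a clear majority
    print(top, round(float(radius), 4))  # 0 0.3887
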
diff --git a/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py b/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py index e57f4c7c88..8e9a2c9b6a 100644 --- a/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py @@ -20,10 +20,10 @@ | Paper link: https://arxiv.org/abs/1906.04584 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING from tqdm.auto import trange import numpy as np @@ -34,7 +34,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor @@ -61,13 +61,13 @@ def __init__( self, model: "torch.nn.Module", loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], + input_shape: tuple[int, ...], nb_classes: int, - optimizer: Optional["torch.optim.Optimizer"] = None, + optimizer: "torch.optim.Optimizer" | None = None, channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", sample_size: int = 32, @@ -143,7 +143,7 @@ def __init__( ) self.attack = ProjectedGradientDescent(classifier, eps=self.epsilon, max_iter=1, verbose=False) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, @@ -151,7 +151,7 @@ def fit( # pylint: disable=W0221 nb_epochs: int = 10, training_mode: bool = True, drop_last: bool = False, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, verbose: bool = False, **kwargs, ) -> None: diff --git a/art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py b/art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py index 0887e7ce6c..8b8bf9c9b5 100644 --- a/art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py +++ b/art/estimators/certification/randomized_smoothing/smooth_adv/tensorflow.py @@ -20,10 +20,11 @@ | Paper link: https://arxiv.org/abs/1906.04584 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging -from typing import Callable, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING from tqdm.auto import trange import numpy as np @@ -34,7 +35,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor @@ -61,14 +62,14 @@ def __init__( self, model, nb_classes: int, - input_shape: Tuple[int, ...], 
- loss_object: Optional["tf.Tensor"] = None, - optimizer: Optional["tf.keras.optimizers.Optimizer"] = None, - train_step: Optional[Callable] = None, + input_shape: tuple[int, ...], + loss_object: "tf.Tensor" | None = None, + optimizer: "tf.keras.optimizers.Optimizer" | None = None, + train_step: Callable | None = None, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), sample_size: int = 32, scale: float = 0.1, diff --git a/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py b/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py index a23fba769e..4e29658eb7 100644 --- a/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py @@ -42,10 +42,10 @@ | Paper link: https://arxiv.org/abs/2111.09277 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING from tqdm.auto import trange import numpy as np @@ -54,7 +54,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor import Preprocessor @@ -84,13 +84,13 @@ def __init__( self, model: "torch.nn.Module", loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], + input_shape: tuple[int, ...], nb_classes: int, - optimizer: Optional["torch.optim.Optimizer"] = None, + optimizer: "torch.optim.Optimizer" | None = None, channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", sample_size: int = 32, @@ -101,8 +101,8 @@ def __init__( num_steps: int = 10, warmup: int = 1, mix_step: int = 0, - maxnorm_s: Optional[float] = None, - maxnorm: Optional[float] = None, + maxnorm_s: float | None = None, + maxnorm: float | None = None, ) -> None: """ Create a SmoothMix classifier. 
@@ -160,7 +160,7 @@ def __init__( self.maxnorm_s = maxnorm_s self.maxnorm = maxnorm - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, @@ -168,7 +168,7 @@ def fit( # pylint: disable=W0221 nb_epochs: int = 10, training_mode: bool = True, drop_last: bool = False, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, verbose: bool = False, **kwargs, ) -> None: @@ -270,9 +270,9 @@ def _smooth_mix_pgd_attack( self, inputs: "torch.Tensor", labels: "torch.Tensor", - noises: List["torch.Tensor"], + noises: list["torch.Tensor"], warmup_v: float, - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor"]: """ The authors' implementation of the SmoothMixPGD attack. Code modified from https://github.com/jh-jeong/smoothmix/blob/main/code/train.py @@ -293,7 +293,7 @@ def _batch_l2_norm(x: torch.Tensor) -> torch.Tensor: x_flat = x.reshape(x.size(0), -1) return torch.norm(x_flat, dim=1) - def _project(x: torch.Tensor, x_0: torch.Tensor, maxnorm: Optional[float] = None): + def _project(x: torch.Tensor, x_0: torch.Tensor, maxnorm: float | None = None): """ Apply a projection of the current inputs with the maxnorm @@ -338,7 +338,7 @@ def _project(x: torch.Tensor, x_0: torch.Tensor, maxnorm: Optional[float] = None def _mix_data( self, inputs: "torch.Tensor", inputs_adv: "torch.Tensor", labels: "torch.Tensor" - ) -> Tuple["torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Returns mixed inputs and labels. diff --git a/art/estimators/certification/randomized_smoothing/tensorflow.py b/art/estimators/certification/randomized_smoothing/tensorflow.py index 636b62f547..450557876c 100644 --- a/art/estimators/certification/randomized_smoothing/tensorflow.py +++ b/art/estimators/certification/randomized_smoothing/tensorflow.py @@ -20,10 +20,11 @@ | Paper link: https://arxiv.org/abs/1902.02918 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging -from typing import Callable, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import warnings from tqdm.auto import trange @@ -34,7 +35,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -58,14 +59,14 @@ def __init__( self, model, nb_classes: int, - input_shape: Tuple[int, ...], - loss_object: Optional["tf.Tensor"] = None, - optimizer: Optional["tf.keras.optimizers.Optimizer"] = None, - train_step: Optional[Callable] = None, + input_shape: tuple[int, ...], + loss_object: "tf.Tensor" | None = None, + optimizer: "tf.keras.optimizers.Optimizer" | None = None, + train_step: Callable | None = None, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), sample_size: int = 32, scale: float = 0.1, @@ -128,7 
+129,7 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo def _fit_classifier(self, x: np.ndarray, y: np.ndarray, batch_size: int, nb_epochs: int, **kwargs) -> None: return TensorFlowV2Classifier.fit(self, x, y, batch_size=batch_size, nb_epochs=nb_epochs, **kwargs) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = False, **kwargs ) -> None: """ @@ -279,9 +280,9 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals def class_gradient( self, x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, + label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, - **kwargs + **kwargs, ) -> np.ndarray: """ Compute per-class derivatives of the given classifier w.r.t. `x` of original classifier. diff --git a/art/estimators/classification/GPy.py b/art/estimators/classification/GPy.py index 458bc961a0..cb39d73fb4 100644 --- a/art/estimators/classification/GPy.py +++ b/art/estimators/classification/GPy.py @@ -18,12 +18,11 @@ """ This module implements a wrapper class for GPy Gaussian Process classification models. """ -# pylint: disable=C0103 -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import os -from typing import List, Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -31,7 +30,7 @@ from art import config if TYPE_CHECKING: - # pylint: disable=C0412 + from GPy.models import GPClassification from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -41,7 +40,7 @@ logger = logging.getLogger(__name__) -# pylint: disable=C0103 +# pylint: disable=invalid-name class GPyGaussianProcessClassifier(ClassifierClassLossGradients): """ Wrapper class for GPy Gaussian Process classification models. @@ -49,10 +48,10 @@ class GPyGaussianProcessClassifier(ClassifierClassLossGradients): def __init__( self, - model: Optional["GPClassification"] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + model: "GPClassification" | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -82,7 +81,7 @@ def __init__( self.nb_classes = 2 # always binary @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -90,9 +89,8 @@ def input_shape(self) -> Tuple[int, ...]: """ return self._input_shape # type: ignore - # pylint: disable=W0221 def class_gradient( # type: ignore - self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, eps: float = 0.0001, **kwargs + self, x: np.ndarray, label: int | list[int] | np.ndarray | None = None, eps: float = 0.0001, **kwargs ) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. 
@@ -158,7 +156,6 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:

         return grads

-    # pylint: disable=W0221
     def predict(self, x: np.ndarray, logits: bool = False, **kwargs) -> np.ndarray:
         """
         Perform prediction for a batch of inputs.
@@ -213,7 +210,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, **kwargs) -> None:
         """
         raise NotImplementedError

-    def save(self, filename: str, path: Optional[str] = None) -> None:  # pragma: no cover
+    def save(self, filename: str, path: str | None = None) -> None:  # pragma: no cover
         """
         Save a model to file in the format specific to the backend framework.

diff --git a/art/estimators/classification/blackbox.py b/art/estimators/classification/blackbox.py
index 249ca74d51..88b1df27d4 100644
--- a/art/estimators/classification/blackbox.py
+++ b/art/estimators/classification/blackbox.py
@@ -18,11 +18,12 @@
 """
 This module implements the classifier `BlackBoxClassifier` for black-box classifiers.
 """
-from __future__ import absolute_import, division, print_function, unicode_literals
+from __future__ import absolute_import, division, print_function, unicode_literals, annotations

+from collections.abc import Callable
 from functools import total_ordering
 import logging
-from typing import Callable, List, Optional, Union, Tuple, TYPE_CHECKING
+from typing import TYPE_CHECKING

 import numpy as np

@@ -46,12 +47,12 @@ class BlackBoxClassifier(ClassifierMixin, BaseEstimator):

     def __init__(
         self,
-        predict_fn: Union[Callable, Tuple[np.ndarray, np.ndarray]],
-        input_shape: Tuple[int, ...],
+        predict_fn: Callable | tuple[np.ndarray, np.ndarray],
+        input_shape: tuple[int, ...],
         nb_classes: int,
-        clip_values: Optional["CLIP_VALUES_TYPE"] = None,
-        preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None,
-        postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None,
+        clip_values: "CLIP_VALUES_TYPE" | None = None,
+        preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None,
+        postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None,
         preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0),
         fuzzy_float_compare: bool = False,
     ):
@@ -91,7 +92,7 @@ def __init__(
         self.nb_classes = nb_classes

     @property
-    def input_shape(self) -> Tuple[int, ...]:
+    def input_shape(self) -> tuple[int, ...]:
         """
         Return the shape of one input sample.

@@ -108,7 +109,6 @@ def predict_fn(self) -> Callable:
         """
         return self._predict_fn  # type: ignore

-    # pylint: disable=W0221
     def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:
         """
         Perform prediction for a batch of inputs.
@@ -149,7 +149,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, **kwargs) -> None:
         """
         raise NotImplementedError

-    def save(self, filename: str, path: Optional[str] = None) -> None:
+    def save(self, filename: str, path: str | None = None) -> None:
         """
         Save a model to file in the format specific to the backend framework. For Keras, .h5 format is used.

@@ -175,13 +175,13 @@ class BlackBoxClassifierNeuralNetwork(NeuralNetworkMixin, ClassifierMixin, BaseE

     def __init__(
         self,
-        predict_fn: Union[Callable, Tuple[np.ndarray, np.ndarray]],
-        input_shape: Tuple[int, ...],
+        predict_fn: Callable | tuple[np.ndarray, np.ndarray],
+        input_shape: tuple[int, ...],
         nb_classes: int,
         channels_first: bool = True,
-        clip_values: Optional["CLIP_VALUES_TYPE"] = None,
-        preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None,
-        postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None,
+        clip_values: "CLIP_VALUES_TYPE" | None = None,
+        preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None,
+        postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None,
         preprocessing: "PREPROCESSING_TYPE" = (0, 1),
         fuzzy_float_compare: bool = False,
     ):
@@ -226,7 +226,7 @@ def __init__(
         self._layer_names = None

     @property
-    def input_shape(self) -> Tuple[int, ...]:
+    def input_shape(self) -> tuple[int, ...]:
         """
         Return the shape of one input sample.

@@ -274,9 +274,7 @@ def fit(self, x: np.ndarray, y, batch_size: int = 128, nb_epochs: int = 20, **kw
         """
         raise NotImplementedError

-    def get_activations(
-        self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False
-    ) -> np.ndarray:
+    def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray:
         """
         Return the output of a specific layer for samples `x` where `layer` is the index of the layer between 0 and
         `nb_layers - 1 or the name of the layer. The number of layers can be determined by counting the results
@@ -341,7 +339,7 @@ def __ge__(self, other):
         return self.key[compare_idx] >= other.key[compare_idx]


-def _make_lookup_predict_fn(existing_predictions: Tuple[np.ndarray, np.ndarray], fuzzy_float_compare: bool) -> Callable:
+def _make_lookup_predict_fn(existing_predictions: tuple[np.ndarray, np.ndarray], fuzzy_float_compare: bool) -> Callable:
     """
     Makes a predict_fn callback based on a table of existing predictions.

diff --git a/art/estimators/classification/catboost.py b/art/estimators/classification/catboost.py
index e30db41bc2..250360a4d6 100644
--- a/art/estimators/classification/catboost.py
+++ b/art/estimators/classification/catboost.py
@@ -18,12 +18,12 @@
 """
 This module implements the classifier `CatBoostARTClassifier` for CatBoost models.
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import os import pickle -from typing import List, Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -31,7 +31,7 @@ from art import config if TYPE_CHECKING: - # pylint: disable=C0412 + from catboost.core import CatBoostClassifier from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -50,12 +50,12 @@ class CatBoostARTClassifier(ClassifierDecisionTree): def __init__( self, - model: Optional["CatBoostClassifier"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + model: "CatBoostClassifier" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - nb_features: Optional[int] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + nb_features: int | None = None, ) -> None: """ Create a `Classifier` instance from a CatBoost model. @@ -70,7 +70,7 @@ def __init__( for features. :param nb_features: Number of features. """ - # pylint: disable=E0611,E0401 + from catboost.core import CatBoostClassifier if not isinstance(model, CatBoostClassifier): # pragma: no cover @@ -90,7 +90,7 @@ def __init__( self.nb_classes = nb_classes @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -151,7 +151,7 @@ def _get_nb_classes(self) -> int: return -1 - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. diff --git a/art/estimators/classification/classifier.py b/art/estimators/classification/classifier.py index 191f4a784a..33e52202ff 100644 --- a/art/estimators/classification/classifier.py +++ b/art/estimators/classification/classifier.py @@ -18,8 +18,10 @@ """ This module implements mixin abstract base classes defining properties for all classifiers in ART. """ +from __future__ import annotations + from abc import ABC, ABCMeta, abstractmethod -from typing import List, Optional, Union + import numpy as np @@ -33,13 +35,13 @@ class InputFilter(ABCMeta): """ - Metaclass to ensure that inputs are ndarray for all of the subclass generate and extract calls. + Metaclass to ensure that inputs are ndarray for all the subclass generate and extract calls. """ - def __init__(cls, name, bases, clsdict): # pylint: disable=W0231,W0613 + def __init__(cls, name, bases, clsdict): """ This function overrides any existing generate or extract methods with a new method that - ensures the input is an ndarray. There is an assumption that the input object has implemented + ensures the input is a ndarray. There is an assumption that the input object has implemented __array__ with np.array calls. """ @@ -128,9 +130,7 @@ class `Classifier`. 
""" @abstractmethod - def class_gradient( - self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, **kwargs - ) -> np.ndarray: + def class_gradient(self, x: np.ndarray, label: int | list[int] | np.ndarray | None = None, **kwargs) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. @@ -183,7 +183,7 @@ class ClassifierNeuralNetwork( ) @abstractmethod - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. This function is not supported for ensembles. diff --git a/art/estimators/classification/deep_partition_ensemble.py b/art/estimators/classification/deep_partition_ensemble.py index e309db2bac..e47c1f913b 100644 --- a/art/estimators/classification/deep_partition_ensemble.py +++ b/art/estimators/classification/deep_partition_ensemble.py @@ -18,11 +18,12 @@ """ Creates a Deep Partition Aggregation ensemble classifier. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging import warnings -from typing import List, Optional, Union, Callable, Dict, TYPE_CHECKING +from typing import TYPE_CHECKING import copy import numpy as np @@ -52,13 +53,13 @@ class DeepPartitionEnsemble(EnsembleClassifier): def __init__( self, - classifiers: Union["CLASSIFIER_NEURALNETWORK_TYPE", List["CLASSIFIER_NEURALNETWORK_TYPE"]], - hash_function: Optional[Callable] = None, + classifiers: "CLASSIFIER_NEURALNETWORK_TYPE" | list["CLASSIFIER_NEURALNETWORK_TYPE"], + hash_function: Callable | None = None, ensemble_size: int = 50, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -122,7 +123,7 @@ def default_hash(x): self.ensemble_size = ensemble_size - def predict( # pylint: disable=W0221 + def predict( self, x: np.ndarray, batch_size: int = 128, raw: bool = False, max_aggregate: bool = True, **kwargs ) -> np.ndarray: """ @@ -146,7 +147,7 @@ def predict( # pylint: disable=W0221 # Aggregate based on top-1 prediction from each classifier if max_aggregate: preds = super().predict(x, batch_size=batch_size, raw=True, **kwargs) - aggregated_preds = np.zeros_like(preds, shape=preds.shape[1:]) # pylint: disable=E1123 + aggregated_preds = np.zeros_like(preds, shape=preds.shape[1:]) for i in range(preds.shape[0]): aggregated_preds[np.arange(len(aggregated_preds)), np.argmax(preds[i], axis=1)] += 1 return aggregated_preds @@ -154,14 +155,14 @@ def predict( # pylint: disable=W0221 # Aggregate based on summing predictions from each classifier return super().predict(x, batch_size=batch_size, raw=False, **kwargs) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 20, - train_dict: Optional[Dict] = None, - **kwargs + train_dict: dict | None = None, + **kwargs, ) -> None: """ Fit the classifier on the training set `(x, y)`. 
Each classifier will be trained with the @@ -179,7 +180,7 @@ def fit( # pylint: disable=W0221 """ if self.can_fit: # First, partition the data using the hash function - partition_ind = [[] for _ in range(self.ensemble_size)] # type: List[List[int]] + partition_ind = [[] for _ in range(self.ensemble_size)] # type: list[list[int]] for i, p_x in enumerate(x): partition_id = int(self.hash_function(p_x)) partition_ind[partition_id].append(i) diff --git a/art/estimators/classification/detector_classifier.py b/art/estimators/classification/detector_classifier.py index e3733352bb..ad8de25b62 100644 --- a/art/estimators/classification/detector_classifier.py +++ b/art/estimators/classification/detector_classifier.py @@ -21,10 +21,10 @@ Paper link: https://arxiv.org/abs/1705.07263 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -51,8 +51,8 @@ def __init__( self, classifier: ClassifierNeuralNetwork, detector: ClassifierNeuralNetwork, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -85,7 +85,7 @@ def __init__( self._input_shape = classifier.input_shape @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -139,10 +139,10 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg """ raise NotImplementedError - def class_gradient( # pylint: disable=W0221 + def class_gradient( self, x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, + label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, **kwargs, ) -> np.ndarray: @@ -265,9 +265,7 @@ def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: """ raise NotImplementedError - def loss_gradient( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. @@ -281,7 +279,7 @@ def loss_gradient( # pylint: disable=W0221 raise NotImplementedError @property - def layer_names(self) -> List[str]: + def layer_names(self) -> list[str]: """ Return the hidden layers in the model, if applicable. This function is not supported for the Classifier and Detector classes. @@ -292,7 +290,7 @@ def layer_names(self) -> List[str]: raise NotImplementedError def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False + self, x: np.ndarray, layer: int | str, batch_size: int = 128, framework: bool = False ) -> np.ndarray: """ Return the output of the specified layer for input `x`. 
`layer` is specified by layer index (between 0 and @@ -308,7 +306,7 @@ def get_activations( """ raise NotImplementedError - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. @@ -333,7 +331,7 @@ def __repr__(self): ) return repr_ - def _compute_combined_grads(self, x: np.ndarray, label: Optional[Union[int, List[int]]] = None) -> np.ndarray: + def _compute_combined_grads(self, x: np.ndarray, label: int | list[int] | None = None) -> np.ndarray: # Compute the classifier gradients classifier_grads = self.classifier.class_gradient(x=x, label=label) diff --git a/art/estimators/classification/ensemble.py b/art/estimators/classification/ensemble.py index c02508330b..d40ec42e7f 100644 --- a/art/estimators/classification/ensemble.py +++ b/art/estimators/classification/ensemble.py @@ -18,10 +18,10 @@ """ This module implements the classifier `EnsembleClassifier` for ensembles of multiple classifiers. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -50,12 +50,12 @@ class EnsembleClassifier(ClassifierNeuralNetwork): def __init__( self, - classifiers: List["CLASSIFIER_NEURALNETWORK_TYPE"], - classifier_weights: Union[list, np.ndarray, None] = None, + classifiers: list["CLASSIFIER_NEURALNETWORK_TYPE"], + classifier_weights: list | np.ndarray | None = None, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -138,7 +138,7 @@ def __init__( self._classifiers = classifiers @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -147,7 +147,7 @@ def input_shape(self) -> Tuple[int, ...]: return self._input_shape # type: ignore @property - def classifiers(self) -> List[ClassifierNeuralNetwork]: + def classifiers(self) -> list[ClassifierNeuralNetwork]: """ Return the Classifier instances that are ensembled together. @@ -164,9 +164,7 @@ def classifier_weights(self) -> np.ndarray: """ return self._classifier_weights # type: ignore - def predict( # pylint: disable=W0221 - self, x: np.ndarray, batch_size: int = 128, raw: bool = False, **kwargs - ) -> np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, raw: bool = False, **kwargs) -> np.ndarray: """ Perform prediction for a batch of inputs. Predictions from classifiers should only be aggregated if they all have the same type of output (e.g., probabilities). 
Otherwise, use `raw=True` to get predictions from all @@ -220,7 +218,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg raise NotImplementedError @property - def layer_names(self) -> List[str]: + def layer_names(self) -> list[str]: """ Return the hidden layers in the model, if applicable. This function is not supported for ensembles. @@ -230,7 +228,7 @@ def layer_names(self) -> List[str]: raise NotImplementedError def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False + self, x: np.ndarray, layer: int | str, batch_size: int = 128, framework: bool = False ) -> np.ndarray: """ Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and @@ -246,10 +244,10 @@ def get_activations( """ raise NotImplementedError - def class_gradient( # pylint: disable=W0221 + def class_gradient( self, x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, + label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, raw: bool = False, **kwargs, @@ -279,7 +277,7 @@ def class_gradient( # pylint: disable=W0221 return np.sum(grads, axis=0) - def loss_gradient( # pylint: disable=W0221 + def loss_gradient( self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, raw: bool = False, **kwargs ) -> np.ndarray: """ @@ -320,7 +318,7 @@ def __repr__(self): return repr_ - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. This function is not supported for ensembles. diff --git a/art/estimators/classification/hugging_face.py b/art/estimators/classification/hugging_face.py index 3bf8099e1b..0ada16d6ea 100644 --- a/art/estimators/classification/hugging_face.py +++ b/art/estimators/classification/hugging_face.py @@ -19,9 +19,11 @@ This module implements the abstract estimator `HuggingFaceClassifier` using the PyTorchClassifier as a backend to interface with ART. 
""" -import logging +from __future__ import annotations -from typing import List, Optional, Tuple, Union, Dict, Callable, Any, TYPE_CHECKING +from collections.abc import Callable +import logging +from typing import Any, TYPE_CHECKING import numpy as np import six @@ -48,18 +50,18 @@ def __init__( self, model: "transformers.PreTrainedModel", loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], + input_shape: tuple[int, ...], nb_classes: int, - optimizer: Optional["torch.optim.Optimizer"] = None, + optimizer: "torch.optim.Optimizer" | None = None, use_amp: bool = False, opt_level: str = "O1", - loss_scale: Optional[Union[float, str]] = "dynamic", + loss_scale: float | str | None = "dynamic", channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - processor: Optional[Callable] = None, + processor: Callable | None = None, device_type: str = "gpu", ): """ @@ -186,7 +188,6 @@ def __init__(self, model: torch.nn.Module): super().__init__() self._model = model - # pylint: disable=W0221 # disable pylint because of API requirements for function def forward(self, x): """ @@ -197,7 +198,7 @@ def forward(self, x): :return: a list of output layers, where the last 2 layers are logit and final outputs. :rtype: `list` """ - # pylint: disable=W0212 + # disable pylint because access to _model required result = [] @@ -214,7 +215,7 @@ def forward(self, x): return result @property - def get_layers(self) -> List[str]: + def get_layers(self) -> list[str]: """ Return the hidden layers in the model, if applicable. @@ -230,7 +231,6 @@ def get_layers(self) -> List[str]: modules = [] - # pylint: disable=W0613 def forward_hook(input_module, hook_input, hook_output): logger.info("input_module is %s with id %i", input_module, id(input_module)) modules.append(id(input_module)) @@ -282,11 +282,11 @@ def forward_hook(input_module, hook_input, hook_output): def get_activations( # type: ignore self, - x: Union[np.ndarray, "torch.Tensor"], - layer: Optional[Union[int, str]] = None, + x: np.ndarray | "torch.Tensor", + layer: int | str | None = None, batch_size: int = 128, framework: bool = False, - ) -> Union[np.ndarray, "torch.Tensor"]: + ) -> np.ndarray | "torch.Tensor": """ Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and `nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by @@ -323,7 +323,7 @@ def get_activations( # type: ignore def get_feature(name): # the hook signature - def hook(model, input, output): # pylint: disable=W0622,W0613 + def hook(model, input, output): # pylint: disable=redefined-builtin,unused-argument # TODO: this is using the input, rather than the output, to circumvent the fact # TODO: that flatten is not a layer in pytorch, and the activation defence expects # TODO: a flattened input. 
A better option is to refactor the activation defence @@ -333,7 +333,7 @@ def hook(model, input, output): # pylint: disable=W0622,W0613 return hook if not hasattr(self, "_features"): - self._features: Dict[str, torch.Tensor] = {} + self._features: dict[str, torch.Tensor] = {} # register forward hooks on the layers of choice handles = [] @@ -350,7 +350,7 @@ def hook(model, input, output): # pylint: disable=W0622,W0613 return self._features[self._layer_names[layer_index]][0] input_tensor = torch.from_numpy(x_preprocessed) self._model(input_tensor.to(self._device)) - return self._features[self._layer_names[layer_index]][0] # pylint: disable=W0212 + return self._features[self._layer_names[layer_index]][0] # Run prediction with batch processing results = [] @@ -365,7 +365,7 @@ def hook(model, input, output): # pylint: disable=W0622,W0613 # Run prediction for the current batch self._model(torch.from_numpy(x_preprocessed[begin:end]).to(self._device)) - layer_output = self._features[self._layer_names[layer_index]] # pylint: disable=W0212 + layer_output = self._features[self._layer_names[layer_index]] if isinstance(layer_output, tuple): results.append(layer_output[0].detach().cpu().numpy()) diff --git a/art/estimators/classification/keras.py b/art/estimators/classification/keras.py index c322a51358..6575523361 100644 --- a/art/estimators/classification/keras.py +++ b/art/estimators/classification/keras.py @@ -18,19 +18,15 @@ """ This module implements the classifier `KerasClassifier` for Keras models. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging import os import time from typing import ( Any, - Callable, - Dict, Iterator, - List, - Optional, - Tuple, Union, TYPE_CHECKING, ) @@ -47,7 +43,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import keras import tensorflow as tf @@ -58,7 +54,7 @@ logger = logging.getLogger(__name__) -KERAS_MODEL_TYPE = Union["keras.models.Model", "tf.keras.models.Model"] # pylint: disable=C0103 +KERAS_MODEL_TYPE = Union["keras.models.Model", "tf.keras.models.Model"] # pylint: disable=invalid-name class KerasClassifier(ClassGradientsMixin, ClassifierMixin, KerasEstimator): @@ -77,9 +73,9 @@ def __init__( model: KERAS_MODEL_TYPE, use_logits: bool = False, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), input_layer: int = 0, output_layer: int = 0, @@ -144,14 +140,14 @@ def _initialize_params( :param input_layer: Which layer to consider as the Input when the model has multiple input layers. :param output_layer: Which layer to consider as the Output when the model has multiple output layers. """ - # pylint: disable=E0401 + if self.is_tensorflow: import tensorflow as tf if tf.executing_eagerly(): # pragma: no cover raise ValueError("TensorFlow is executing eagerly. 
Please disable eager execution.") - import tensorflow.keras as keras # pylint: disable=R0402 - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras as keras # pylint: disable=consider-using-from-import + import tensorflow.keras.backend as k self._losses = keras.losses else: @@ -318,7 +314,7 @@ def _initialize_params( self._layer_names = self._get_layers() @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -355,9 +351,7 @@ def output_layer(self) -> int: """ return self._output_layer # type: ignore - def compute_loss( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, reduction: str = "none", **kwargs - ) -> np.ndarray: + def compute_loss(self, x: np.ndarray, y: np.ndarray, reduction: str = "none", **kwargs) -> np.ndarray: """ Compute the loss of the neural network for samples `x`. @@ -376,7 +370,7 @@ def compute_loss( # pylint: disable=W0221 raise NotImplementedError("loss method is only supported for keras versions >= 2.3.1") if self.is_tensorflow: - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras.backend as k else: import keras.backend as k @@ -427,9 +421,7 @@ def compute_loss( # pylint: disable=W0221 return loss_value - def loss_gradient( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. @@ -460,10 +452,10 @@ def loss_gradient( # pylint: disable=W0221 return gradients - def class_gradient( # pylint: disable=W0221 + def class_gradient( self, x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, + label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, **kwargs, ) -> np.ndarray: @@ -536,9 +528,7 @@ def class_gradient( # pylint: disable=W0221 return gradients - def predict( # pylint: disable=W0221 - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ Perform prediction for a batch of inputs. @@ -561,7 +551,7 @@ def predict( # pylint: disable=W0221 return predictions - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 20, verbose: bool = False, **kwargs ) -> None: """ @@ -591,9 +581,7 @@ def fit( # pylint: disable=W0221 x=x_preprocessed, y=y_preprocessed, batch_size=batch_size, epochs=nb_epochs, verbose=int(verbose), **kwargs ) - def fit_generator( # pylint: disable=W0221 - self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs - ) -> None: + def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs) -> None: """ Fit the classifier using the generator that yields batches as specified. @@ -630,7 +618,7 @@ def fit_generator( # pylint: disable=W0221 super().fit_generator(generator, nb_epochs=nb_epochs, verbose=verbose, **kwargs) def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False + self, x: np.ndarray, layer: int | str, batch_size: int = 128, framework: bool = False ) -> np.ndarray: """ Return the output of the specified layer for input `x`. 
`layer` is specified by layer index (between 0 and @@ -643,9 +631,9 @@ def get_activations( :param framework: If true, return the intermediate tensor representation of the activation. :return: The output of `layer`, where the first dimension is the batch size corresponding to `x`. """ - # pylint: disable=E0401 + if self.is_tensorflow: - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras.backend as k else: import keras.backend as k from art.config import ART_NUMPY_DTYPE @@ -672,7 +660,7 @@ def get_activations( x_preprocessed, _ = self._apply_preprocessing(x=x_expanded, y=None, fit=False) if not hasattr(self, "_activations_func"): - self._activations_func: Dict[str, Callable] = {} + self._activations_func: dict[str, Callable] = {} keras_layer = self._model.get_layer(layer_name) if layer_name not in self._activations_func: @@ -717,7 +705,7 @@ def custom_loss_gradient(self, nn_function, tensors, input_values, name="default :rtype: `np.ndarray` """ if self.is_tensorflow: - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras.backend as k else: import keras.backend as k @@ -745,10 +733,10 @@ def clone_for_refitting( return cloned_classifier raise ValueError("Type of cloned classifier not expected.") - def _init_class_gradients(self, label: Optional[Union[int, List[int], np.ndarray]] = None) -> None: - # pylint: disable=E0401 + def _init_class_gradients(self, label: int | list[int] | np.ndarray | None = None) -> None: + if self.is_tensorflow: - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras.backend as k else: import keras.backend as k @@ -780,24 +768,24 @@ def _init_class_gradients(self, label: Optional[Union[int, List[int], np.ndarray [self._input, k.learning_phase()], class_gradients ) - def _get_layers(self) -> List[str]: + def _get_layers(self) -> list[str]: """ Return the hidden layers in the model, if applicable. :return: The hidden layers in the model, input and output layers excluded. """ - # pylint: disable=E0401 + if self.is_tensorflow: - from tensorflow.keras.layers import InputLayer # pylint: disable=E0611 + from tensorflow.keras.layers import InputLayer else: - from keras.engine.topology import InputLayer # pylint: disable=E0611 + from keras.engine.topology import InputLayer layer_names = [layer.name for layer in self._model.layers[:-1] if not isinstance(layer, InputLayer)] logger.info("Inferred %i hidden layers on Keras classifier.", len(layer_names)) return layer_names - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. For Keras, .h5 format is used. @@ -816,7 +804,7 @@ def save(self, filename: str, path: Optional[str] = None) -> None: self._model.save(str(full_path)) logger.info("Model saved in path: %s.", full_path) - def __getstate__(self) -> Dict[str, Any]: + def __getstate__(self) -> dict[str, Any]: """ Use to ensure `KerasClassifier` can be pickled. @@ -855,7 +843,7 @@ def __getstate__(self) -> Dict[str, Any]: self.save(model_name) return state - def __setstate__(self, state: Dict[str, Any]) -> None: + def __setstate__(self, state: dict[str, Any]) -> None: """ Use to ensure `KerasClassifier` can be unpickled. 
@@ -864,7 +852,7 @@ def __setstate__(self, state: Dict[str, Any]) -> None: self.__dict__.update(state) if self.is_tensorflow: - from tensorflow.keras.models import load_model # pylint: disable=E0611 + from tensorflow.keras.models import load_model else: from keras.models import load_model @@ -888,7 +876,7 @@ def __repr__(self): def generator_fit( x: np.ndarray, y: np.ndarray, batch_size: int = 128 -) -> Iterator[Tuple[np.ndarray, np.ndarray]]: # pragma: no cover +) -> Iterator[tuple[np.ndarray, np.ndarray]]: # pragma: no cover """ Minimal data generator for randomly batching large datasets. diff --git a/art/estimators/classification/lightgbm.py b/art/estimators/classification/lightgbm.py index 72930f6e22..f890ec5d0b 100644 --- a/art/estimators/classification/lightgbm.py +++ b/art/estimators/classification/lightgbm.py @@ -18,13 +18,13 @@ """ This module implements the classifier `LightGBMClassifier` for LightGBM models. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations from copy import deepcopy import logging import os import pickle -from typing import List, Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -32,7 +32,7 @@ from art import config if TYPE_CHECKING: - # pylint: disable=C0412 + import lightgbm from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -50,10 +50,10 @@ class LightGBMClassifier(ClassifierDecisionTree): def __init__( self, - model: Optional["lightgbm.Booster"] = None, # type: ignore - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + model: "lightgbm.Booster" | None = None, # type: ignore + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -85,7 +85,7 @@ def __init__( self.nb_classes = self._get_nb_classes() @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -129,10 +129,10 @@ def _get_nb_classes(self) -> int: :return: Number of classes in the data. """ - # pylint: disable=W0212 + return self._model._Booster__num_class - def save(self, filename: str, path: Optional[str] = None) -> None: # pragma: no cover + def save(self, filename: str, path: str | None = None) -> None: # pragma: no cover """ Save a model to file in the format specific to the backend framework. 
@@ -165,7 +165,6 @@ def get_trees(self) -> list: for i_tree, tree_dump in enumerate(booster_dump): box = Box() - # pylint: disable=W0212 if self._model._Booster__num_class == 2: class_label = -1 else: @@ -180,10 +179,10 @@ def get_trees(self) -> list: return trees - def _get_leaf_nodes(self, node, i_tree, class_label, box) -> List["LeafNode"]: + def _get_leaf_nodes(self, node, i_tree, class_label, box) -> list["LeafNode"]: from art.metrics.verification_decisions_trees import Box, Interval, LeafNode - leaf_nodes: List[LeafNode] = [] + leaf_nodes: list[LeafNode] = [] if "split_index" in node: node_left = node["left_child"] diff --git a/art/estimators/classification/mxnet.py b/art/estimators/classification/mxnet.py index b8481d68b9..b6ed747a9d 100644 --- a/art/estimators/classification/mxnet.py +++ b/art/estimators/classification/mxnet.py @@ -18,11 +18,11 @@ """ This module implements the classifier `MXClassifier` for MXNet Gluon models. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import os -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np import six @@ -33,7 +33,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import mxnet as mx from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -65,15 +65,15 @@ class MXClassifier(ClassGradientsMixin, ClassifierMixin, MXEstimator): def __init__( self, model: "mx.gluon.Block", - loss: Union["mx.nd.loss", "mx.gluon.loss"], - input_shape: Tuple[int, ...], + loss: "mx.nd.loss" | "mx.gluon.loss", + input_shape: tuple[int, ...], nb_classes: int, - optimizer: Optional["mx.gluon.Trainer"] = None, - ctx: Optional["mx.context.Context"] = None, + optimizer: "mx.gluon.Trainer" | None = None, + ctx: "mx.context.Context" | None = None, channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -124,7 +124,7 @@ def __init__( self._layer_names = self._get_layers() @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -133,7 +133,7 @@ def input_shape(self) -> Tuple[int, ...]: return self._input_shape # type: ignore @property - def loss(self) -> Union["mx.nd.loss", "mx.gluon.loss"]: + def loss(self) -> "mx.nd.loss" | "mx.gluon.loss": """ Return the loss function. @@ -260,9 +260,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg # Fit a generic data generator through the API super().fit_generator(generator, nb_epochs=nb_epochs) - def predict( # pylint: disable=W0221 - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ Perform prediction for a batch of inputs. 
@@ -299,10 +297,10 @@ def predict( # pylint: disable=W0221 return predictions - def class_gradient( # pylint: disable=W0221 + def class_gradient( self, x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, + label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, **kwargs, ) -> np.ndarray: @@ -379,9 +377,7 @@ def class_gradient( # pylint: disable=W0221 return grads - def loss_gradient( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. @@ -426,7 +422,7 @@ def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: raise NotImplementedError @property - def layer_names(self) -> List[str]: + def layer_names(self) -> list[str]: """ Return the hidden layers in the model, if applicable. @@ -440,7 +436,7 @@ def layer_names(self) -> List[str]: return self._layer_names def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False + self, x: np.ndarray, layer: int | str, batch_size: int = 128, framework: bool = False ) -> np.ndarray: # pragma: no cover """ Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and @@ -500,7 +496,7 @@ def get_activations( activations_array = np.vstack(activations) return activations_array - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. For Gluon, only parameters are saved in file with name `.params` at the specified path. To load the saved model, the original model code needs diff --git a/art/estimators/classification/pytorch.py b/art/estimators/classification/pytorch.py index 5216c02c21..48ef96fb4c 100644 --- a/art/estimators/classification/pytorch.py +++ b/art/estimators/classification/pytorch.py @@ -18,14 +18,14 @@ """ This module implements the classifier `PyTorchClassifier` for PyTorch models. 
""" -# pylint: disable=C0302,R0904 -from __future__ import absolute_import, division, print_function, unicode_literals + +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import copy import logging import os import time -from typing import Any, Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING from tqdm.auto import tqdm import numpy as np @@ -40,7 +40,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412, C0302 + import torch from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -73,16 +73,16 @@ def __init__( self, model: "torch.nn.Module", loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], + input_shape: tuple[int, ...], nb_classes: int, - optimizer: Optional["torch.optim.Optimizer"] = None, # type: ignore + optimizer: "torch.optim.Optimizer" | None = None, # type: ignore use_amp: bool = False, opt_level: str = "O1", - loss_scale: Optional[Union[float, str]] = "dynamic", + loss_scale: float | str | None = "dynamic", channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", ) -> None: @@ -133,7 +133,7 @@ def __init__( self._loss = loss self._optimizer = optimizer self._use_amp = use_amp - self._learning_phase: Optional[bool] = None + self._learning_phase: bool | None = None self._opt_level = opt_level self._loss_scale = loss_scale @@ -142,7 +142,7 @@ def __init__( self.is_rnn = any((isinstance(m, torch.nn.modules.RNNBase) for m in self._model.modules())) # Get the internal layers - self._layer_names: List[str] = self._model.get_layers # type: ignore + self._layer_names: list[str] = self._model.get_layers # type: ignore self._model.to(self._device) @@ -168,7 +168,7 @@ def __init__( # Setup for AMP use if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp if self._optimizer is None: logger.warning( @@ -204,10 +204,10 @@ def device(self) -> "torch.device": @property def model(self) -> "torch.nn.Module": - return self._model._model # pylint: disable=W0212 + return self._model._model @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -253,7 +253,7 @@ def opt_level(self) -> str: return self._opt_level # type: ignore @property - def loss_scale(self) -> Union[float, str]: + def loss_scale(self) -> float | str: """ Return the loss scaling value. @@ -262,11 +262,11 @@ def loss_scale(self) -> Union[float, str]: """ return self._loss_scale # type: ignore - def reduce_labels(self, y: Union[np.ndarray, "torch.Tensor"]) -> Union[np.ndarray, "torch.Tensor"]: + def reduce_labels(self, y: np.ndarray | "torch.Tensor") -> np.ndarray | "torch.Tensor": """ Reduce labels from one-hot encoded to index labels. 
""" - # pylint: disable=R0911 + import torch # Check if the loss function supports probability labels and probability labels are provided @@ -297,9 +297,7 @@ def reduce_labels(self, y: Union[np.ndarray, "torch.Tensor"]) -> Union[np.ndarra return y.float() return y.astype(np.float32) - def predict( # pylint: disable=W0221 - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ Perform prediction for a batch of inputs. @@ -345,8 +343,8 @@ def predict( # pylint: disable=W0221 return predictions def _predict_framework( - self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None - ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]: + self, x: "torch.Tensor", y: "torch.Tensor" | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Perform prediction for a batch of inputs. @@ -366,7 +364,7 @@ def _predict_framework( return output, y_preprocessed - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, @@ -374,7 +372,7 @@ def fit( # pylint: disable=W0221 nb_epochs: int = 10, training_mode: bool = True, drop_last: bool = False, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, verbose: bool = False, **kwargs, ) -> None: @@ -444,7 +442,7 @@ def fit( # pylint: disable=W0221 # Do training if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() @@ -456,9 +454,7 @@ def fit( # pylint: disable=W0221 if scheduler is not None: scheduler.step() - def fit_generator( # pylint: disable=W0221 - self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs - ) -> None: + def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs) -> None: """ Fit the classifier using the generator that yields batches as specified. @@ -514,7 +510,7 @@ def fit_generator( # pylint: disable=W0221 # Do training if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() @@ -566,10 +562,10 @@ def weight_reset(module): self.model.apply(weight_reset) - def class_gradient( # pylint: disable=W0221 + def class_gradient( self, x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, + label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, **kwargs, ) -> np.ndarray: @@ -703,13 +699,13 @@ def hook(grad): return grads - def compute_loss( # type: ignore # pylint: disable=W0221 + def compute_loss( # type: ignore self, - x: Union[np.ndarray, "torch.Tensor"], - y: Union[np.ndarray, "torch.Tensor"], + x: np.ndarray | "torch.Tensor", + y: np.ndarray | "torch.Tensor", reduction: str = "none", **kwargs, - ) -> Union[np.ndarray, "torch.Tensor"]: + ) -> np.ndarray | "torch.Tensor": """ Compute the loss. 
@@ -759,10 +755,10 @@ def compute_loss( # type: ignore # pylint: disable=W0221 def compute_losses( self, - x: Union[np.ndarray, "torch.Tensor"], - y: Union[np.ndarray, "torch.Tensor"], + x: np.ndarray | "torch.Tensor", + y: np.ndarray | "torch.Tensor", reduction: str = "none", - ) -> Dict[str, Union[np.ndarray, "torch.Tensor"]]: + ) -> dict[str, np.ndarray | "torch.Tensor"]: """ Compute all loss components. @@ -777,13 +773,13 @@ def compute_losses( """ return {"total": self.compute_loss(x=x, y=y, reduction=reduction)} - def loss_gradient( # pylint: disable=W0221 + def loss_gradient( self, - x: Union[np.ndarray, "torch.Tensor"], - y: Union[np.ndarray, "torch.Tensor"], + x: np.ndarray | "torch.Tensor", + y: np.ndarray | "torch.Tensor", training_mode: bool = False, **kwargs, - ) -> Union[np.ndarray, "torch.Tensor"]: + ) -> np.ndarray | "torch.Tensor": """ Compute the gradient of the loss function w.r.t. `x`. @@ -852,7 +848,7 @@ def loss_gradient( # pylint: disable=W0221 # Compute gradients if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() @@ -875,14 +871,14 @@ def loss_gradient( # pylint: disable=W0221 return grads - def custom_loss_gradient( # pylint: disable=W0221 + def custom_loss_gradient( self, loss_fn, - x: Union[np.ndarray, "torch.Tensor"], - y: Union[np.ndarray, "torch.Tensor"], + x: np.ndarray | "torch.Tensor", + y: np.ndarray | "torch.Tensor", layer_name, training_mode: bool = False, - ) -> Union[np.ndarray, "torch.Tensor"]: + ) -> np.ndarray | "torch.Tensor": """ Compute the gradient of the loss function w.r.t. `x`. @@ -932,7 +928,7 @@ def custom_loss_gradient( # pylint: disable=W0221 # Compute gradients if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() @@ -954,11 +950,11 @@ def custom_loss_gradient( # pylint: disable=W0221 def get_activations( # type: ignore self, - x: Union[np.ndarray, "torch.Tensor"], - layer: Optional[Union[int, str]] = None, + x: np.ndarray | "torch.Tensor", + layer: int | str | None = None, batch_size: int = 128, framework: bool = False, - ) -> Union[np.ndarray, "torch.Tensor"]: + ) -> np.ndarray | "torch.Tensor": """ Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and `nb_layers - 1`) or by name. 
The number of layers can be determined by counting the results returned by @@ -995,19 +991,17 @@ def get_activations( # type: ignore def get_feature(name): # the hook signature - def hook(model, input, output): # pylint: disable=W0622,W0613 + def hook(model, input, output): # pylint: disable=redefined-builtin,unused-argument self._features[name] = output return hook if not hasattr(self, "_features"): - self._features: Dict[str, torch.Tensor] = {} + self._features: dict[str, torch.Tensor] = {} # register forward hooks on the layers of choice if layer not in self._features: - interim_layer = dict([*self._model._model.named_modules()])[ # pylint: disable=W0212,W0622,W0613 - self._layer_names[layer_index] - ] + interim_layer = dict([*self._model._model.named_modules()])[self._layer_names[layer_index]] interim_layer.register_forward_hook(get_feature(self._layer_names[layer_index])) if framework: @@ -1016,7 +1010,7 @@ def hook(model, input, output): # pylint: disable=W0622,W0613 return self._features[self._layer_names[layer_index]] input_tensor = torch.from_numpy(x_preprocessed) self._model(input_tensor.to(self._device)) - return self._features[self._layer_names[layer_index]] # pylint: disable=W0212 + return self._features[self._layer_names[layer_index]] # Run prediction with batch processing results = [] @@ -1031,14 +1025,14 @@ def hook(model, input, output): # pylint: disable=W0622,W0613 # Run prediction for the current batch self._model(torch.from_numpy(x_preprocessed[begin:end]).to(self._device)) - layer_output = self._features[self._layer_names[layer_index]] # pylint: disable=W0212 + layer_output = self._features[self._layer_names[layer_index]] results.append(layer_output.detach().cpu().numpy()) results_array = np.concatenate(results) return results_array - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. @@ -1056,7 +1050,6 @@ def save(self, filename: str, path: Optional[str] = None) -> None: if not os.path.exists(folder): os.makedirs(folder) - # pylint: disable=W0212 # disable pylint because access to _modules required torch.save(self._model._model.state_dict(), full_path + ".model") if self._optimizer is not None: @@ -1064,13 +1057,13 @@ def save(self, filename: str, path: Optional[str] = None) -> None: logger.info("Optimizer state dict saved in path: %s.", full_path + ".optimizer") logger.info("Model state dict saved in path: %s.", full_path + ".model") - def __getstate__(self) -> Dict[str, Any]: + def __getstate__(self) -> dict[str, Any]: """ Use to ensure `PyTorchClassifier` can be pickled. :return: State dictionary with instance parameters. """ - # pylint: disable=W0212 + # disable pylint because access to _model required state = self.__dict__.copy() state["inner_model"] = copy.copy(state["_model"]._model) @@ -1086,7 +1079,7 @@ def __getstate__(self) -> Dict[str, Any]: return state - def __setstate__(self, state: Dict[str, Any]) -> None: + def __setstate__(self, state: dict[str, Any]) -> None: """ Use to ensure `PyTorchClassifier` can be unpickled. @@ -1148,7 +1141,6 @@ def __init__(self, model: torch.nn.Module): super().__init__() self._model = model - # pylint: disable=W0221 # disable pylint because of API requirements for function def forward(self, x): """ @@ -1159,7 +1151,7 @@ def forward(self, x): :return: a list of output layers, where the last 2 layers are logit and final outputs. 
:rtype: `list` """ - # pylint: disable=W0212 + # disable pylint because access to _model required result = [] @@ -1178,7 +1170,7 @@ def forward(self, x): return result @property - def get_layers(self) -> List[str]: + def get_layers(self) -> list[str]: """ Return the hidden layers in the model, if applicable. @@ -1195,7 +1187,7 @@ def get_layers(self) -> List[str]: result = [] if isinstance(self._model, torch.nn.Module): - for name, _ in self._model._modules.items(): # pylint: disable=W0212 + for name, _ in self._model._modules.items(): result.append(name) else: # pragma: no cover diff --git a/art/estimators/classification/query_efficient_bb.py b/art/estimators/classification/query_efficient_bb.py index 748617e3cb..b766c3f104 100644 --- a/art/estimators/classification/query_efficient_bb.py +++ b/art/estimators/classification/query_efficient_bb.py @@ -18,8 +18,10 @@ """ Provides black-box gradient estimation using NES. """ +from __future__ import annotations + import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from scipy.stats import entropy @@ -59,7 +61,6 @@ def __init__( disable. """ super().__init__(model=classifier.model, clip_values=classifier.clip_values) - # pylint: disable=E0203 self._classifier = classifier self.num_basis = num_basis self.sigma = sigma @@ -67,7 +68,7 @@ def __init__( self._nb_classes = self._classifier.nb_classes @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -75,7 +76,7 @@ def input_shape(self) -> Tuple[int, ...]: """ return self._classifier.input_shape # type: ignore - def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray: # pylint: disable=W0221 + def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray: """ Perform prediction of the classifier for input `x`. Rounds results first. @@ -98,7 +99,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, **kwargs) -> None: """ raise NotImplementedError - def _generate_samples(self, x: np.ndarray, epsilon_map: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def _generate_samples(self, x: np.ndarray, epsilon_map: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Generate samples around the current image. @@ -118,9 +119,7 @@ def _generate_samples(self, x: np.ndarray, epsilon_map: np.ndarray) -> Tuple[np. ) return minus, plus - def class_gradient( - self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, **kwargs - ) -> np.ndarray: + def class_gradient(self, x: np.ndarray, label: int | list[int] | np.ndarray | None = None, **kwargs) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. @@ -166,7 +165,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: grads_array = self._apply_preprocessing_gradient(x, np.array(grads)) return grads_array - def get_activations(self, x: np.ndarray, layer: Union[int, str], batch_size: int) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int) -> np.ndarray: """ Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and `nb_layers - 1`) or by name. 
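The query-efficient black-box estimator above keeps NES-style gradient estimation: the model is probed at antithetic points x - sigma * eps and x + sigma * eps and the differences are averaged. A rough NumPy sketch of that estimator, using an illustrative toy loss rather than ART's classifier:

    import numpy as np

    def nes_gradient(f, x, sigma=0.01, num_basis=20, rng=None):
        # average of (f(x + sigma*eps) - f(x - sigma*eps)) / (2*sigma) * eps over random eps
        rng = np.random.default_rng() if rng is None else rng
        grad = np.zeros_like(x, dtype=float)
        for _ in range(num_basis):
            eps = rng.standard_normal(size=x.shape)
            grad += (f(x + sigma * eps) - f(x - sigma * eps)) / (2.0 * sigma) * eps
        return grad / num_basis

    # sanity check against the analytic gradient of f(x) = sum(x**2), which is 2*x
    x0 = np.array([0.5, -1.0, 2.0])
    print(nes_gradient(lambda v: np.sum(v**2), x0, num_basis=5000))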
The number of layers can be determined by counting the results returned by @@ -179,7 +178,7 @@ def get_activations(self, x: np.ndarray, layer: Union[int, str], batch_size: int """ raise NotImplementedError - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file specific to the backend framework. diff --git a/art/estimators/classification/scikitlearn.py b/art/estimators/classification/scikitlearn.py index 43aeee07b8..0ba012ea05 100644 --- a/art/estimators/classification/scikitlearn.py +++ b/art/estimators/classification/scikitlearn.py @@ -18,15 +18,16 @@ """ This module implements the classifiers for scikit-learn models. """ -# pylint: disable=C0302 -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations + +from collections.abc import Callable from copy import deepcopy import importlib import logging import os import pickle -from typing import Callable, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -41,7 +42,7 @@ from art import config if TYPE_CHECKING: - # pylint: disable=C0412 + import sklearn from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -52,12 +53,12 @@ logger = logging.getLogger(__name__) -# pylint: disable=C0103 +# pylint: disable=invalid-name def SklearnClassifier( model: "sklearn.base.BaseEstimator", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), use_logits: bool = False, ) -> "ScikitlearnClassifier": @@ -109,9 +110,9 @@ class ScikitlearnClassifier(ClassifierMixin, ScikitlearnEstimator): def __init__( self, model: "sklearn.base.BaseEstimator", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), use_logits: bool = False, ) -> None: @@ -143,7 +144,7 @@ def __init__( self._use_logits = use_logits @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -210,7 +211,7 @@ def predict(self, x: np.ndarray, **kwargs) -> np.ndarray: return predictions - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. 
@@ -270,9 +271,9 @@ class ScikitlearnDecisionTreeClassifier(ScikitlearnClassifier): def __init__( self, model: "sklearn.tree.DecisionTreeClassifier", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -367,7 +368,7 @@ def get_values_at_node(self, node_id: int) -> np.ndarray: """ return self.model.tree_.value[node_id] / np.linalg.norm(self.model.tree_.value[node_id]) - def _get_leaf_nodes(self, node_id, i_tree, class_label, box) -> List["LeafNode"]: + def _get_leaf_nodes(self, node_id, i_tree, class_label, box) -> list["LeafNode"]: from art.metrics.verification_decisions_trees import LeafNode, Box, Interval leaf_nodes = [] @@ -416,9 +417,9 @@ class ScikitlearnExtraTreeClassifier(ScikitlearnDecisionTreeClassifier): def __init__( self, model: "sklearn.tree.ExtraTreeClassifier", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -455,9 +456,9 @@ class ScikitlearnAdaBoostClassifier(ScikitlearnClassifier): def __init__( self, model: "sklearn.ensemble.AdaBoostClassifier", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -494,9 +495,9 @@ class ScikitlearnBaggingClassifier(ScikitlearnClassifier): def __init__( self, model: "sklearn.ensemble.BaggingClassifier", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -511,7 +512,7 @@ def __init__( used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one. 
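The `_get_leaf_nodes` and `get_values_at_node` methods above walk scikit-learn's fitted `tree_` arrays (`children_left`, `children_right`, `value`). The traversal can be reproduced directly on a plain estimator, as in this standalone sketch:

    import numpy as np
    from sklearn.tree import DecisionTreeClassifier

    X = np.random.rand(200, 3)
    y = (X[:, 0] > 0.5).astype(int)
    tree = DecisionTreeClassifier(max_depth=3).fit(X, y).tree_

    def leaves(node_id=0):
        left, right = tree.children_left[node_id], tree.children_right[node_id]
        if left == -1:  # scikit-learn marks leaves with -1 in the children arrays
            return [node_id]
        return leaves(left) + leaves(right)

    for leaf in leaves():
        # per-class statistics stored at the leaf, the quantity normalised by get_values_at_node()
        print(leaf, tree.value[leaf])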
""" - # pylint: disable=E0001 + import sklearn if not isinstance(model, sklearn.ensemble.BaggingClassifier): @@ -534,9 +535,9 @@ class ScikitlearnExtraTreesClassifier(ScikitlearnClassifier, DecisionTreeMixin): def __init__( self, model: "sklearn.ensemble.ExtraTreesClassifier", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ): """ @@ -551,7 +552,7 @@ def __init__( used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one. """ - # pylint: disable=E0001 + import sklearn if not isinstance(model, sklearn.ensemble.ExtraTreesClassifier): @@ -565,7 +566,7 @@ def __init__( preprocessing=preprocessing, ) - def get_trees(self) -> List["Tree"]: + def get_trees(self) -> list["Tree"]: """ Get the decision trees. @@ -588,7 +589,6 @@ def get_trees(self) -> List["Tree"]: for i_class in range(self.model.n_classes_): class_label = i_class - # pylint: disable=W0212 trees.append( Tree( class_id=class_label, @@ -607,9 +607,9 @@ class ScikitlearnGradientBoostingClassifier(ScikitlearnClassifier, DecisionTreeM def __init__( self, model: "sklearn.ensemble.GradientBoostingClassifier", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -624,7 +624,7 @@ def __init__( used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one. """ - # pylint: disable=E0001 + import sklearn if not isinstance(model, sklearn.ensemble.GradientBoostingClassifier): @@ -638,7 +638,7 @@ def __init__( preprocessing=preprocessing, ) - def get_trees(self) -> List["Tree"]: + def get_trees(self) -> list["Tree"]: """ Get the decision trees. @@ -662,7 +662,6 @@ def get_trees(self) -> List["Tree"]: else: class_label = i_class - # pylint: disable=W0212 trees.append( Tree( class_id=class_label, @@ -681,9 +680,9 @@ class ScikitlearnRandomForestClassifier(ScikitlearnClassifier): def __init__( self, model: "sklearn.ensemble.RandomForestClassifier", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -698,7 +697,7 @@ def __init__( used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one. 
""" - # pylint: disable=E0001 + import sklearn if not isinstance(model, sklearn.ensemble.RandomForestClassifier): @@ -712,7 +711,7 @@ def __init__( preprocessing=preprocessing, ) - def get_trees(self) -> List["Tree"]: + def get_trees(self) -> list["Tree"]: """ Get the decision trees. @@ -735,7 +734,6 @@ def get_trees(self) -> List["Tree"]: for i_class in range(self.model.n_classes_): class_label = i_class - # pylint: disable=W0212 trees.append( Tree( class_id=class_label, @@ -754,9 +752,9 @@ class ScikitlearnLogisticRegression(ClassGradientsMixin, LossGradientsMixin, Sci def __init__( self, model: "sklearn.linear_model.LogisticRegression", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -771,7 +769,7 @@ def __init__( used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one. """ - # pylint: disable=E0001 + import sklearn if not isinstance(model, sklearn.linear_model.LogisticRegression): @@ -785,9 +783,7 @@ def __init__( preprocessing=preprocessing, ) - def class_gradient( - self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, **kwargs - ) -> np.ndarray: + def class_gradient(self, x: np.ndarray, label: int | list[int] | np.ndarray | None = None, **kwargs) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. @@ -889,7 +885,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: :return: Array of gradients of the same shape as `x`. :raises `ValueError`: If the model has not been fitted prior to calling this method. """ - # pylint: disable=E0001 + from sklearn.utils.class_weight import compute_class_weight if not hasattr(self.model, "coef_"): # pragma: no cover @@ -926,7 +922,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: return gradients @staticmethod - def get_trainable_attribute_names() -> Tuple[str, str]: + def get_trainable_attribute_names() -> tuple[str, str]: """ Get the names of trainable attributes. @@ -942,10 +938,10 @@ class ScikitlearnGaussianNB(ScikitlearnClassifier): def __init__( self, - model: Union["sklearn.naive_bayes.GaussianNB"], - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + model: "sklearn.naive_bayes.GaussianNB", + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -960,7 +956,7 @@ def __init__( used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one. 
""" - # pylint: disable=E0001 + import sklearn if not isinstance(model, sklearn.naive_bayes.GaussianNB): # pragma: no cover @@ -974,7 +970,7 @@ def __init__( preprocessing=preprocessing, ) - def get_trainable_attribute_names(self) -> Tuple[str, str]: + def get_trainable_attribute_names(self) -> tuple[str, str]: """ Get the names of trainable attributes. @@ -994,10 +990,10 @@ class ScikitlearnSVC(ClassGradientsMixin, LossGradientsMixin, ScikitlearnClassif def __init__( self, - model: Union["sklearn.svm.SVC", "sklearn.svm.LinearSVC"], - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + model: "sklearn.svm.SVC" | "sklearn.svm.LinearSVC", + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -1012,7 +1008,7 @@ def __init__( used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one. """ - # pylint: disable=E0001 + import sklearn if not isinstance(model, sklearn.svm.SVC) and not isinstance(model, sklearn.svm.LinearSVC): @@ -1027,9 +1023,7 @@ def __init__( ) self._kernel = self._kernel_func() - def class_gradient( - self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, **kwargs - ) -> np.ndarray: + def class_gradient(self, x: np.ndarray, label: int | list[int] | np.ndarray | None = None, **kwargs) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. @@ -1042,7 +1036,7 @@ def class_gradient( `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes `(batch_size, 1, input_shape)` when `label` parameter is specified. """ - # pylint: disable=E0001 + import sklearn # Apply preprocessing @@ -1230,7 +1224,7 @@ def _kernel_grad(self, sv: np.ndarray, x_sample: np.ndarray) -> np.ndarray: :param x_sample: The sample the gradient is taken with respect to. :return: the kernel gradient. """ - # pylint: disable=W0212 + if self.model.kernel == "linear": grad = sv elif self.model.kernel == "poly": @@ -1276,7 +1270,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: `(nb_samples,)`. :return: Array of gradients of the same shape as `x`. """ - # pylint: disable=E0001 + import sklearn # Apply preprocessing @@ -1353,7 +1347,7 @@ def _kernel_func(self) -> Callable: :return: A callable kernel function. """ - # pylint: disable=E0001 + import sklearn from sklearn.metrics.pairwise import ( polynomial_kernel, @@ -1408,7 +1402,7 @@ def predict(self, x: np.ndarray, **kwargs) -> np.ndarray: :param x: Input samples. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ - # pylint: disable=E0001 + import sklearn # Apply defences diff --git a/art/estimators/classification/tensorflow.py b/art/estimators/classification/tensorflow.py index 33cc515ae1..272174066e 100644 --- a/art/estimators/classification/tensorflow.py +++ b/art/estimators/classification/tensorflow.py @@ -18,15 +18,16 @@ """ This module implements the classifier `TensorFlowClassifier` for TensorFlow models. 
""" -# pylint: disable=C0302 -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations + +from collections.abc import Callable import logging import os import random import shutil import time -from typing import Any, Callable, Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING from tqdm.auto import tqdm import numpy as np @@ -38,7 +39,7 @@ from art.utils import check_and_transform_label_format if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow.compat.v1 as tf from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -73,17 +74,17 @@ def __init__( self, input_ph: "tf.Placeholder", output: "tf.Tensor", - labels_ph: Optional["tf.Placeholder"] = None, - train: Optional["tf.Tensor"] = None, - loss: Optional["tf.Tensor"] = None, - learning: Optional["tf.Placeholder"] = None, - sess: Optional["tf.Session"] = None, + labels_ph: "tf.Placeholder" | None = None, + train: "tf.Tensor" | None = None, + loss: "tf.Tensor" | None = None, + learning: "tf.Placeholder" | None = None, + sess: "tf.Session" | None = None, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - feed_dict: Optional[Dict[Any, Any]] = None, + feed_dict: dict[Any, Any] | None = None, ) -> None: """ Initialization specific to TensorFlow models implementation. @@ -112,7 +113,7 @@ def __init__( :param feed_dict: A feed dictionary for the session run evaluating the classifier. This dictionary includes all additionally required placeholders except the placeholders defined in this class. """ - # pylint: disable=E0401 + import tensorflow.compat.v1 as tf super().__init__( @@ -156,7 +157,7 @@ def __init__( self._reduce_labels = False @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -219,7 +220,7 @@ def learning(self) -> "tf.Placeholder": return self._learning # type: ignore @property - def feed_dict(self) -> Dict[Any, Any]: + def feed_dict(self) -> dict[Any, Any]: """ Return the feed dictionary for the session run evaluating the classifier. @@ -227,9 +228,7 @@ def feed_dict(self) -> Dict[Any, Any]: """ return self._feed_dict # type: ignore - def predict( # pylint: disable=W0221 - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ Perform prediction for a batch of inputs. 
@@ -266,7 +265,7 @@ def predict( # pylint: disable=W0221 return predictions - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, @@ -323,9 +322,7 @@ def fit( # pylint: disable=W0221 # Run train step self._sess.run(self.train, feed_dict=feed_dict) - def fit_generator( # pylint: disable=W0221 - self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs - ) -> None: + def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs) -> None: """ Fit the classifier using the generator that yields batches as specified. @@ -377,10 +374,10 @@ def fit_generator( # pylint: disable=W0221 else: super().fit_generator(generator, nb_epochs=nb_epochs, **kwargs) - def class_gradient( # pylint: disable=W0221 + def class_gradient( self, x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, + label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, **kwargs, ) -> np.ndarray: @@ -446,9 +443,7 @@ def class_gradient( # pylint: disable=W0221 return grads - def loss_gradient( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. @@ -483,9 +478,7 @@ def loss_gradient( # pylint: disable=W0221 return grads - def compute_loss( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, reduction: str = "none", **kwargs - ) -> np.ndarray: + def compute_loss(self, x: np.ndarray, y: np.ndarray, reduction: str = "none", **kwargs) -> np.ndarray: """ Compute the loss of the neural network for samples `x`. @@ -536,7 +529,7 @@ def clone_for_refitting(self) -> "TensorFlowClassifier": raise NotImplementedError def _init_class_grads(self, label=None): - # pylint: disable=E0401 + import tensorflow.compat.v1 as tf if not hasattr(self, "_class_grads"): @@ -556,13 +549,13 @@ def _init_class_grads(self, label=None): if self._class_grads[unique_label] is None: self._class_grads[unique_label] = tf.gradients(self.output[:, unique_label], self.input_ph)[0] - def _get_layers(self) -> List[str]: + def _get_layers(self) -> list[str]: """ Return the hidden layers in the model, if applicable. :return: The hidden layers in the model, input and output layers excluded. """ - # pylint: disable=E0401 + import tensorflow.compat.v1 as tf # Get the computational graph @@ -573,7 +566,7 @@ def _get_layers(self) -> List[str]: tmp_list = [] ops = graph.get_operations() - # pylint: disable=R1702 + # pylint: disable=too-many-nested-blocks for op in ops: if op.values(): if op.values()[0].get_shape() is not None: @@ -599,7 +592,7 @@ def _get_layers(self) -> List[str]: return result def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False + self, x: np.ndarray, layer: int | str, batch_size: int = 128, framework: bool = False ) -> np.ndarray: """ Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and @@ -612,7 +605,7 @@ def get_activations( :param framework: If true, return the intermediate tensor representation of the activation. :return: The output of `layer`, where the first dimension is the batch size corresponding to `x`. 
""" - # pylint: disable=E0401 + import tensorflow.compat.v1 as tf if self.learning is not None: @@ -661,7 +654,7 @@ def get_activations( return results_array - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. For TensorFlow, .ckpt is used. @@ -669,7 +662,7 @@ def save(self, filename: str, path: Optional[str] = None) -> None: :param path: Path of the folder where to store the model. If no path is specified, the model will be stored in the default data location of the library `ART_DATA_PATH`. """ - # pylint: disable=E0611 + from tensorflow.python import saved_model from tensorflow.python.saved_model import tag_constants from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def @@ -696,7 +689,7 @@ def save(self, filename: str, path: Optional[str] = None) -> None: logger.info("Model saved in path: %s.", full_path) - def __getstate__(self) -> Dict[str, Any]: + def __getstate__(self) -> dict[str, Any]: """ Use to ensure `TensorFlowClassifier` can be pickled. @@ -737,7 +730,7 @@ def __getstate__(self) -> Dict[str, Any]: return state - def __setstate__(self, state: Dict[str, Any]) -> None: + def __setstate__(self, state: dict[str, Any]) -> None: """ Use to ensure `TensorFlowClassifier` can be unpickled. @@ -746,7 +739,6 @@ def __setstate__(self, state: Dict[str, Any]) -> None: self.__dict__.update(state) # Load and update all functionality related to TensorFlow - # pylint: disable=E0611, E0401 import tensorflow.compat.v1 as tf from tensorflow.python.saved_model import tag_constants @@ -832,14 +824,14 @@ def __init__( self, model: Callable, nb_classes: int, - input_shape: Tuple[int, ...], - loss_object: Optional["tf.keras.losses.Loss"] = None, - optimizer: Optional["tf.keras.optimizers.Optimizer"] = None, - train_step: Optional[Callable] = None, + input_shape: tuple[int, ...], + loss_object: "tf.keras.losses.Loss" | None = None, + optimizer: "tf.keras.optimizers.Optimizer" | None = None, + train_step: Callable | None = None, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -890,7 +882,7 @@ def __init__( self._reduce_labels = False @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -925,9 +917,7 @@ def train_step(self) -> Callable: """ return self._train_step # type: ignore - def predict( # pylint: disable=W0221 - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ Perform prediction for a batch of inputs. 
@@ -971,7 +961,7 @@ def _predict_framework(self, x: "tf.Tensor", training_mode: bool = False) -> "tf return self._model(x_preprocessed, training=training_mode) - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, @@ -1038,9 +1028,7 @@ def train_step(model, images, labels): if scheduler is not None: scheduler(epoch) - def fit_generator( # pylint: disable=W0221 - self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs - ) -> None: + def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs) -> None: """ Fit the classifier using the generator that yields batches as specified. @@ -1106,10 +1094,10 @@ def train_step(model, images, labels): # Fit a generic data generator through the API super().fit_generator(generator, nb_epochs=nb_epochs) - def class_gradient( # pylint: disable=W0221 + def class_gradient( self, x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, + label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, **kwargs, ) -> np.ndarray: @@ -1197,10 +1185,10 @@ def class_gradient( # pylint: disable=W0221 return gradients - def compute_loss( # pylint: disable=W0221 + def compute_loss( self, - x: Union[np.ndarray, "tf.Tensor"], - y: Union[np.ndarray, "tf.Tensor"], + x: np.ndarray | "tf.Tensor", + y: np.ndarray | "tf.Tensor", reduction: str = "none", training_mode: bool = False, **kwargs, @@ -1250,10 +1238,10 @@ def compute_loss( # pylint: disable=W0221 def compute_losses( self, - x: Union[np.ndarray, "tf.Tensor"], - y: Union[np.ndarray, "tf.Tensor"], + x: np.ndarray | "tf.Tensor", + y: np.ndarray | "tf.Tensor", reduction: str = "none", - ) -> Dict[str, Union[np.ndarray, "tf.Tensor"]]: + ) -> dict[str, np.ndarray | "tf.Tensor"]: """ Compute all loss components. @@ -1268,13 +1256,13 @@ def compute_losses( """ return {"total": self.compute_loss(x=x, y=y, reduction=reduction)} - def loss_gradient( # pylint: disable=W0221 + def loss_gradient( self, - x: Union[np.ndarray, "tf.Tensor"], - y: Union[np.ndarray, "tf.Tensor"], + x: np.ndarray | "tf.Tensor", + y: np.ndarray | "tf.Tensor", training_mode: bool = False, **kwargs, - ) -> Union[np.ndarray, "tf.Tensor"]: + ) -> np.ndarray | "tf.Tensor": """ Compute the gradient of the loss function w.r.t. `x`. 
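The TensorFlow v2 `loss_gradient` above is built on `tf.GradientTape`, watching the preprocessed input and differentiating the loss with respect to it. The core of that computation looks roughly like this sketch with a made-up model, not the estimator's exact code:

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

    x = tf.random.uniform((8, 4))
    y = tf.random.uniform((8,), maxval=10, dtype=tf.int32)

    with tf.GradientTape() as tape:
        tape.watch(x)                   # x is a constant tensor, so it must be watched explicitly
        predictions = model(x, training=False)
        loss = loss_object(y, predictions)

    gradients = tape.gradient(loss, x)  # same shape as x
    print(gradients.shape)              # (8, 4)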
@@ -1352,8 +1340,8 @@ def clone_for_refitting( optimizer=optimizer, loss=self.model.loss, metrics=self.model.metrics, - loss_weights=self.model.compiled_loss._loss_weights, # pylint: disable=W0212 - weighted_metrics=self.model.compiled_metrics._weighted_metrics, # pylint: disable=W0212 + loss_weights=self.model.compiled_loss._loss_weights, + weighted_metrics=self.model.compiled_metrics._weighted_metrics, run_eagerly=self.model.run_eagerly, ) @@ -1361,10 +1349,10 @@ def clone_for_refitting( params = self.get_params() del params["model"] clone.set_params(**params) - clone._train_step = self._train_step # pylint: disable=W0212 - clone._reduce_labels = self._reduce_labels # pylint: disable=W0212 - clone._loss_object = self._loss_object # pylint: disable=W0212 - clone._optimizer = self._optimizer # pylint: disable=W0212 + clone._train_step = self._train_step + clone._reduce_labels = self._reduce_labels + clone._loss_object = self._loss_object + clone._optimizer = self._optimizer return clone def reset(self) -> None: @@ -1407,7 +1395,7 @@ def _get_layers(self) -> list: raise NotImplementedError @property - def layer_names(self) -> Optional[List[str]]: + def layer_names(self) -> list[str] | None: """ Return the hidden layers in the model, if applicable. @@ -1426,8 +1414,8 @@ def layer_names(self) -> Optional[List[str]]: return None # type: ignore def get_activations( # type: ignore - self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False - ) -> Optional[np.ndarray]: + self, x: np.ndarray, layer: int | str, batch_size: int = 128, framework: bool = False + ) -> np.ndarray | None: """ Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and `nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by @@ -1490,7 +1478,7 @@ def get_activations( # type: ignore return activations - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. For TensorFlow, .ckpt is used. diff --git a/art/estimators/classification/xgboost.py b/art/estimators/classification/xgboost.py index d469c3c6a6..99d1dc4633 100644 --- a/art/estimators/classification/xgboost.py +++ b/art/estimators/classification/xgboost.py @@ -18,14 +18,14 @@ """ This module implements the classifier `XGBoostClassifier` for XGBoost models. 
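The `clone_for_refitting` logic above amounts to cloning the architecture with fresh weights and recompiling with an optimizer rebuilt from the original configuration; the access to `compiled_loss._loss_weights` and `compiled_metrics._weighted_metrics` carries the loss weighting across. In plain Keras the pattern is roughly this sketch, which ignores ART's private-attribute copying:

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.Input(shape=(4,)), tf.keras.layers.Dense(10, activation="softmax")])
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])

    # clone_model copies the architecture but re-initialises the weights
    clone = tf.keras.models.clone_model(model)

    # rebuild the optimizer from its config so the clone does not share optimizer state
    optimizer = model.optimizer.__class__.from_config(model.optimizer.get_config())
    clone.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"])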
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations from copy import deepcopy import json import logging import os import pickle -from typing import List, Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -34,7 +34,7 @@ from art import config if TYPE_CHECKING: - # pylint: disable=C0412 + import xgboost from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -56,13 +56,13 @@ class XGBoostClassifier(ClassifierDecisionTree): def __init__( self, - model: Union["xgboost.Booster", "xgboost.XGBClassifier", None] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + model: "xgboost.Booster" | "xgboost.XGBClassifier" | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - nb_features: Optional[int] = None, - nb_classes: Optional[int] = None, + nb_features: int | None = None, + nb_classes: int | None = None, ) -> None: """ Create a `Classifier` instance from a XGBoost model. @@ -97,7 +97,7 @@ def __init__( self._nb_classes = _nb_classes @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -184,7 +184,7 @@ def clone_for_refitting( raise NotImplementedError - def _get_nb_classes(self, nb_classes: Optional[int]) -> Optional[int]: + def _get_nb_classes(self, nb_classes: int | None) -> int | None: """ Return the number of output classes. @@ -205,7 +205,7 @@ def _get_nb_classes(self, nb_classes: Optional[int]) -> Optional[int]: return nb_classes return None - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. @@ -224,7 +224,7 @@ def save(self, filename: str, path: Optional[str] = None) -> None: with open(full_path + ".pickle", "wb") as file_pickle: pickle.dump(self._model, file=file_pickle) - def get_trees(self) -> List["Tree"]: + def get_trees(self) -> list["Tree"]: """ Get the decision trees. @@ -253,10 +253,10 @@ def get_trees(self) -> List["Tree"]: return trees - def _get_leaf_nodes(self, node, i_tree, class_label, box) -> List["LeafNode"]: + def _get_leaf_nodes(self, node, i_tree, class_label, box) -> list["LeafNode"]: from art.metrics.verification_decisions_trees import LeafNode, Box, Interval - leaf_nodes: List[LeafNode] = [] + leaf_nodes: list[LeafNode] = [] if "children" in node: if node["children"][0]["nodeid"] == node["yes"] and node["children"][1]["nodeid"] == node["no"]: diff --git a/art/estimators/encoding/tensorflow.py b/art/estimators/encoding/tensorflow.py index 3d7a1be33c..df978e1a42 100644 --- a/art/estimators/encoding/tensorflow.py +++ b/art/estimators/encoding/tensorflow.py @@ -18,16 +18,16 @@ """ This module implements the classifier `TensorFlowEncoder` for TensorFlow models. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Any, Dict, List, Optional, Union, Tuple, TYPE_CHECKING +from typing import Any, TYPE_CHECKING from art.estimators.encoding.encoder import EncoderMixin from art.estimators.tensorflow import TensorFlowEstimator if TYPE_CHECKING: - # pylint: disable=C0412 + import numpy as np import tensorflow.compat.v1 as tf @@ -55,14 +55,14 @@ def __init__( self, input_ph: "tf.Placeholder", model: "tf.Tensor", - loss: Optional["tf.Tensor"] = None, - sess: Optional["tf.compat.v1.Session"] = None, + loss: "tf.Tensor" | None = None, + sess: "tf.compat.v1.Session" | None = None, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - feed_dict: Optional[Dict[Any, Any]] = None, + feed_dict: dict[Any, Any] | None = None, ): """ Initialization specific to encoder estimator implementation in TensorFlow. @@ -116,7 +116,7 @@ def __init__( self._loss_grads = tf.gradients(self.loss, self.input_ph)[0] @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -143,7 +143,7 @@ def loss(self) -> "tf.Tensor": return self._loss # type: ignore @property - def feed_dict(self) -> Dict[Any, Any]: + def feed_dict(self) -> dict[Any, Any]: """ Return the feed dictionary for the session run evaluating the classifier. @@ -173,7 +173,7 @@ def fit(self, x: "np.ndarray", y: "np.ndarray", batch_size: int = 128, nb_epochs raise NotImplementedError def get_activations( - self, x: "np.ndarray", layer: Union[int, str], batch_size: int, framework: bool = False + self, x: "np.ndarray", layer: int | str, batch_size: int, framework: bool = False ) -> "np.ndarray": """ Do nothing. @@ -183,7 +183,7 @@ def get_activations( def compute_loss(self, x: "np.ndarray", y: "np.ndarray", **kwargs) -> "np.ndarray": raise NotImplementedError - def loss_gradient(self, x: "np.ndarray", y: "np.ndarray", **kwargs) -> "np.ndarray": # pylint: disable=W0221 + def loss_gradient(self, x: "np.ndarray", y: "np.ndarray", **kwargs) -> "np.ndarray": """ No gradients to compute for this method; do nothing. """ diff --git a/art/estimators/estimator.py b/art/estimators/estimator.py index 38dfe98a15..0d2e84618b 100644 --- a/art/estimators/estimator.py +++ b/art/estimators/estimator.py @@ -18,8 +18,10 @@ """ This module implements abstract base and mixin classes for estimators in ART. 
""" +from __future__ import annotations + from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -27,7 +29,7 @@ from art.config import ART_NUMPY_DTYPE if TYPE_CHECKING: - # pylint: disable=R0401 + # pylint: disable=cyclic-import from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE, ESTIMATOR_TYPE from art.data_generators import DataGenerator from art.metrics.verification_decisions_trees import Tree @@ -52,10 +54,10 @@ class BaseEstimator(ABC): def __init__( self, model, - clip_values: Optional["CLIP_VALUES_TYPE"], - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, - preprocessing: Union["PREPROCESSING_TYPE", "Preprocessor"] = (0.0, 1.0), + clip_values: "CLIP_VALUES_TYPE" | None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, + preprocessing: "PREPROCESSING_TYPE" | "Preprocessor" = (0.0, 1.0), ): """ Initialize a `BaseEstimator` object. @@ -77,7 +79,7 @@ def __init__( self.preprocessing = self._set_preprocessing(preprocessing) self.preprocessing_defences = self._set_preprocessing_defences(preprocessing_defences) self.postprocessing_defences = self._set_postprocessing_defences(postprocessing_defences) - self.preprocessing_operations: List["Preprocessor"] = [] + self.preprocessing_operations: list["Preprocessor"] = [] BaseEstimator._update_preprocessing_operations(self) BaseEstimator._check_params(self) @@ -107,9 +109,7 @@ def _update_preprocessing_operations(self): raise ValueError("Preprocessing argument not recognised.") @staticmethod - def _set_preprocessing( - preprocessing: Optional[Union["PREPROCESSING_TYPE", "Preprocessor"]] - ) -> Optional["Preprocessor"]: + def _set_preprocessing(preprocessing: "PREPROCESSING_TYPE" | "Preprocessor" | None) -> "Preprocessor" | None: from art.defences.preprocessor.preprocessor import Preprocessor if preprocessing is None: @@ -125,8 +125,8 @@ def _set_preprocessing( @staticmethod def _set_preprocessing_defences( - preprocessing_defences: Optional[Union["Preprocessor", List["Preprocessor"]]] - ) -> Optional[List["Preprocessor"]]: + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None, + ) -> list["Preprocessor"] | None: from art.defences.preprocessor.preprocessor import Preprocessor if isinstance(preprocessing_defences, Preprocessor): @@ -136,8 +136,8 @@ def _set_preprocessing_defences( @staticmethod def _set_postprocessing_defences( - postprocessing_defences: Optional[Union["Postprocessor", List["Postprocessor"]]] - ) -> Optional[List["Postprocessor"]]: + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None, + ) -> list["Postprocessor"] | None: from art.defences.postprocessor.postprocessor import Postprocessor if isinstance(postprocessing_defences, Postprocessor): @@ -174,7 +174,7 @@ def set_params(self, **kwargs) -> None: self._update_preprocessing_operations() self._check_params() - def get_params(self) -> Dict[str, Any]: + def get_params(self) -> dict[str, Any]: """ Get all parameters and their values of this estimator. @@ -271,7 +271,7 @@ def model(self): @property @abstractmethod - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. 
@@ -280,7 +280,7 @@ def input_shape(self) -> Tuple[int, ...]: raise NotImplementedError @property - def clip_values(self) -> Optional["CLIP_VALUES_TYPE"]: + def clip_values(self) -> "CLIP_VALUES_TYPE" | None: """ Return the clip values of the input samples. @@ -288,7 +288,7 @@ def clip_values(self) -> Optional["CLIP_VALUES_TYPE"]: """ return self._clip_values - def _apply_preprocessing(self, x, y, fit: bool) -> Tuple[Any, Any]: + def _apply_preprocessing(self, x, y, fit: bool) -> tuple[Any, Any]: """ Apply all defences and preprocessing operations on the inputs `x` and `y`. This function has to be applied to all raw inputs `x` and `y` provided to the estimator. @@ -479,9 +479,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg self.fit(x, y, nb_epochs=1, batch_size=generator.batch_size, **kwargs) @abstractmethod - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: """ Return the output of a specific layer for samples `x` where `layer` is the index of the layer between 0 and `nb_layers - 1 or the name of the layer. The number of layers can be determined by counting the results @@ -503,7 +501,7 @@ def channels_first(self) -> bool: return self._channels_first @property - def layer_names(self) -> Optional[List[str]]: + def layer_names(self) -> list[str] | None: """ Return the names of the hidden layers in the model, if applicable. @@ -536,7 +534,7 @@ class DecisionTreeMixin(ABC): """ @abstractmethod - def get_trees(self) -> List["Tree"]: + def get_trees(self) -> list["Tree"]: """ Get the decision trees. diff --git a/art/estimators/gan/tensorflow.py b/art/estimators/gan/tensorflow.py index 8ec0072344..7cf617ee0a 100644 --- a/art/estimators/gan/tensorflow.py +++ b/art/estimators/gan/tensorflow.py @@ -18,7 +18,9 @@ """ This module creates GANs using the TensorFlow ML Framework """ -from typing import Tuple, TYPE_CHECKING, Union +from __future__ import annotations + +from typing import TYPE_CHECKING import numpy as np from art.estimators.tensorflow import TensorFlowV2Estimator @@ -71,7 +73,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray: return self.generator.predict(x, batch_size=batch_size, **kwargs) @property - def input_shape(self) -> Tuple[int, int]: + def input_shape(self) -> tuple[int, int]: """ Return the shape of one input sample. @@ -169,7 +171,5 @@ def discriminator_optimizer_fct(self) -> "tf.Tensor": def loss_gradient(self, x, y, **kwargs): raise NotImplementedError - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: raise NotImplementedError diff --git a/art/estimators/generation/tensorflow.py b/art/estimators/generation/tensorflow.py index e0dac9a1c8..431e3e1531 100644 --- a/art/estimators/generation/tensorflow.py +++ b/art/estimators/generation/tensorflow.py @@ -18,10 +18,10 @@ """ This module implements the classifier `TensorFlowGenerator` for TensorFlow models. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Any, Dict, List, Optional, Union, Tuple, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np @@ -29,7 +29,7 @@ from art.estimators.tensorflow import TensorFlowEstimator, TensorFlowV2Estimator if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow.compat.v1 as tf from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -55,14 +55,14 @@ def __init__( self, input_ph: "tf.Placeholder", model: "tf.Tensor", - loss: Optional["tf.Tensor"] = None, - sess: Optional["tf.compat.v1.Session"] = None, + loss: "tf.Tensor" | None = None, + sess: "tf.compat.v1.Session" | None = None, channels_first=False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - feed_dict: Optional[Dict[Any, Any]] = None, + feed_dict: dict[Any, Any] | None = None, ): """ Initialization specific to TensorFlow generator implementations. @@ -113,7 +113,7 @@ def __init__( self._sess = sess @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. :return: Shape of one input sample. @@ -137,7 +137,7 @@ def loss(self) -> "tf.Tensor": return self._loss # type: ignore @property - def feed_dict(self) -> Dict[Any, Any]: + def feed_dict(self) -> dict[Any, Any]: """ Return the feed dictionary for the session run evaluating the classifier. :return: The feed dictionary for the session run evaluating the classifier. @@ -159,7 +159,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray: y = self._sess.run(self._model, feed_dict=feed_dict) return y - def loss_gradient(self, x, y, training_mode: bool = False, **kwargs) -> np.ndarray: # pylint: disable=W0221 + def loss_gradient(self, x, y, training_mode: bool = False, **kwargs) -> np.ndarray: raise NotImplementedError def fit(self, x, y, batch_size=128, nb_epochs=10, **kwargs): @@ -168,9 +168,7 @@ def fit(self, x, y, batch_size=128, nb_epochs=10, **kwargs): """ raise NotImplementedError - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: """ Do nothing. 
""" @@ -210,9 +208,9 @@ def __init__( encoding_length: int, model: "tf.Tensor", channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ): """ @@ -256,12 +254,10 @@ def encoding_length(self) -> int: return self._encoding_length @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: raise NotImplementedError - def predict( # pylint: disable=W0221 - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ Perform projections over a batch of encodings. @@ -296,9 +292,7 @@ def fit(self, x, y, batch_size=128, nb_epochs=10, **kwargs): """ raise NotImplementedError - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: """ Do nothing. """ diff --git a/art/estimators/keras.py b/art/estimators/keras.py index c6e1e943e9..2815dbc25d 100644 --- a/art/estimators/keras.py +++ b/art/estimators/keras.py @@ -45,12 +45,6 @@ class KerasEstimator(NeuralNetworkMixin, LossGradientsMixin, BaseEstimator): estimator_params = BaseEstimator.estimator_params + NeuralNetworkMixin.estimator_params - def __init__(self, **kwargs) -> None: - """ - Estimator class for Keras models. - """ - super().__init__(**kwargs) - def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs): """ Perform prediction of the neural network for samples `x`. @@ -116,9 +110,9 @@ def clone_for_refitting( loss_weights = None weighted_metrics = None if self.model.compiled_loss: - loss_weights = self.model.compiled_loss._loss_weights # pylint: disable=W0212 + loss_weights = self.model.compiled_loss._loss_weights if self.model.compiled_metrics: - weighted_metrics = self.model.compiled_metrics._weighted_metrics # pylint: disable=W0212 + weighted_metrics = self.model.compiled_metrics._weighted_metrics model.compile( optimizer=optimizer, diff --git a/art/estimators/mxnet.py b/art/estimators/mxnet.py index 322456d4b5..7d7daf8c77 100644 --- a/art/estimators/mxnet.py +++ b/art/estimators/mxnet.py @@ -40,12 +40,6 @@ class MXEstimator(NeuralNetworkMixin, LossGradientsMixin, BaseEstimator): estimator_params = BaseEstimator.estimator_params + NeuralNetworkMixin.estimator_params - def __init__(self, **kwargs) -> None: - """ - Estimator class for MXNet Gluon models. - """ - super().__init__(**kwargs) - def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs): """ Perform prediction of the neural network for samples `x`. 
diff --git a/art/estimators/object_detection/detr.py b/art/estimators/object_detection/detr.py index f6534d93f2..774f48f0c9 100644 --- a/art/estimators/object_detection/detr.py +++ b/art/estimators/object_detection/detr.py @@ -25,8 +25,8 @@ - Line 459: returning original tensor list - Line 462: function name changed to distinguish that it now facilitates gradients """ +from __future__ import annotations -from typing import List, Optional, Tuple, Union import torch @@ -36,7 +36,7 @@ class NestedTensor: (detr/util/misc.py) """ - def __init__(self, tensors, mask: Optional["torch.Tensor"]): + def __init__(self, tensors, mask: "torch.Tensor" | None): self.tensors = tensors self.mask = mask @@ -369,7 +369,7 @@ def box_xyxy_to_cxcywh(x: "torch.Tensor"): return torch.stack(box, dim=-1) -def rescale_bboxes(out_bbox: "torch.Tensor", size: Tuple[int, int]): +def rescale_bboxes(out_bbox: "torch.Tensor", size: tuple[int, int]): """ From DETR source: https://github.com/facebookresearch/detr (inference notebook) @@ -381,7 +381,7 @@ def rescale_bboxes(out_bbox: "torch.Tensor", size: Tuple[int, int]): return box -def revert_rescale_bboxes(out_bbox: "torch.Tensor", size: Tuple[int, int]): +def revert_rescale_bboxes(out_bbox: "torch.Tensor", size: tuple[int, int]): """ Adapted from DETR source: https://github.com/facebookresearch/detr (inference notebook) @@ -436,7 +436,7 @@ def generalized_box_iou(boxes1: "torch.Tensor", boxes2: "torch.Tensor"): return iou - (area - union) / area -def nested_tensor_from_tensor_list(tensor_list: Union[List, "torch.Tensor"]): +def nested_tensor_from_tensor_list(tensor_list: list | "torch.Tensor"): """ Adapted from DETR source: https://github.com/facebookresearch/detr (detr/util/misc.py) @@ -477,5 +477,5 @@ def grad_enabled_forward(self, samples: NestedTensor): outputs_coord = self.bbox_embed(h_s).sigmoid() out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]} if self.aux_loss: - out["aux_outputs"] = self._set_aux_loss(outputs_class, outputs_coord) # pylint: disable=W0212 + out["aux_outputs"] = self._set_aux_loss(outputs_class, outputs_coord) return out diff --git a/art/estimators/object_detection/pytorch_detection_transformer.py b/art/estimators/object_detection/pytorch_detection_transformer.py index 79d6a6c23d..de2232bec8 100644 --- a/art/estimators/object_detection/pytorch_detection_transformer.py +++ b/art/estimators/object_detection/pytorch_detection_transformer.py @@ -20,15 +20,17 @@ | Paper link: https://arxiv.org/abs/2005.12872 """ +from __future__ import annotations + import logging -from typing import Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from art.estimators.object_detection.pytorch_object_detector import PyTorchObjectDetector if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.defences.postprocessor.postprocessor import Postprocessor @@ -48,15 +50,15 @@ class PyTorchDetectionTransformer(PyTorchObjectDetector): def __init__( self, - model: Optional["torch.nn.Module"] = None, - input_shape: Tuple[int, ...] = (3, 800, 800), - optimizer: Optional["torch.optim.Optimizer"] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + model: "torch.nn.Module" | None = None, + input_shape: tuple[int, ...] 
= (3, 800, 800), + optimizer: "torch.optim.Optimizer" | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, channels_first: bool = True, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = None, - attack_losses: Tuple[str, ...] = ( + attack_losses: tuple[str, ...] = ( "loss_ce", "loss_bbox", "loss_giou", @@ -66,8 +68,8 @@ def __init__( """ Initialization. - :param model: DETR model. The output of the model is `List[Dict[str, torch.Tensor]]`, one for each input - image. The fields of the Dict are as follows: + :param model: DETR model. The output of the model is `list[dict[str, torch.Tensor]]`, one for each input + image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -127,7 +129,7 @@ def __init__( num_classes, matcher=matcher, weight_dict=self.weight_dict, eos_coef=eos_coef, losses=losses ) - def _translate_labels(self, labels: List[Dict[str, "torch.Tensor"]]) -> List[Dict[str, "torch.Tensor"]]: + def _translate_labels(self, labels: list[dict[str, "torch.Tensor"]]) -> list[dict[str, "torch.Tensor"]]: """ Translate object detection labels from ART format (torchvision) to the model format (DETR) and move tensors to GPU, if applicable. @@ -144,7 +146,7 @@ def _translate_labels(self, labels: List[Dict[str, "torch.Tensor"]]) -> List[Dic height = self.input_shape[0] width = self.input_shape[1] - labels_translated: List[Dict[str, "torch.Tensor"]] = [] + labels_translated: list[dict[str, "torch.Tensor"]] = [] for label_dict in labels: label_dict_translated = {} @@ -163,7 +165,7 @@ def _translate_labels(self, labels: List[Dict[str, "torch.Tensor"]]) -> List[Dic return labels_translated - def _translate_predictions(self, predictions: Dict[str, "torch.Tensor"]) -> List[Dict[str, np.ndarray]]: + def _translate_predictions(self, predictions: dict[str, "torch.Tensor"]) -> list[dict[str, np.ndarray]]: """ Translate object detection predictions from the model format (DETR) to ART format (torchvision) and convert tensors to numpy arrays. 
@@ -183,7 +185,7 @@ def _translate_predictions(self, predictions: Dict[str, "torch.Tensor"]) -> List pred_boxes = predictions["pred_boxes"] pred_logits = predictions["pred_logits"] - predictions_x1y1x2y2: List[Dict[str, np.ndarray]] = [] + predictions_x1y1x2y2: list[dict[str, np.ndarray]] = [] for pred_box, pred_logit in zip(pred_boxes, pred_logits): boxes = rescale_bboxes(pred_box.detach().cpu(), (height, width)).numpy() diff --git a/art/estimators/object_detection/pytorch_faster_rcnn.py b/art/estimators/object_detection/pytorch_faster_rcnn.py index bc8bcc23ad..8f15638283 100644 --- a/art/estimators/object_detection/pytorch_faster_rcnn.py +++ b/art/estimators/object_detection/pytorch_faster_rcnn.py @@ -20,13 +20,15 @@ | Paper link: https://arxiv.org/abs/1506.01497 """ +from __future__ import annotations + import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING from art.estimators.object_detection.pytorch_object_detector import PyTorchObjectDetector if TYPE_CHECKING: - # pylint: disable=C0412 + import torch import torchvision @@ -47,15 +49,15 @@ class PyTorchFasterRCNN(PyTorchObjectDetector): def __init__( self, - model: Optional["torchvision.models.detection.FasterRCNN"] = None, - input_shape: Tuple[int, ...] = (-1, -1, -1), - optimizer: Optional["torch.optim.Optimizer"] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + model: "torchvision.models.detection.FasterRCNN" | None = None, + input_shape: tuple[int, ...] = (-1, -1, -1), + optimizer: "torch.optim.Optimizer" | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, channels_first: bool = True, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = None, - attack_losses: Tuple[str, ...] = ( + attack_losses: tuple[str, ...] = ( "loss_classifier", "loss_box_reg", "loss_objectness", @@ -66,8 +68,8 @@ def __init__( """ Initialization. - :param model: Faster R-CNN model. The output of the model is `List[Dict[str, torch.Tensor]]`, one for - each input image. The fields of the Dict are as follows: + :param model: Faster R-CNN model. The output of the model is `list[dict[str, torch.Tensor]]`, one for + each input image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. diff --git a/art/estimators/object_detection/pytorch_object_detector.py b/art/estimators/object_detection/pytorch_object_detector.py index d783f6a49a..49bb14c15d 100644 --- a/art/estimators/object_detection/pytorch_object_detector.py +++ b/art/estimators/object_detection/pytorch_object_detector.py @@ -18,8 +18,10 @@ """ This module implements the task specific estimator for PyTorch object detectors. 
""" +from __future__ import annotations + import logging -from typing import Any, List, Dict, Optional, Tuple, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np @@ -28,7 +30,7 @@ from art.estimators.pytorch import PyTorchEstimator if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -49,14 +51,14 @@ class PyTorchObjectDetector(ObjectDetectorMixin, PyTorchEstimator): def __init__( self, model: "torch.nn.Module", - input_shape: Tuple[int, ...] = (-1, -1, -1), - optimizer: Optional["torch.optim.Optimizer"] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + input_shape: tuple[int, ...] = (-1, -1, -1), + optimizer: "torch.optim.Optimizer" | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, channels_first: bool = True, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = None, - attack_losses: Tuple[str, ...] = ( + attack_losses: tuple[str, ...] = ( "loss_classifier", "loss_box_reg", "loss_objectness", @@ -67,8 +69,8 @@ def __init__( """ Initialization. - :param model: Object detection model. The output of the model is `List[Dict[str, torch.Tensor]]`, one for - each input image. The fields of the Dict are as follows: + :param model: Object detection model. The output of the model is `list[dict[str, torch.Tensor]]`, one for + each input image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -120,8 +122,8 @@ def __init__( self._attack_losses = attack_losses # Parameters used for subclasses - self.weight_dict: Optional[Dict[str, float]] = None - self.criterion: Optional[torch.nn.Module] = None + self.weight_dict: dict[str, float] | None = None + self.criterion: torch.nn.Module | None = None if self.clip_values is not None: if self.clip_values[0] != 0: @@ -148,7 +150,7 @@ def model(self) -> "torch.nn.Module": return self._model @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -157,7 +159,7 @@ def input_shape(self) -> Tuple[int, ...]: return self._input_shape @property - def optimizer(self) -> Optional["torch.optim.Optimizer"]: + def optimizer(self) -> "torch.optim.Optimizer" | None: """ Return the optimizer. @@ -166,7 +168,7 @@ def optimizer(self) -> Optional["torch.optim.Optimizer"]: return self._optimizer @property - def attack_losses(self) -> Tuple[str, ...]: + def attack_losses(self) -> tuple[str, ...]: """ Return the combination of strings of the loss components. @@ -185,17 +187,17 @@ def device(self) -> "torch.device": def _preprocess_and_convert_inputs( self, - x: Union[np.ndarray, "torch.Tensor"], - y: Optional[List[Dict[str, Union[np.ndarray, "torch.Tensor"]]]] = None, + x: np.ndarray | "torch.Tensor", + y: list[dict[str, np.ndarray | "torch.Tensor"]] | None = None, fit: bool = False, no_grad: bool = True, - ) -> Tuple["torch.Tensor", List[Dict[str, "torch.Tensor"]]]: + ) -> tuple["torch.Tensor", list[dict[str, "torch.Tensor"]]]: """ Apply preprocessing on inputs `(x, y)` and convert to tensors, if needed. :param x: Samples of shape NCHW or NHWC. 
- :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, np.ndarray | torch.Tensor]]`, one for each input image. + The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. @@ -249,7 +251,7 @@ def _preprocess_and_convert_inputs( return x_preprocessed, y_preprocessed - def _translate_labels(self, labels: List[Dict[str, "torch.Tensor"]]) -> Any: + def _translate_labels(self, labels: list[dict[str, "torch.Tensor"]]) -> Any: """ Translate object detection labels from ART format (torchvision) to the model format (torchvision) and move tensors to GPU, if applicable. @@ -260,7 +262,7 @@ def _translate_labels(self, labels: List[Dict[str, "torch.Tensor"]]) -> Any: labels_translated = [{k: v.to(self.device) for k, v in y_i.items()} for y_i in labels] return labels_translated - def _translate_predictions(self, predictions: Any) -> List[Dict[str, np.ndarray]]: # pylint: disable=R0201 + def _translate_predictions(self, predictions: Any) -> list[dict[str, np.ndarray]]: """ Translate object detection predictions from the model format (torchvision) to ART format (torchvision) and convert tensors to numpy arrays. @@ -268,7 +270,7 @@ def _translate_predictions(self, predictions: Any) -> List[Dict[str, np.ndarray] :param predictions: Object detection predictions in format x1y1x2y2 (torchvision). :return: Object detection predictions in format x1y1x2y2 (torchvision). """ - predictions_x1y1x2y2: List[Dict[str, np.ndarray]] = [] + predictions_x1y1x2y2: list[dict[str, np.ndarray]] = [] for pred in predictions: prediction = {} @@ -283,14 +285,14 @@ def _translate_predictions(self, predictions: Any) -> List[Dict[str, np.ndarray] return predictions_x1y1x2y2 def _get_losses( - self, x: Union[np.ndarray, "torch.Tensor"], y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]] - ) -> Tuple[Dict[str, "torch.Tensor"], "torch.Tensor"]: + self, x: np.ndarray | "torch.Tensor", y: list[dict[str, np.ndarray | "torch.Tensor"]] + ) -> tuple[dict[str, "torch.Tensor"], "torch.Tensor"]: """ Get the loss tensor output of the model including all preprocessing. :param x: Samples of shape NCHW or NHWC. - :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. + The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. @@ -322,15 +324,15 @@ def _get_losses( return loss_components, x_preprocessed - def loss_gradient( # pylint: disable=W0613 - self, x: Union[np.ndarray, "torch.Tensor"], y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]], **kwargs + def loss_gradient( + self, x: np.ndarray | "torch.Tensor", y: list[dict[str, np.ndarray | "torch.Tensor"]], **kwargs ) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. :param x: Samples of shape NCHW or NHWC. - :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. 
+ The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. @@ -380,13 +382,13 @@ def loss_gradient( # pylint: disable=W0613 return grads - def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> List[Dict[str, np.ndarray]]: + def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> list[dict[str, np.ndarray]]: """ Perform prediction for a batch of inputs. :param x: Samples of shape NCHW or NHWC. :param batch_size: Batch size. - :return: Predictions of format `List[Dict[str, np.ndarray]]`, one for each input image. The fields of the Dict + :return: Predictions of format `list[dict[str, np.ndarray]]`, one for each input image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -406,7 +408,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> List[Dict[s dataset = TensorDataset(x_preprocessed) dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False) - predictions: List[Dict[str, np.ndarray]] = [] + predictions: list[dict[str, np.ndarray]] = [] for (x_batch,) in dataloader: # Move inputs to device x_batch = x_batch.to(self._device) @@ -420,22 +422,22 @@ def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> List[Dict[s return predictions - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, - y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]], + y: list[dict[str, np.ndarray | "torch.Tensor"]], batch_size: int = 128, nb_epochs: int = 10, drop_last: bool = False, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, **kwargs, ) -> None: """ Fit the classifier on the training set `(x, y)`. :param x: Samples of shape NCHW or NHWC. - :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. + The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. @@ -522,20 +524,18 @@ def __getitem__(self, idx): if scheduler is not None: scheduler.step() - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: raise NotImplementedError def compute_losses( - self, x: Union[np.ndarray, "torch.Tensor"], y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]] - ) -> Dict[str, np.ndarray]: + self, x: np.ndarray | "torch.Tensor", y: list[dict[str, np.ndarray | "torch.Tensor"]] + ) -> dict[str, np.ndarray]: """ Compute all loss components. :param x: Samples of shape NCHW or NHWC. - :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. + The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. 
- labels [N]: the labels for each image. @@ -549,14 +549,14 @@ def compute_losses( return output def compute_loss( # type: ignore - self, x: Union[np.ndarray, "torch.Tensor"], y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]], **kwargs - ) -> Union[np.ndarray, "torch.Tensor"]: + self, x: np.ndarray | "torch.Tensor", y: list[dict[str, np.ndarray | "torch.Tensor"]], **kwargs + ) -> np.ndarray | "torch.Tensor": """ Compute the loss of the neural network for samples `x`. :param x: Samples of shape NCHW or NHWC. - :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. + The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. diff --git a/art/estimators/object_detection/pytorch_yolo.py b/art/estimators/object_detection/pytorch_yolo.py index 976d601465..cd7fc69b55 100644 --- a/art/estimators/object_detection/pytorch_yolo.py +++ b/art/estimators/object_detection/pytorch_yolo.py @@ -20,15 +20,17 @@ | Paper link: https://arxiv.org/abs/1804.02767 """ +from __future__ import annotations + import logging -from typing import List, Dict, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from art.estimators.object_detection.pytorch_object_detector import PyTorchObjectDetector if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -48,14 +50,14 @@ class PyTorchYolo(PyTorchObjectDetector): def __init__( self, model: "torch.nn.Module", - input_shape: Tuple[int, ...] = (3, 416, 416), - optimizer: Optional["torch.optim.Optimizer"] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + input_shape: tuple[int, ...] = (3, 416, 416), + optimizer: "torch.optim.Optimizer" | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, channels_first: bool = True, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = None, - attack_losses: Tuple[str, ...] = ( + attack_losses: tuple[str, ...] = ( "loss_classifier", "loss_box_reg", "loss_objectness", @@ -67,8 +69,8 @@ def __init__( Initialization. :param model: YOLO v3 or v5 model wrapped as demonstrated in examples/get_started_yolo.py. - The output of the model is `List[Dict[str, torch.Tensor]]`, one for each input image. - The fields of the Dict are as follows: + The output of the model is `list[dict[str, torch.Tensor]]`, one for each input image. + The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -104,7 +106,7 @@ def __init__( device_type=device_type, ) - def _translate_labels(self, labels: List[Dict[str, "torch.Tensor"]]) -> "torch.Tensor": + def _translate_labels(self, labels: list[dict[str, "torch.Tensor"]]) -> "torch.Tensor": """ Translate object detection labels from ART format (torchvision) to the model format (YOLO) and move tensors to GPU, if applicable. 
@@ -144,7 +146,7 @@ def _translate_labels(self, labels: List[Dict[str, "torch.Tensor"]]) -> "torch.T labels_xcycwh = torch.vstack(labels_xcycwh_list) return labels_xcycwh - def _translate_predictions(self, predictions: "torch.Tensor") -> List[Dict[str, np.ndarray]]: + def _translate_predictions(self, predictions: "torch.Tensor") -> list[dict[str, np.ndarray]]: """ Translate object detection predictions from the model format (YOLO) to ART format (torchvision) and convert tensors to numpy arrays. @@ -161,7 +163,7 @@ def _translate_predictions(self, predictions: "torch.Tensor") -> List[Dict[str, height = self.input_shape[0] width = self.input_shape[1] - predictions_x1y1x2y2: List[Dict[str, np.ndarray]] = [] + predictions_x1y1x2y2: list[dict[str, np.ndarray]] = [] for pred in predictions: boxes = torch.vstack( diff --git a/art/estimators/object_detection/tensorflow_faster_rcnn.py b/art/estimators/object_detection/tensorflow_faster_rcnn.py index 56544bd2e1..21020eff8b 100644 --- a/art/estimators/object_detection/tensorflow_faster_rcnn.py +++ b/art/estimators/object_detection/tensorflow_faster_rcnn.py @@ -18,8 +18,10 @@ """ This module implements the task specific estimator for Faster R-CNN in TensorFlow. """ +from __future__ import annotations + import logging -from typing import List, Dict, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -29,10 +31,10 @@ from art import config if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow.compat.v1 as tf from object_detection.meta_architectures.faster_rcnn_meta_arch import FasterRCNNMetaArch - from tensorflow.python.client.session import Session # pylint: disable=E0611 + from tensorflow.python.client.session import Session from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.defences.preprocessor.preprocessor import Preprocessor @@ -51,17 +53,17 @@ class TensorFlowFasterRCNN(ObjectDetectorMixin, TensorFlowEstimator): def __init__( self, images: "tf.Tensor", - model: Optional["FasterRCNNMetaArch"] = None, - filename: Optional[str] = None, - url: Optional[str] = None, - sess: Optional["Session"] = None, + model: "FasterRCNNMetaArch" | None = None, + filename: str | None = None, + url: str | None = None, + sess: "Session" | None = None, is_training: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, channels_first: bool = False, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - attack_losses: Tuple[str, ...] = ( + attack_losses: tuple[str, ...] 
= ( "Loss/RPNLoss/localization_loss", "Loss/RPNLoss/objectness_loss", "Loss/BoxClassifierLoss/localization_loss", @@ -127,21 +129,21 @@ def __init__( raise ValueError("This estimator does not support `postprocessing_defences`.") # Create placeholders for groundtruth boxes - self._groundtruth_boxes_list: List["tf.Tensor"] + self._groundtruth_boxes_list: list["tf.Tensor"] self._groundtruth_boxes_list = [ tf.placeholder(dtype=tf.float32, shape=(None, 4), name=f"groundtruth_boxes_{i}") for i in range(images.shape[0]) ] # Create placeholders for groundtruth classes - self._groundtruth_classes_list: List["tf.Tensor"] + self._groundtruth_classes_list: list["tf.Tensor"] self._groundtruth_classes_list = [ tf.placeholder(dtype=tf.int32, shape=(None,), name=f"groundtruth_classes_{i}") for i in range(images.shape[0]) ] # Create placeholders for groundtruth weights - self._groundtruth_weights_list: List["tf.Tensor"] + self._groundtruth_weights_list: list["tf.Tensor"] self._groundtruth_weights_list = [ tf.placeholder(dtype=tf.float32, shape=(None,), name=f"groundtruth_weights_{i}") for i in range(images.shape[0]) @@ -184,8 +186,8 @@ def __init__( # Save new attributes self._input_shape = images.shape.as_list()[1:] self.is_training: bool = is_training - self.images: Optional["tf.Tensor"] = images - self.attack_losses: Tuple[str, ...] = attack_losses + self.images: "tf.Tensor" | None = images + self.attack_losses: tuple[str, ...] = attack_losses # Assign session if sess is None: @@ -206,7 +208,7 @@ def native_label_is_pytorch_format(self) -> bool: return False @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -217,14 +219,14 @@ def input_shape(self) -> Tuple[int, ...]: @staticmethod def _load_model( images: "tf.Tensor", - filename: Optional[str] = None, - url: Optional[str] = None, - obj_detection_model: Optional["FasterRCNNMetaArch"] = None, + filename: str | None = None, + url: str | None = None, + obj_detection_model: "FasterRCNNMetaArch" | None = None, is_training: bool = False, - groundtruth_boxes_list: Optional[List["tf.Tensor"]] = None, - groundtruth_classes_list: Optional[List["tf.Tensor"]] = None, - groundtruth_weights_list: Optional[List["tf.Tensor"]] = None, - ) -> Tuple[Dict[str, "tf.Tensor"], ...]: + groundtruth_boxes_list: list["tf.Tensor"] | None = None, + groundtruth_classes_list: list["tf.Tensor"] | None = None, + groundtruth_weights_list: list["tf.Tensor"] | None = None, + ) -> tuple[dict[str, "tf.Tensor"], ...]: """ Download, extract and load a model from a URL if it not already in the cache. The file at indicated by `url` is downloaded to the path ~/.art/data and given the name `filename`. Files in tar, tar.gz, tar.bz, and zip @@ -322,14 +324,14 @@ def _load_model( return obj_detection_model, predictions, losses, detections - def loss_gradient( # pylint: disable=W0221 - self, x: np.ndarray, y: List[Dict[str, np.ndarray]], standardise_output: bool = False, **kwargs + def loss_gradient( + self, x: np.ndarray, y: list[dict[str, np.ndarray]], standardise_output: bool = False, **kwargs ) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. :param x: Samples of shape (nb_samples, height, width, nb_channels). - :param y: Targets of format `List[Dict[str, np.ndarray]]`, one for each input image. The fields of the Dict are + :param y: Targets of format `list[dict[str, np.ndarray]]`, one for each input image. 
The fields of the dict are as follows: - boxes [N, 4]: the boxes in [y1, x1, y2, x2] in scale [0, 1] (`standardise_output=False`) or @@ -390,9 +392,9 @@ def loss_gradient( # pylint: disable=W0221 return grads - def predict( # pylint: disable=W0221 + def predict( self, x: np.ndarray, batch_size: int = 128, standardise_output: bool = False, **kwargs - ) -> List[Dict[str, np.ndarray]]: + ) -> list[dict[str, np.ndarray]]: """ Perform prediction for a batch of inputs. @@ -403,8 +405,8 @@ def predict( # pylint: disable=W0221 to COCO categories and the boxes will be changed to [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - :return: Predictions of format `List[Dict[str, np.ndarray]]`, one for each input image. The - fields of the Dict are as follows: + :return: Predictions of format `list[dict[str, np.ndarray]]`, one for each input image. The + fields of the dict are as follows: - boxes [N, 4]: the boxes in [y1, x1, y2, x2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. Can be changed to PyTorch format with `standardise_output=True`. @@ -471,7 +473,7 @@ def input_images(self) -> "tf.Tensor": return self.images @property - def predictions(self) -> Dict[str, "tf.Tensor"]: + def predictions(self) -> dict[str, "tf.Tensor"]: """ Get the `_predictions` attribute. @@ -480,7 +482,7 @@ def predictions(self) -> Dict[str, "tf.Tensor"]: return self._predictions @property - def losses(self) -> Dict[str, "tf.Tensor"]: + def losses(self) -> dict[str, "tf.Tensor"]: """ Get the `_losses` attribute. @@ -491,7 +493,7 @@ def losses(self) -> Dict[str, "tf.Tensor"]: return self._losses @property - def detections(self) -> Dict[str, "tf.Tensor"]: + def detections(self) -> dict[str, "tf.Tensor"]: """ Get the `_detections` attribute. @@ -502,9 +504,7 @@ def detections(self) -> Dict[str, "tf.Tensor"]: def fit(self, x: np.ndarray, y, batch_size: int = 128, nb_epochs: int = 20, **kwargs) -> None: raise NotImplementedError - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: raise NotImplementedError def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: @@ -545,7 +545,7 @@ def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: return loss_values - def compute_losses(self, x: np.ndarray, y: np.ndarray) -> Dict[str, np.ndarray]: # type: ignore + def compute_losses(self, x: np.ndarray, y: np.ndarray) -> dict[str, np.ndarray]: # type: ignore """ Compute all loss components. @@ -576,7 +576,7 @@ def compute_losses(self, x: np.ndarray, y: np.ndarray) -> Dict[str, np.ndarray]: for loss_name in self.attack_losses: self._losses_dict[loss_name] = self._losses[loss_name] - losses: Dict[str, np.ndarray] = {} + losses: dict[str, np.ndarray] = {} for loss_name in self.attack_losses: loss_value = self._sess.run(self._losses_dict[loss_name], feed_dict=feed_dict) losses[loss_name] = loss_value diff --git a/art/estimators/object_detection/tensorflow_v2_faster_rcnn.py b/art/estimators/object_detection/tensorflow_v2_faster_rcnn.py index 689098a0df..ded228019e 100644 --- a/art/estimators/object_detection/tensorflow_v2_faster_rcnn.py +++ b/art/estimators/object_detection/tensorflow_v2_faster_rcnn.py @@ -18,8 +18,10 @@ """ This module implements the task specific estimator for Faster R-CNN in TensorFlowV2. 
""" +from __future__ import annotations + import logging -from typing import List, Dict, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -29,7 +31,7 @@ from art import config if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf from object_detection.meta_architectures.faster_rcnn_meta_arch import FasterRCNNMetaArch @@ -49,17 +51,17 @@ class TensorFlowV2FasterRCNN(ObjectDetectorMixin, TensorFlowV2Estimator): def __init__( self, - input_shape: Tuple[int, ...], - model: Optional["FasterRCNNMetaArch"] = None, - filename: Optional[str] = None, - url: Optional[str] = None, + input_shape: tuple[int, ...], + model: "FasterRCNNMetaArch" | None = None, + filename: str | None = None, + url: str | None = None, is_training: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, channels_first: bool = False, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), - attack_losses: Tuple[str, ...] = ( + attack_losses: tuple[str, ...] = ( "Loss/RPNLoss/localization_loss", "Loss/RPNLoss/objectness_loss", "Loss/BoxClassifierLoss/localization_loss", @@ -69,7 +71,7 @@ def __init__( """ Initialization of an instance TensorFlowV2FasterRCNN. - :param input_shape: A Tuple indicating input shape in form (height, width, channels) + :param input_shape: A tuple indicating input shape in form (height, width, channels) :param model: A TensorFlowV2 Faster-RCNN model. The output that can be computed from the model includes a tuple of (predictions, losses, detections): - predictions: a dictionary holding "raw" prediction tensors. @@ -135,12 +137,12 @@ def __init__( raise ValueError("This estimator does not support `postprocessing_defences`.") # Save new attributes - self._input_shape: Tuple[int, ...] = input_shape - self._detections: List[Dict[str, np.ndarray]] = [] - self._predictions: List[np.ndarray] = [] - self._losses: Dict[str, np.ndarray] = {} + self._input_shape: tuple[int, ...] = input_shape + self._detections: list[dict[str, np.ndarray]] = [] + self._predictions: list[np.ndarray] = [] + self._losses: dict[str, np.ndarray] = {} self.is_training: bool = is_training - self.attack_losses: Tuple[str, ...] = attack_losses + self.attack_losses: tuple[str, ...] = attack_losses @property def native_label_is_pytorch_format(self) -> bool: @@ -150,7 +152,7 @@ def native_label_is_pytorch_format(self) -> bool: return False @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -160,10 +162,10 @@ def input_shape(self) -> Tuple[int, ...]: @staticmethod def _load_model( - filename: Optional[str] = None, - url: Optional[str] = None, + filename: str | None = None, + url: str | None = None, is_training: bool = False, - ) -> Tuple[Dict[str, "tf.Tensor"], ...]: + ) -> tuple[dict[str, "tf.Tensor"], ...]: """ Download, extract and load a model from a URL if it is not already in the cache. The file indicated by `url` is downloaded to the path ~/.art/data and given the name `filename`. 
Files in tar, tar.gz, tar.bz, and zip @@ -207,14 +209,14 @@ def _load_model( return obj_detection_model - def loss_gradient( # pylint: disable=W0221 - self, x: np.ndarray, y: List[Dict[str, np.ndarray]], standardise_output: bool = False, **kwargs + def loss_gradient( + self, x: np.ndarray, y: list[dict[str, np.ndarray]], standardise_output: bool = False, **kwargs ) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. :param x: Samples of shape (nb_samples, height, width, nb_channels). - :param y: Targets of format `List[Dict[str, np.ndarray]]`, one for each input image. The fields of the Dict are + :param y: Targets of format `list[dict[str, np.ndarray]]`, one for each input image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [y1, x1, y2, x2] in scale [0, 1] (`standardise_output=False`) or @@ -283,9 +285,9 @@ def loss_gradient( # pylint: disable=W0221 return grads - def predict( # pylint: disable=W0221 + def predict( self, x: np.ndarray, batch_size: int = 128, standardise_output: bool = False, **kwargs - ) -> List[Dict[str, np.ndarray]]: + ) -> list[dict[str, np.ndarray]]: """ Perform prediction for a batch of inputs. @@ -297,8 +299,8 @@ def predict( # pylint: disable=W0221 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - :return: Predictions of format `List[Dict[str, np.ndarray]]`, one for each input image. The - fields of the Dict are as follows: + :return: Predictions of format `list[dict[str, np.ndarray]]`, one for each input image. The + fields of the dict are as follows: - boxes [N, 4]: the boxes in [y1, x1, y2, x2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. Can be changed to PyTorch format with `standardise_output=True`. @@ -352,7 +354,7 @@ def predict( # pylint: disable=W0221 return results @property - def predictions(self) -> List[np.ndarray]: + def predictions(self) -> list[np.ndarray]: """ Get the `_predictions` attribute. @@ -361,7 +363,7 @@ def predictions(self) -> List[np.ndarray]: return self._predictions @property - def losses(self) -> Dict[str, np.ndarray]: + def losses(self) -> dict[str, np.ndarray]: """ Get the `_losses` attribute. @@ -372,7 +374,7 @@ def losses(self) -> Dict[str, np.ndarray]: return self._losses @property - def detections(self) -> List[Dict[str, np.ndarray]]: + def detections(self) -> list[dict[str, np.ndarray]]: """ Get the `_detections` attribute. @@ -383,9 +385,7 @@ def detections(self) -> List[Dict[str, np.ndarray]]: def fit(self, x: np.ndarray, y, batch_size: int = 128, nb_epochs: int = 20, **kwargs) -> None: raise NotImplementedError - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: raise NotImplementedError def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: @@ -393,7 +393,7 @@ def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: Compute the loss. :param x: Sample input with shape as expected by the model. - :param y: Targets of format `List[Dict[str, np.ndarray]]`, one for each input image. The fields of the Dict are + :param y: Targets of format `list[dict[str, np.ndarray]]`, one for each input image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [y1, x1, y2, x2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image in TensorFlow format. 
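The TensorFlow Faster R-CNN estimators above keep boxes as [y1, x1, y2, x2] in relative [0, 1] coordinates, while the PyTorch-style API expects [x1, y1, x2, y2] in pixels; the convert_tf_to_pt / convert_pt_to_tf helpers patched in utils.py below translate between the two. A rough sketch of that reordering and rescaling, for illustration only (not the library code, names are made up):

import numpy as np


def tf_boxes_to_pt(boxes: np.ndarray, height: int, width: int) -> np.ndarray:
    # Reorder [y1, x1, y2, x2] -> [x1, y1, x2, y2] and rescale from [0, 1] to pixels.
    y1, x1, y2, x2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    return np.stack([x1 * width, y1 * height, x2 * width, y2 * height], axis=1)


boxes_tf = np.array([[0.1, 0.2, 0.5, 0.6]])  # one box, [y1, x1, y2, x2] in [0, 1] scale
print(tf_boxes_to_pt(boxes_tf, height=416, width=416))  # approx. [[ 83.2  41.6 249.6 208. ]]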
@@ -437,13 +437,13 @@ def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: return total_loss - def compute_losses(self, x: np.ndarray, y: np.ndarray) -> Dict[str, np.ndarray]: + def compute_losses(self, x: np.ndarray, y: np.ndarray) -> dict[str, np.ndarray]: """ Compute all loss components. :param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2, nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2). - :param y: Targets of format `List[Dict[str, np.ndarray]]`, one for each input image. The fields of the Dict are + :param y: Targets of format `list[dict[str, np.ndarray]]`, one for each input image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [y1, x1, y2, x2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image in TensorFlow format. diff --git a/art/estimators/object_detection/utils.py b/art/estimators/object_detection/utils.py index d9af5ccd0b..18e3a4e2b7 100644 --- a/art/estimators/object_detection/utils.py +++ b/art/estimators/object_detection/utils.py @@ -18,18 +18,20 @@ """ This module contains utility functions for object detection. """ -from typing import Dict, List, Union, Tuple, Optional, TYPE_CHECKING +from __future__ import annotations + +from typing import TYPE_CHECKING import numpy as np if TYPE_CHECKING: - # pylint: disable=C0412 + import torch -def convert_tf_to_pt(y: List[Dict[str, np.ndarray]], height: int, width: int) -> List[Dict[str, np.ndarray]]: +def convert_tf_to_pt(y: list[dict[str, np.ndarray]], height: int, width: int) -> list[dict[str, np.ndarray]]: """ - :param y: Target values of format `List[Dict[Tensor]]`, one for each input image. The fields of the Dict are as + :param y: Target values of format `list[dict[Tensor]]`, one for each input image. The fields of the dict are as follows: - boxes (FloatTensor[N, 4]): the boxes in [y1, x1, y2, x2] format, with 0 <= x1 < x2 <= W and @@ -39,7 +41,7 @@ def convert_tf_to_pt(y: List[Dict[str, np.ndarray]], height: int, width: int) -> :param height: Height of images in pixels. :param width: Width if images in pixels. - :return: Target values of format `List[Dict[Tensor]]`, one for each input image. The fields of the Dict are as + :return: Target values of format `list[dict[Tensor]]`, one for each input image. The fields of the dict are as follows: - boxes (FloatTensor[N, 4]): the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and @@ -60,9 +62,9 @@ def convert_tf_to_pt(y: List[Dict[str, np.ndarray]], height: int, width: int) -> return y -def convert_pt_to_tf(y: List[Dict[str, np.ndarray]], height: int, width: int) -> List[Dict[str, np.ndarray]]: +def convert_pt_to_tf(y: list[dict[str, np.ndarray]], height: int, width: int) -> list[dict[str, np.ndarray]]: """ - :param y: Target values of format `List[Dict[Tensor]]`, one for each input image. The fields of the Dict are as + :param y: Target values of format `list[dict[Tensor]]`, one for each input image. The fields of the dict are as follows: - boxes (FloatTensor[N, 4]): the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and @@ -72,7 +74,7 @@ def convert_pt_to_tf(y: List[Dict[str, np.ndarray]], height: int, width: int) -> :param height: Height of images in pixels. :param width: Width if images in pixels. - :return: Target values of format `List[Dict[Tensor]]`, one for each input image. The fields of the Dict are as + :return: Target values of format `list[dict[Tensor]]`, one for each input image. 
The fields of the dict are as follows: - boxes (FloatTensor[N, 4]): the boxes in [y1, x1, y2, x2] format, with 0 <= x1 < x2 <= W and @@ -95,15 +97,15 @@ def convert_pt_to_tf(y: List[Dict[str, np.ndarray]], height: int, width: int) -> def cast_inputs_to_pt( - x: Union[np.ndarray, "torch.Tensor"], - y: Optional[List[Dict[str, Union[np.ndarray, "torch.Tensor"]]]] = None, -) -> Tuple["torch.Tensor", Optional[List[Dict[str, "torch.Tensor"]]]]: + x: np.ndarray | "torch.Tensor", + y: list[dict[str, np.ndarray | "torch.Tensor"]] | None = None, +) -> tuple["torch.Tensor", list[dict[str, "torch.Tensor"]] | None]: """ Cast object detection inputs `(x, y)` to PyTorch tensors. :param x: Samples of shape NCHW or NHWC. - :param y: Target values of format `List[Dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. - The fields of the Dict are as follows: + :param y: Target values of format `list[dict[str, Union[np.ndarray, torch.Tensor]]]`, one for each input image. + The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. @@ -117,7 +119,7 @@ def cast_inputs_to_pt( else: x_tensor = x - y_tensor: Optional[List[Dict[str, torch.Tensor]]] = None + y_tensor: list[dict[str, torch.Tensor]] | None = None # Convert labels into tensor if isinstance(y, list): diff --git a/art/estimators/object_tracking/pytorch_goturn.py b/art/estimators/object_tracking/pytorch_goturn.py index dd85f78e56..0f42332132 100644 --- a/art/estimators/object_tracking/pytorch_goturn.py +++ b/art/estimators/object_tracking/pytorch_goturn.py @@ -62,9 +62,11 @@ """ This module implements the task specific estimator for PyTorch GOTURN object tracker. """ +from __future__ import annotations + import logging import time -from typing import List, Dict, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -72,7 +74,7 @@ from art.estimators.pytorch import PyTorchEstimator if TYPE_CHECKING: - # pylint: disable=C0412 + import PIL import torch @@ -93,11 +95,11 @@ class PyTorchGoturn(ObjectTrackerMixin, PyTorchEstimator): def __init__( self, model, - input_shape: Tuple[int, ...], - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - channels_first: Optional[bool] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + input_shape: tuple[int, ...], + clip_values: "CLIP_VALUES_TYPE" | None = None, + channels_first: bool | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = None, device_type: str = "gpu", ): @@ -159,7 +161,7 @@ def __init__( if self.postprocessing_defences is not None: # pragma: no cover raise ValueError("This estimator does not support `postprocessing_defences`.") - self.attack_losses: Tuple[str, ...] = ("torch.nn.L1Loss",) + self.attack_losses: tuple[str, ...] = ("torch.nn.L1Loss",) @property def native_label_is_pytorch_format(self) -> bool: @@ -169,7 +171,7 @@ def native_label_is_pytorch_format(self) -> bool: return True @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. 
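Every module touched in this patch follows the same typing migration: add `from __future__ import annotations`, replace typing.Optional/Union/List/Dict/Tuple with `|` unions and the built-in generics, and keep the framework imports under TYPE_CHECKING. A minimal sketch, with illustrative names, of why the quoted forward references stay safe at runtime:

from __future__ import annotations

from typing import TYPE_CHECKING

import numpy as np

if TYPE_CHECKING:
    import torch  # only needed by the type checker, never imported at runtime


def collect_boxes(
    x: np.ndarray | "torch.Tensor",
    y: list[dict[str, np.ndarray]] | None = None,
) -> tuple[np.ndarray, ...]:
    # With postponed evaluation (PEP 563) the annotations above are stored as plain
    # strings and never executed, so the `|` unions, the built-in generics, and the
    # quoted "torch.Tensor" reference work even without torch installed.
    arrays = [np.asarray(x)]
    for target in y or []:
        arrays.append(target["boxes"])
    return tuple(arrays)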
@@ -189,14 +191,14 @@ def device(self) -> "torch.device": def _get_losses( self, x: np.ndarray, - y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]], + y: list[dict[str, np.ndarray | "torch.Tensor"]], reduction: str = "sum", - ) -> Tuple[Dict[str, Union["torch.Tensor", int, List["torch.Tensor"]]], List["torch.Tensor"], List["torch.Tensor"]]: + ) -> tuple[dict[str, "torch.Tensor" | int | list["torch.Tensor"]], list["torch.Tensor"], list["torch.Tensor"]]: """ Get the loss tensor output of the model including all preprocessing. :param x: Samples of shape (nb_samples, nb_frames, height, width, nb_channels). - :param y: Target values of format `List[Dict[str, np.ndarray]]`, one dictionary for each input image. The keys + :param y: Target values of format `list[dict[str, np.ndarray]]`, one dictionary for each input image. The keys of the dictionary are: - boxes [N_FRAMES, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and @@ -226,7 +228,7 @@ def _get_losses( image_tensor_list_grad = [] y_preprocessed = [] - inputs_t: List["torch.Tensor"] = [] + inputs_t: list["torch.Tensor"] = [] for i in range(x.shape[0]): if self.clip_values is not None: @@ -264,7 +266,7 @@ def _get_losses( loss = torch.nn.L1Loss(size_average=False)(y_pred.float(), gt_bb.float()) loss_list.append(loss) - loss_dict: Dict[str, Union["torch.Tensor", int, List["torch.Tensor"]]] = {} + loss_dict: dict[str, "torch.Tensor" | int | list["torch.Tensor"]] = {} if reduction == "sum": loss_dict["torch.nn.L1Loss"] = sum(loss_list) elif reduction == "none": @@ -274,15 +276,13 @@ def _get_losses( return loss_dict, inputs_t, image_tensor_list_grad - def loss_gradient( # pylint: disable=W0613 - self, x: np.ndarray, y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]], **kwargs - ) -> np.ndarray: + def loss_gradient(self, x: np.ndarray, y: list[dict[str, np.ndarray | "torch.Tensor"]], **kwargs) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. :param x: Samples of shape (nb_samples, height, width, nb_channels). - :param y: Target values of format `List[Dict[Tensor]]`, one for each input image. The - fields of the Dict are as follows: + :param y: Target values of format `list[dict[Tensor]]`, one for each input image. The + fields of the dict are as follows: - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values \ between 0 and H and 0 and W. 
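Throughout these object detection and tracking estimators, labels and predictions travel as plain Python lists of dicts of arrays, as the rewritten docstrings above spell out. A hypothetical target for a single image in that format (all numbers invented for illustration):

import numpy as np

y = [
    {
        # [x1, y1, x2, y2] in pixels, one row per object
        "boxes": np.array([[10.0, 20.0, 110.0, 220.0], [50.0, 60.0, 150.0, 160.0]], dtype=np.float32),
        # one integer class index per box
        "labels": np.array([1, 3], dtype=np.int64),
    }
]

A list like this, one dict per image, is what the patched loss_gradient, fit, and compute_loss methods take for `y`; predict hands back the same structure, typically with an additional "scores" entry per image.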
@@ -331,7 +331,7 @@ def loss_gradient( # pylint: disable=W0613 grads = self._apply_preprocessing_gradient(x, grads) if x.dtype != object: - grads = np.array([i for i in grads], dtype=x.dtype) # pylint: disable=R1721 + grads = np.array([i for i in grads], dtype=x.dtype) # pylint: disable=unnecessary-comprehension assert grads.shape == x.shape and grads.dtype == x.dtype return grads @@ -427,7 +427,7 @@ def get_center_y_f(bbox_tight: "torch.Tensor") -> "torch.Tensor": def compute_crop_pad_image_location( bbox_tight: "torch.Tensor", image: "torch.Tensor" - ) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor"]: + ) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor"]: """ Get the valid image coordinates for the context region in target or search region in full image @@ -500,9 +500,9 @@ def edge_spacing_y_f(bbox_tight: "torch.Tensor") -> "torch.Tensor": return torch.maximum(torch.tensor(0.0).to(self.device), (output_height / 2) - bbox_center_y) - def crop_pad_image(bbox_tight: "torch.Tensor", image: "torch.Tensor") -> Tuple[ + def crop_pad_image(bbox_tight: "torch.Tensor", image: "torch.Tensor") -> tuple[ "torch.Tensor", - Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor"], + tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor"], "torch.Tensor", "torch.Tensor", ]: @@ -620,7 +620,7 @@ def _track(self, x: "torch.Tensor", y_init: "torch.Tensor") -> "torch.Tensor": return y_pred - def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> List[Dict[str, np.ndarray]]: + def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> list[dict[str, np.ndarray]]: """ Perform prediction for a batch of inputs. @@ -632,7 +632,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> List[Dict[s Initial box around object to be tracked as [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - :return: Predictions of format `List[Dict[str, np.ndarray]]`, one dictionary for each input image. The keys of + :return: Predictions of format `list[dict[str, np.ndarray]]`, one dictionary for each input image. The keys of the dictionary are: - boxes [N_FRAMES, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and @@ -684,22 +684,18 @@ def fit(self, x: np.ndarray, y, batch_size: int = 128, nb_epochs: int = 20, **kw """ raise NotImplementedError - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: """ Not implemented. """ raise NotImplementedError - def compute_losses( - self, x: np.ndarray, y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]] - ) -> Dict[str, np.ndarray]: + def compute_losses(self, x: np.ndarray, y: list[dict[str, np.ndarray | "torch.Tensor"]]) -> dict[str, np.ndarray]: """ Compute losses. :param x: Samples of shape (nb_samples, nb_frames, height, width, nb_channels). - :param y: Target values of format `List[Dict[str, np.ndarray]]`, one dictionary for each input image. The keys + :param y: Target values of format `list[dict[str, np.ndarray]]`, one dictionary for each input image. 
The keys of the dictionary are: - boxes [N_FRAMES, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and @@ -711,14 +707,12 @@ def compute_losses( output_dict["torch.nn.L1Loss"] = output return output_dict - def compute_loss( - self, x: np.ndarray, y: List[Dict[str, Union[np.ndarray, "torch.Tensor"]]], **kwargs - ) -> np.ndarray: + def compute_loss(self, x: np.ndarray, y: list[dict[str, np.ndarray | "torch.Tensor"]], **kwargs) -> np.ndarray: """ Compute loss. :param x: Samples of shape (nb_samples, nb_frames, height, width, nb_channels). - :param y: Target values of format `List[Dict[str, np.ndarray]]`, one dictionary for each input image. The keys + :param y: Target values of format `list[dict[str, np.ndarray]]`, one dictionary for each input image. The keys of the dictionary are: - boxes [N_FRAMES, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and @@ -783,7 +777,7 @@ def update(self, image: np.ndarray) -> np.ndarray: return box_return - def track(self, img_files: List[str], box: np.ndarray, visualize: bool = False) -> Tuple[np.ndarray, np.ndarray]: + def track(self, img_files: list[str], box: np.ndarray, visualize: bool = False) -> tuple[np.ndarray, np.ndarray]: """ Method `track` for GOT-10k toolkit trackers (MIT licence). diff --git a/art/estimators/poison_mitigation/neural_cleanse/keras.py b/art/estimators/poison_mitigation/neural_cleanse/keras.py index 5c3146e647..ca752b4f54 100644 --- a/art/estimators/poison_mitigation/neural_cleanse/keras.py +++ b/art/estimators/poison_mitigation/neural_cleanse/keras.py @@ -21,10 +21,10 @@ | Paper link: https://people.cs.uchicago.edu/~ravenben/publications/pdf/backdoor-sp19.pdf """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -69,15 +69,15 @@ def __init__( model: KERAS_MODEL_TYPE, use_logits: bool = False, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), input_layer: int = 0, output_layer: int = 0, steps: int = 1000, init_cost: float = 1e-3, - norm: Union[int, float] = 2, + norm: int | float = 2, learning_rate: float = 0.1, attack_success_threshold: float = 0.99, patience: int = 5, @@ -199,7 +199,7 @@ def __init__( ) @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -222,7 +222,7 @@ def reset(self): def generate_backdoor( self, x_val: np.ndarray, y_val: np.ndarray, y_target: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray]: """ Generates a possible backdoor for the model. Returns the pattern and the mask :return: A tuple of the pattern and mask for the model. 
@@ -233,8 +233,8 @@ def generate_backdoor( self.reset() datagen = ImageDataGenerator() gen = datagen.flow(x_val, y_val, batch_size=self.batch_size) - mask_best: Optional[np.ndarray] = None - pattern_best: Optional[np.ndarray] = None + mask_best: np.ndarray | None = None + pattern_best: np.ndarray | None = None reg_best = float("inf") cost_set_counter = 0 cost_up_counter = 0 @@ -367,7 +367,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa """ return NeuralCleanseMixin.predict(self, x, batch_size=batch_size, training_mode=training_mode, **kwargs) - def mitigate(self, x_val: np.ndarray, y_val: np.ndarray, mitigation_types: List[str]) -> None: + def mitigate(self, x_val: np.ndarray, y_val: np.ndarray, mitigation_types: list[str]) -> None: """ Mitigates the effect of poison on a classifier @@ -393,7 +393,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals def class_gradient( self, x: np.ndarray, - label: Optional[Union[int, List[int], np.ndarray]] = None, + label: int | list[int] | np.ndarray | None = None, training_mode: bool = False, **kwargs, ) -> np.ndarray: diff --git a/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py b/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py index 97203cc019..73f5c31c00 100644 --- a/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py +++ b/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py @@ -20,10 +20,10 @@ | Paper link: https://people.cs.uchicago.edu/~ravenben/publications/pdf/backdoor-sp19.pdf """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Union, Tuple, List + import numpy as np @@ -46,7 +46,7 @@ def __init__( steps: int, *args, init_cost: float = 1e-3, - norm: Union[int, float] = 2, + norm: int | float = 2, learning_rate: float = 0.1, attack_success_threshold: float = 0.99, patience: int = 5, @@ -55,7 +55,7 @@ def __init__( early_stop_patience: int = 10, cost_multiplier: float = 1.5, batch_size: int = 32, - **kwargs + **kwargs, ) -> None: """ Create a neural cleanse wrapper. 
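The __future__ lines updated above simply append `annotations` to the long-standing Python 2 compatibility import rather than adding a second statement. For illustration only; on Python 3 the four legacy names are no-ops, so postponed evaluation of annotations (PEP 563) is the only behavioural change:

from __future__ import absolute_import, division, print_function, unicode_literals, annotations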
@@ -86,7 +86,7 @@ def __init__( self.cost_multiplier_up = cost_multiplier self.cost_multiplier_down = cost_multiplier**1.5 self.batch_size = batch_size - self.top_indices: List[int] = [] + self.top_indices: list[int] = [] self.activation_threshold = 0 def _predict_classifier( @@ -142,7 +142,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa return predictions - def mitigate(self, x_val: np.ndarray, y_val: np.ndarray, mitigation_types: List[str]) -> None: + def mitigate(self, x_val: np.ndarray, y_val: np.ndarray, mitigation_types: list[str]) -> None: """ Mitigates the effect of poison on a classifier @@ -226,7 +226,7 @@ def check_backdoor_effective(self, backdoor_data: np.ndarray, backdoor_labels: n backdoor_effective = np.logical_not(np.all(backdoor_predictions == backdoor_labels, axis=1)) return np.any(backdoor_effective) # type: ignore - def backdoor_examples(self, x_val: np.ndarray, y_val: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + def backdoor_examples(self, x_val: np.ndarray, y_val: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Generate reverse-engineered backdoored examples using validation data :param x_val: validation data @@ -263,14 +263,14 @@ def backdoor_examples(self, x_val: np.ndarray, y_val: np.ndarray) -> Tuple[np.nd def generate_backdoor( self, x_val: np.ndarray, y_val: np.ndarray, y_target: np.ndarray - ) -> Tuple[np.ndarray, np.ndarray]: + ) -> tuple[np.ndarray, np.ndarray]: """ Generates a possible backdoor for the model. Returns the pattern and the mask :return: A tuple of the pattern and mask for the model. """ raise NotImplementedError - def outlier_detection(self, x_val: np.ndarray, y_val: np.ndarray) -> List[Tuple[int, np.ndarray, np.ndarray]]: + def outlier_detection(self, x_val: np.ndarray, y_val: np.ndarray) -> list[tuple[int, np.ndarray, np.ndarray]]: """ Returns a tuple of suspected of suspected poison labels and their mask and pattern :return: A list of tuples containing the the class index, mask, and pattern for suspected labels diff --git a/art/estimators/poison_mitigation/strip/strip.py b/art/estimators/poison_mitigation/strip/strip.py index 8a7626bc3a..070af6efb9 100644 --- a/art/estimators/poison_mitigation/strip/strip.py +++ b/art/estimators/poison_mitigation/strip/strip.py @@ -20,10 +20,10 @@ | Paper link: https://arxiv.org/abs/1902.06531 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging -from typing import Callable, Optional import numpy as np from scipy.stats import entropy, norm @@ -46,7 +46,7 @@ def __init__( predict_fn: Callable[[np.ndarray], np.ndarray], num_samples: int = 20, false_acceptance_rate: float = 0.01, - **kwargs + **kwargs, ) -> None: # pragma: no cover """ Create a STRIP defense @@ -59,8 +59,8 @@ def __init__( self.predict_fn = predict_fn self.num_samples = num_samples self.false_acceptance_rate = false_acceptance_rate - self.entropy_threshold: Optional[float] = None - self.validation_data: Optional[np.ndarray] = None + self.entropy_threshold: float | None = None + self.validation_data: np.ndarray | None = None def predict(self, x: np.ndarray) -> np.ndarray: """ diff --git a/art/estimators/pytorch.py b/art/estimators/pytorch.py index 10b6702b58..970e24503c 100644 --- a/art/estimators/pytorch.py +++ b/art/estimators/pytorch.py @@ -19,7 +19,7 @@ This module implements the abstract 
estimator `PyTorchEstimator` for PyTorch models. """ import logging -from typing import TYPE_CHECKING, Any, List, Tuple +from typing import TYPE_CHECKING, Any import numpy as np @@ -134,7 +134,7 @@ def _check_params(self) -> None: (isinstance(p, PreprocessorPyTorch) for p in self.preprocessing_operations) ) - def _apply_preprocessing(self, x, y, fit: bool = False, no_grad=True) -> Tuple[Any, Any]: # pylint: disable=W0221 + def _apply_preprocessing(self, x, y, fit: bool = False, no_grad=True) -> tuple[Any, Any]: """ Apply all preprocessing defences of the estimator on the raw inputs `x` and `y`. This function is should only be called from function `_apply_preprocessing`. @@ -282,12 +282,12 @@ def _apply_preprocessing_gradient(self, x, gradients, fit=False): return gradients - def _set_layer(self, train: bool, layerinfo: List["torch.nn.modules.Module"]) -> None: + def _set_layer(self, train: bool, layerinfo: list["torch.nn.modules.Module"]) -> None: """ Set all layers that are an instance of `layerinfo` into training or evaluation mode. :param train: False for evaluation mode. - :param layerinfo: List of module types. + :param layerinfo: list of module types. """ import torch @@ -316,7 +316,6 @@ def set_dropout(self, train: bool) -> None: """ import torch - # pylint: disable=W0212 self._set_layer(train=train, layerinfo=[torch.nn.modules.dropout._DropoutNd]) # type: ignore def set_batchnorm(self, train: bool) -> None: @@ -327,7 +326,6 @@ def set_batchnorm(self, train: bool) -> None: """ import torch - # pylint: disable=W0212 self._set_layer(train=train, layerinfo=[torch.nn.modules.batchnorm._BatchNorm]) # type: ignore def set_multihead_attention(self, train: bool) -> None: @@ -338,5 +336,4 @@ def set_multihead_attention(self, train: bool) -> None: """ import torch - # pylint: disable=W0212 self._set_layer(train=train, layerinfo=[torch.nn.modules.MultiheadAttention]) # type: ignore diff --git a/art/estimators/regression/blackbox.py b/art/estimators/regression/blackbox.py index 4339692b4f..f701760ea9 100644 --- a/art/estimators/regression/blackbox.py +++ b/art/estimators/regression/blackbox.py @@ -18,11 +18,12 @@ """ This module implements the classifier `BlackBoxRegressor` for black-box regressors. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable from functools import total_ordering import logging -from typing import Callable, List, Optional, Union, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -47,12 +48,12 @@ class BlackBoxRegressor(RegressorMixin, BaseEstimator): def __init__( self, - predict_fn: Union[Callable, Tuple[np.ndarray, np.ndarray]], - input_shape: Tuple[int, ...], - loss_fn: Optional[Callable] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + predict_fn: Callable | tuple[np.ndarray, np.ndarray], + input_shape: tuple[int, ...], + loss_fn: Callable | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), fuzzy_float_compare: bool = False, ): @@ -93,7 +94,7 @@ def __init__( self._loss_fn = loss_fn @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -110,7 +111,7 @@ def predict_fn(self) -> Callable: """ return self._predict_fn # type: ignore - def get_classifier(self, thresholds: List[float]) -> BlackBoxClassifier: + def get_classifier(self, thresholds: list[float]) -> BlackBoxClassifier: """ Returns a classifier representation of the regressor. Maps real values to classes based on the provided thresholds. @@ -136,7 +137,6 @@ def predict_class(x): ) return classifier - # pylint: disable=W0221 def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray: """ Perform prediction for a batch of inputs. @@ -177,7 +177,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, **kwargs) -> None: """ raise NotImplementedError - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. For Keras, .h5 format is used. @@ -247,7 +247,7 @@ def __ge__(self, other): return self.key[compare_idx] >= other.key[compare_idx] -def _make_lookup_predict_fn(existing_predictions: Tuple[np.ndarray, np.ndarray], fuzzy_float_compare: bool) -> Callable: +def _make_lookup_predict_fn(existing_predictions: tuple[np.ndarray, np.ndarray], fuzzy_float_compare: bool) -> Callable: """ Makes a predict_fn callback based on a table of existing predictions. diff --git a/art/estimators/regression/keras.py b/art/estimators/regression/keras.py index 431f07ea21..028c0ea97a 100644 --- a/art/estimators/regression/keras.py +++ b/art/estimators/regression/keras.py @@ -18,20 +18,15 @@ """ This module implements the regressor `KerasRegressor` for Keras models. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging import os import time from typing import ( Any, - Callable, - Dict, Iterator, - List, - Optional, - Tuple, - Union, TYPE_CHECKING, ) @@ -43,7 +38,7 @@ from art.estimators.regression.regressor import RegressorMixin if TYPE_CHECKING: - # pylint: disable=C0412 + import keras import tensorflow as tf @@ -54,7 +49,7 @@ logger = logging.getLogger(__name__) -KERAS_MODEL_TYPE = Union["keras.models.Model", "tf.keras.models.Model"] # pylint: disable=C0103 +KERAS_MODEL_TYPE = "keras.models.Model" | "tf.keras.models.Model" class KerasRegressor(RegressorMixin, KerasEstimator): @@ -68,9 +63,9 @@ def __init__( self, model: KERAS_MODEL_TYPE, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), input_layer: int = 0, output_layer: int = 0, @@ -131,14 +126,14 @@ def _initialize_params( :param input_layer: Which layer to consider as the Input when the model has multiple input layers. :param output_layer: Which layer to consider as the Output when the model has multiple output layers. """ - # pylint: disable=E0401 + if self.is_tensorflow: import tensorflow as tf if tf.executing_eagerly(): # pragma: no cover raise ValueError("TensorFlow is executing eagerly. Please disable eager execution.") - import tensorflow.keras as keras # pylint: disable=R0402 - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras as keras # pylint: disable=consider-using-from-import + import tensorflow.keras.backend as k self._losses = keras.losses else: @@ -232,7 +227,7 @@ def _initialize_params( self._layer_names = self._get_layers() @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -260,9 +255,7 @@ def output_layer(self) -> int: """ return self._output_layer # type: ignore - def compute_loss( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, reduction: str = "none", **kwargs - ) -> np.ndarray: + def compute_loss(self, x: np.ndarray, y: np.ndarray, reduction: str = "none", **kwargs) -> np.ndarray: """ Compute the loss of the neural network for samples `x`. 
@@ -281,7 +274,7 @@ def compute_loss( # pylint: disable=W0221 raise NotImplementedError("loss method is only supported for keras versions >= 2.3.1") if self.is_tensorflow: - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras.backend as k else: import keras.backend as k @@ -324,7 +317,7 @@ def compute_loss( # pylint: disable=W0221 return loss_value - def compute_loss_from_predictions( # pylint: disable=W0221 + def compute_loss_from_predictions( self, pred: np.ndarray, y: np.ndarray, reduction: str = "none", **kwargs ) -> np.ndarray: """ @@ -342,7 +335,7 @@ def compute_loss_from_predictions( # pylint: disable=W0221 raise NotImplementedError("loss method is only supported for keras versions >= 2.3.1") if self.is_tensorflow: - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras.backend as k else: import keras.backend as k @@ -375,9 +368,7 @@ def compute_loss_from_predictions( # pylint: disable=W0221 return loss_value - def loss_gradient( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. @@ -404,9 +395,7 @@ def loss_gradient( # pylint: disable=W0221 return gradients - def predict( # pylint: disable=W0221 - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ Perform prediction for a batch of inputs. @@ -487,7 +476,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg super().fit_generator(generator, nb_epochs=nb_epochs, **kwargs) def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False + self, x: np.ndarray, layer: int | str, batch_size: int = 128, framework: bool = False ) -> np.ndarray: """ Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and @@ -500,9 +489,9 @@ def get_activations( :param framework: If true, return the intermediate tensor representation of the activation. :return: The output of `layer`, where the first dimension is the batch size corresponding to `x`. 
""" - # pylint: disable=E0401 + if self.is_tensorflow: - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras.backend as k else: import keras.backend as k from art.config import ART_NUMPY_DTYPE @@ -529,7 +518,7 @@ def get_activations( x_preprocessed, _ = self._apply_preprocessing(x=x_expanded, y=None, fit=False) if not hasattr(self, "_activations_func"): - self._activations_func: Dict[str, Callable] = {} + self._activations_func: dict[str, Callable] = {} keras_layer = self._model.get_layer(layer_name) if layer_name not in self._activations_func: @@ -574,7 +563,7 @@ def custom_loss_gradient(self, nn_function, tensors, input_values, name="default :rtype: `np.ndarray` """ if self.is_tensorflow: - import tensorflow.keras.backend as k # pylint: disable=E0611 + import tensorflow.keras.backend as k else: import keras.backend as k @@ -588,24 +577,24 @@ def custom_loss_gradient(self, nn_function, tensors, input_values, name="default outputs = self._custom_loss_func[name] return outputs(input_values) - def _get_layers(self) -> List[str]: + def _get_layers(self) -> list[str]: """ Return the hidden layers in the model, if applicable. :return: The hidden layers in the model, input and output layers excluded. """ - # pylint: disable=E0401 + if self.is_tensorflow: - from tensorflow.keras.layers import InputLayer # pylint: disable=E0611 + from tensorflow.keras.layers import InputLayer else: - from keras.engine.topology import InputLayer # pylint: disable=E0611 + from keras.engine.topology import InputLayer layer_names = [layer.name for layer in self._model.layers[:-1] if not isinstance(layer, InputLayer)] logger.info("Inferred %i hidden layers on Keras regressor.", len(layer_names)) return layer_names - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. For Keras, .h5 format is used. @@ -624,7 +613,7 @@ def save(self, filename: str, path: Optional[str] = None) -> None: self._model.save(str(full_path)) logger.info("Model saved in path: %s.", full_path) - def __getstate__(self) -> Dict[str, Any]: + def __getstate__(self) -> dict[str, Any]: """ Use to ensure `KerasRegressor` can be pickled. @@ -657,7 +646,7 @@ def __getstate__(self) -> Dict[str, Any]: self.save(model_name) return state - def __setstate__(self, state: Dict[str, Any]) -> None: + def __setstate__(self, state: dict[str, Any]) -> None: """ Use to ensure `KerasRegressor` can be unpickled. @@ -666,7 +655,7 @@ def __setstate__(self, state: Dict[str, Any]) -> None: self.__dict__.update(state) if self.is_tensorflow: - from tensorflow.keras.models import load_model # pylint: disable=E0611 + from tensorflow.keras.models import load_model else: from keras.models import load_model @@ -690,7 +679,7 @@ def __repr__(self): def generator_fit( x: np.ndarray, y: np.ndarray, batch_size: int = 128 -) -> Iterator[Tuple[np.ndarray, np.ndarray]]: # pragma: no cover +) -> Iterator[tuple[np.ndarray, np.ndarray]]: # pragma: no cover """ Minimal data generator for randomly batching large datasets. diff --git a/art/estimators/regression/pytorch.py b/art/estimators/regression/pytorch.py index ec86649492..ba4486465b 100644 --- a/art/estimators/regression/pytorch.py +++ b/art/estimators/regression/pytorch.py @@ -18,14 +18,14 @@ """ This module implements the regressor `PyTorchRegressor` for PyTorch models. 
""" -# pylint: disable=C0302,R0904 -from __future__ import absolute_import, division, print_function, unicode_literals + +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import copy import logging import os import time -from typing import Any, Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np import six @@ -35,7 +35,7 @@ from art.estimators.pytorch import PyTorchEstimator if TYPE_CHECKING: - # pylint: disable=C0412, C0302 + import torch from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -64,15 +64,15 @@ def __init__( self, model: "torch.nn.Module", loss: "torch.nn.modules.loss._Loss", - input_shape: Tuple[int, ...], - optimizer: Optional["torch.optim.Optimizer"] = None, # type: ignore + input_shape: tuple[int, ...], + optimizer: "torch.optim.Optimizer" | None = None, # type: ignore use_amp: bool = False, opt_level: str = "O1", - loss_scale: Optional[Union[float, str]] = "dynamic", + loss_scale: float | str | None = "dynamic", channels_first: bool = True, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), device_type: str = "gpu", ) -> None: @@ -121,7 +121,7 @@ def __init__( self._loss = loss self._optimizer = optimizer self._use_amp = use_amp - self._learning_phase: Optional[bool] = None + self._learning_phase: bool | None = None self._opt_level = opt_level self._loss_scale = loss_scale @@ -130,7 +130,7 @@ def __init__( self.is_rnn = any((isinstance(m, torch.nn.modules.RNNBase) for m in self._model.modules())) # Get the internal layers - self._layer_names: List[str] = self._model.get_layers # type: ignore + self._layer_names: list[str] = self._model.get_layers # type: ignore self._model.to(self._device) @@ -139,7 +139,7 @@ def __init__( # Setup for AMP use if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp if self._optimizer is None: logger.warning( @@ -175,10 +175,10 @@ def device(self) -> "torch.device": @property def model(self) -> "torch.nn.Module": - return self._model._model # pylint: disable=W0212 + return self._model._model @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -224,7 +224,7 @@ def opt_level(self) -> str: return self._opt_level # type: ignore @property - def loss_scale(self) -> Union[float, str]: + def loss_scale(self) -> float | str: """ Return the loss scaling value. @@ -233,9 +233,7 @@ def loss_scale(self) -> Union[float, str]: """ return self._loss_scale # type: ignore - def predict( # pylint: disable=W0221 - self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs - ) -> np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs) -> np.ndarray: """ Perform prediction for a batch of inputs. 
@@ -281,8 +279,8 @@ def predict( # pylint: disable=W0221 return predictions def _predict_framework( - self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None - ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]: + self, x: "torch.Tensor", y: "torch.Tensor" | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Perform prediction for a batch of inputs. @@ -302,7 +300,7 @@ def _predict_framework( return output, y_preprocessed - def fit( # pylint: disable=W0221 + def fit( self, x: np.ndarray, y: np.ndarray, @@ -310,7 +308,7 @@ def fit( # pylint: disable=W0221 nb_epochs: int = 10, training_mode: bool = True, drop_last: bool = False, - scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None, + scheduler: "torch.optim.lr_scheduler._LRScheduler" | None = None, **kwargs, ) -> None: """ @@ -366,7 +364,7 @@ def fit( # pylint: disable=W0221 # Do training if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() @@ -438,7 +436,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg # Do training if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() @@ -481,13 +479,13 @@ def weight_reset(module): self.model.apply(weight_reset) - def compute_loss( # type: ignore # pylint: disable=W0221 + def compute_loss( # type: ignore self, - x: Union[np.ndarray, "torch.Tensor"], - y: Union[np.ndarray, "torch.Tensor"], + x: np.ndarray | "torch.Tensor", + y: np.ndarray | "torch.Tensor", reduction: str = "none", **kwargs, - ) -> Union[np.ndarray, "torch.Tensor"]: + ) -> np.ndarray | "torch.Tensor": """ Compute the loss. @@ -538,9 +536,9 @@ def compute_loss( # type: ignore # pylint: disable=W0221 return loss.detach().cpu().numpy() - def compute_loss_from_predictions( # type: ignore # pylint: disable=W0221 + def compute_loss_from_predictions( # type: ignore self, pred: np.ndarray, y: np.ndarray, reduction: str = "none", **kwargs - ) -> Union[np.ndarray, "torch.Tensor"]: + ) -> np.ndarray | "torch.Tensor": """ Compute the loss of the regressor for predictions `pred`. Does not apply preprocessing to the given `y`. @@ -579,10 +577,10 @@ def compute_loss_from_predictions( # type: ignore # pylint: disable=W0221 def compute_losses( self, - x: Union[np.ndarray, "torch.Tensor"], - y: Union[np.ndarray, "torch.Tensor"], + x: np.ndarray | "torch.Tensor", + y: np.ndarray | "torch.Tensor", reduction: str = "none", - ) -> Dict[str, Union[np.ndarray, "torch.Tensor"]]: + ) -> dict[str, np.ndarray | "torch.Tensor"]: """ Compute all loss components. @@ -597,13 +595,13 @@ def compute_losses( """ return {"total": self.compute_loss(x=x, y=y, reduction=reduction)} - def loss_gradient( # pylint: disable=W0221 + def loss_gradient( self, - x: Union[np.ndarray, "torch.Tensor"], - y: Union[np.ndarray, "torch.Tensor"], + x: np.ndarray | "torch.Tensor", + y: np.ndarray | "torch.Tensor", training_mode: bool = False, **kwargs, - ) -> Union[np.ndarray, "torch.Tensor"]: + ) -> np.ndarray | "torch.Tensor": """ Compute the gradient of the loss function w.r.t. `x`. 
@@ -677,7 +675,7 @@ def loss_gradient( # pylint: disable=W0221 # Compute gradients if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() @@ -700,14 +698,14 @@ def loss_gradient( # pylint: disable=W0221 return grads - def custom_loss_gradient( # pylint: disable=W0221 + def custom_loss_gradient( self, loss_fn, - x: Union[np.ndarray, "torch.Tensor"], - y: Union[np.ndarray, "torch.Tensor"], + x: np.ndarray | "torch.Tensor", + y: np.ndarray | "torch.Tensor", layer_name, training_mode: bool = False, - ) -> Union[np.ndarray, "torch.Tensor"]: + ) -> np.ndarray | "torch.Tensor": """ Compute the gradient of the loss function w.r.t. `x`. @@ -757,7 +755,7 @@ def custom_loss_gradient( # pylint: disable=W0221 # Compute gradients if self._use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self._optimizer) as scaled_loss: scaled_loss.backward() @@ -779,11 +777,11 @@ def custom_loss_gradient( # pylint: disable=W0221 def get_activations( # type: ignore self, - x: Union[np.ndarray, "torch.Tensor"], - layer: Optional[Union[int, str]] = None, + x: np.ndarray | "torch.Tensor", + layer: int | str | None = None, batch_size: int = 128, framework: bool = False, - ) -> Union[np.ndarray, "torch.Tensor"]: + ) -> np.ndarray | "torch.Tensor": """ Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and `nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by @@ -820,19 +818,17 @@ def get_activations( # type: ignore def get_feature(name): # the hook signature - def hook(model, input, output): # pylint: disable=W0622,W0613 + def hook(model, input, output): # pylint: disable=redefined-builtin,unused-argument self._features[name] = output return hook if not hasattr(self, "_features"): - self._features: Dict[str, torch.Tensor] = {} + self._features: dict[str, torch.Tensor] = {} # register forward hooks on the layers of choice if layer not in self._features: - interim_layer = dict([*self._model._model.named_modules()])[ # pylint: disable=W0212,W0622,W0613 - self._layer_names[layer_index] - ] + interim_layer = dict([*self._model._model.named_modules()])[self._layer_names[layer_index]] interim_layer.register_forward_hook(get_feature(self._layer_names[layer_index])) if framework: @@ -841,7 +837,7 @@ def hook(model, input, output): # pylint: disable=W0622,W0613 return self._features[self._layer_names[layer_index]] input_tensor = torch.from_numpy(x_preprocessed) self._model(input_tensor.to(self._device)) - return self._features[self._layer_names[layer_index]] # pylint: disable=W0212 + return self._features[self._layer_names[layer_index]] # Run prediction with batch processing results = [] @@ -856,14 +852,14 @@ def hook(model, input, output): # pylint: disable=W0622,W0613 # Run prediction for the current batch self._model(torch.from_numpy(x_preprocessed[begin:end]).to(self._device)) - layer_output = self._features[self._layer_names[layer_index]] # pylint: disable=W0212 + layer_output = self._features[self._layer_names[layer_index]] results.append(layer_output.detach().cpu().numpy()) results_array = np.concatenate(results) return results_array - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the 
backend framework. @@ -881,20 +877,19 @@ def save(self, filename: str, path: Optional[str] = None) -> None: if not os.path.exists(folder): os.makedirs(folder) - # pylint: disable=W0212 # disable pylint because access to _modules required torch.save(self._model._model.state_dict(), full_path + ".model") torch.save(self._optimizer.state_dict(), full_path + ".optimizer") # type: ignore logger.info("Model state dict saved in path: %s.", full_path + ".model") logger.info("Optimizer state dict saved in path: %s.", full_path + ".optimizer") - def __getstate__(self) -> Dict[str, Any]: + def __getstate__(self) -> dict[str, Any]: """ Use to ensure `PyTorchRegressor` can be pickled. :return: State dictionary with instance parameters. """ - # pylint: disable=W0212 + # disable pylint because access to _model required state = self.__dict__.copy() state["inner_model"] = copy.copy(state["_model"]._model) @@ -910,7 +905,7 @@ def __getstate__(self) -> Dict[str, Any]: return state - def __setstate__(self, state: Dict[str, Any]) -> None: + def __setstate__(self, state: dict[str, Any]) -> None: """ Use to ensure `PyTorchRegressor` can be unpickled. @@ -971,7 +966,6 @@ def __init__(self, model: torch.nn.Module): super().__init__() self._model = model - # pylint: disable=W0221 # disable pylint because of API requirements for function def forward(self, x): """ @@ -982,7 +976,7 @@ def forward(self, x): :return: a list of output layers, where the last 2 layers are logit and final outputs. :rtype: `list` """ - # pylint: disable=W0212 + # disable pylint because access to _model required result = [] @@ -1001,7 +995,7 @@ def forward(self, x): return result @property - def get_layers(self) -> List[str]: + def get_layers(self) -> list[str]: """ Return the hidden layers in the model, if applicable. @@ -1018,7 +1012,7 @@ def get_layers(self) -> List[str]: result = [] if isinstance(self._model, torch.nn.Module): - for name, _ in self._model._modules.items(): # pylint: disable=W0212 + for name, _ in self._model._modules.items(): result.append(name) else: # pragma: no cover diff --git a/art/estimators/regression/scikitlearn.py b/art/estimators/regression/scikitlearn.py index e95cf2150a..31685214ba 100644 --- a/art/estimators/regression/scikitlearn.py +++ b/art/estimators/regression/scikitlearn.py @@ -18,11 +18,13 @@ """ This module implements the regressors for scikit-learn models. 
""" +from __future__ import annotations + import logging import os import pickle from copy import deepcopy -from typing import List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -31,7 +33,7 @@ from art import config if TYPE_CHECKING: - # pylint: disable=C0412 + import sklearn from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE @@ -52,9 +54,9 @@ class ScikitlearnRegressor(RegressorMixin, ScikitlearnEstimator): def __init__( self, model: "sklearn.base.BaseEstimator", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -79,7 +81,7 @@ def __init__( self._input_shape = self._get_input_shape(model) @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -123,7 +125,7 @@ def predict(self, x: np.ndarray, **kwargs) -> np.ndarray: return predictions - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. @@ -195,9 +197,9 @@ class ScikitlearnDecisionTreeRegressor(ScikitlearnRegressor): def __init__( self, model: "sklearn.tree.DecisionTreeRegressor", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -212,7 +214,7 @@ def __init__( used for data preprocessing. The first value will be subtracted from the input. The input will then be divided by the second one. """ - # pylint: disable=E0001 + import sklearn if not isinstance(model, sklearn.tree.DecisionTreeRegressor): @@ -286,10 +288,10 @@ def get_samples_at_node(self, node_id: int) -> int: """ return self.model.tree_.n_node_samples[node_id] - def _get_leaf_nodes(self, node_id, i_tree, class_label, box) -> List["LeafNode"]: + def _get_leaf_nodes(self, node_id, i_tree, class_label, box) -> list["LeafNode"]: from art.metrics.verification_decisions_trees import LeafNode, Box, Interval - leaf_nodes: List[LeafNode] = [] + leaf_nodes: list[LeafNode] = [] if self.get_left_child(node_id) != self.get_right_child(node_id): diff --git a/art/estimators/scikitlearn.py b/art/estimators/scikitlearn.py index 10ffe679f2..8d0de4c495 100644 --- a/art/estimators/scikitlearn.py +++ b/art/estimators/scikitlearn.py @@ -18,8 +18,10 @@ """ This module implements the abstract estimator for scikit-learn models. """ +from __future__ import annotations + import logging -from typing import Optional, Tuple + from art.estimators.estimator import BaseEstimator @@ -31,8 +33,8 @@ class ScikitlearnEstimator(BaseEstimator): Estimator class for scikit-learn models. 
""" - def _get_input_shape(self, model) -> Optional[Tuple[int, ...]]: - _input_shape: Optional[Tuple[int, ...]] + def _get_input_shape(self, model) -> tuple[int, ...] | None: + _input_shape: tuple[int, ...] | None if hasattr(model, "n_features_"): _input_shape = (model.n_features_,) elif hasattr(model, "n_features_in_"): diff --git a/art/estimators/speech_recognition/pytorch_deep_speech.py b/art/estimators/speech_recognition/pytorch_deep_speech.py index 604aa50497..76a220b938 100644 --- a/art/estimators/speech_recognition/pytorch_deep_speech.py +++ b/art/estimators/speech_recognition/pytorch_deep_speech.py @@ -21,8 +21,10 @@ | Paper link: https://arxiv.org/abs/1512.02595 """ +from __future__ import annotations + import logging -from typing import TYPE_CHECKING, List, Optional, Tuple, Union +from typing import TYPE_CHECKING import numpy as np @@ -32,7 +34,7 @@ from art.utils import get_file if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from deepspeech_pytorch.model import DeepSpeech @@ -56,12 +58,12 @@ class PyTorchDeepSpeech(PytorchSpeechRecognizerMixin, SpeechRecognizerMixin, PyT def __init__( self, - model: Optional["DeepSpeech"] = None, - pretrained_model: Optional[str] = None, - filename: Optional[str] = None, - url: Optional[str] = None, + model: "DeepSpeech" | None = None, + pretrained_model: str | None = None, + filename: str | None = None, + url: str | None = None, use_half: bool = False, - optimizer: Optional["torch.optim.Optimizer"] = None, # type: ignore + optimizer: "torch.optim.Optimizer" | None = None, # type: ignore use_amp: bool = False, opt_level: str = "O1", decoder_type: str = "greedy", @@ -73,9 +75,9 @@ def __init__( cutoff_prob: float = 1.0, beam_width: int = 10, lm_workers: int = 4, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = None, device_type: str = "gpu", verbose: bool = True, @@ -306,7 +308,7 @@ def __init__( # Setup for AMP use if self.use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp if self.optimizer is None: logger.warning( @@ -331,9 +333,7 @@ def __init__( loss_scale=1.0, ) - def predict( - self, x: np.ndarray, batch_size: int = 128, **kwargs - ) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]: + def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> tuple[np.ndarray, np.ndarray] | np.ndarray: """ Perform prediction for a batch of inputs. 
@@ -474,7 +474,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: # Compute gradients if self.use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() @@ -497,7 +497,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: results = self._apply_preprocessing_gradient(x, results) if x.dtype != object: - results = np.array([i for i in results], dtype=x.dtype) # pylint: disable=R1721 + results = np.array([i for i in results], dtype=x.dtype) # pylint: disable=unnecessary-comprehension assert results.shape == x.shape and results.dtype == x.dtype # Unfreeze batch norm layers again @@ -585,7 +585,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in # Actual training if self.use_amp: # pragma: no cover - from apex import amp # pylint: disable=E0611 + from apex import amp with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() @@ -597,7 +597,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in def compute_loss_and_decoded_output( self, masked_adv_input: "torch.Tensor", original_output: np.ndarray, **kwargs - ) -> Tuple["torch.Tensor", np.ndarray]: + ) -> tuple["torch.Tensor", np.ndarray]: """ Compute loss function and decoded output. @@ -658,7 +658,7 @@ def _preprocess_transform_model_input( x: "torch.Tensor", y: np.ndarray, real_lengths: np.ndarray, - ) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", List]: + ) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", list]: """ Apply preprocessing and then transform the user input space into the model input space. This function is used by the ASR attack to attack into the PyTorchDeepSpeech estimator whose defences are called with the @@ -699,12 +699,12 @@ def _preprocess_transform_model_input( def _transform_model_input( self, - x: Union[np.ndarray, "torch.Tensor"], - y: Optional[np.ndarray] = None, + x: np.ndarray | "torch.Tensor", + y: np.ndarray | None = None, compute_gradient: bool = False, tensor_input: bool = False, - real_lengths: Optional[np.ndarray] = None, - ) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", List]: + real_lengths: np.ndarray | None = None, + ) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", list]: """ Transform the user input space into the model input space. @@ -834,7 +834,7 @@ def sample_rate(self) -> int: return sample_rate @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. 
@@ -888,9 +888,7 @@ def opt_level(self) -> str: """ return self._opt_level # type: ignore - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: raise NotImplementedError def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: diff --git a/art/estimators/speech_recognition/pytorch_espresso.py b/art/estimators/speech_recognition/pytorch_espresso.py index e789b493b8..b2db40838c 100644 --- a/art/estimators/speech_recognition/pytorch_espresso.py +++ b/art/estimators/speech_recognition/pytorch_espresso.py @@ -21,10 +21,12 @@ | Paper link: https://arxiv.org/abs/1909.08723 """ +from __future__ import annotations + import ast from argparse import Namespace import logging -from typing import Dict, List, Optional, Tuple, TYPE_CHECKING, Union +from typing import TYPE_CHECKING import numpy as np @@ -34,7 +36,7 @@ from art.utils import get_file if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from espresso.models import SpeechTransformerModel @@ -59,11 +61,11 @@ class PyTorchEspresso(PytorchSpeechRecognizerMixin, SpeechRecognizerMixin, PyTor def __init__( self, - espresso_config_filepath: Optional[str] = None, - model: Optional[str] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + espresso_config_filepath: str | None = None, + model: str | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = None, device_type: str = "gpu", verbose: bool = True, @@ -313,7 +315,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: results = self._apply_preprocessing_gradient(x_in, results) if x.dtype != object: - results = np.array([i for i in results], dtype=x.dtype) # pylint: disable=R1721 + results = np.array([i for i in results], dtype=x.dtype) # pylint: disable=unnecessary-comprehension assert results.shape == x.shape and results.dtype == x.dtype else: results = np.array([np.squeeze(res) for res in results], dtype=object) @@ -341,10 +343,10 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in def _transform_model_input( self, - x: Union[np.ndarray, "torch.Tensor"], - y: Optional[np.ndarray] = None, + x: np.ndarray | "torch.Tensor", + y: np.ndarray | None = None, compute_gradient: bool = False, - ) -> Tuple[Dict, List]: + ) -> tuple[dict, list]: """ Transform the user input space into the model input space. @@ -359,7 +361,7 @@ def _transform_model_input( import torch from fairseq.data import data_utils - def _collate_fn(batch: List) -> dict: + def _collate_fn(batch: list) -> dict: """ Collate function that transforms a list of numpy array or torch tensor representing a batch into a dictionary that Espresso takes as input. @@ -462,7 +464,7 @@ def _preprocess_transform_model_input( self, x: "torch.Tensor", y: np.ndarray, - ) -> Tuple[Dict, List]: + ) -> tuple[dict, list]: """ Apply preprocessing and then transform the user input space into the model input space. 
This function is used by the ASR attack to attack into the PyTorchDeepSpeech estimator whose defences are called with the @@ -501,7 +503,7 @@ def _preprocess_transform_model_input( def compute_loss_and_decoded_output( self, masked_adv_input: "torch.Tensor", original_output: np.ndarray, **kwargs - ) -> Tuple["torch.Tensor", np.ndarray]: + ) -> tuple["torch.Tensor", np.ndarray]: """ Compute loss function and decoded output. @@ -568,7 +570,7 @@ def sample_rate(self) -> int: return self._sampling_rate @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -594,9 +596,7 @@ def device(self) -> "torch.device": """ return self._device - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: raise NotImplementedError def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: diff --git a/art/estimators/speech_recognition/speech_recognizer.py b/art/estimators/speech_recognition/speech_recognizer.py index 5c96fbbc32..0778a70eb6 100644 --- a/art/estimators/speech_recognition/speech_recognizer.py +++ b/art/estimators/speech_recognition/speech_recognizer.py @@ -20,7 +20,7 @@ recognizers in ART. """ from abc import ABC, abstractmethod -from typing import Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -43,7 +43,7 @@ class PytorchSpeechRecognizerMixin(ABC): @abstractmethod def compute_loss_and_decoded_output( self, masked_adv_input: "torch.Tensor", original_output: np.ndarray, **kwargs - ) -> Tuple["torch.Tensor", np.ndarray]: + ) -> tuple["torch.Tensor", np.ndarray]: """ Compute loss function and decoded output. diff --git a/art/estimators/speech_recognition/tensorflow_lingvo.py b/art/estimators/speech_recognition/tensorflow_lingvo.py index 64af5d5e20..73e08a5ff1 100644 --- a/art/estimators/speech_recognition/tensorflow_lingvo.py +++ b/art/estimators/speech_recognition/tensorflow_lingvo.py @@ -18,12 +18,12 @@ """ This module implements task-specific estimators for automatic speech recognition in TensorFlow. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import os import sys -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any import numpy as np @@ -66,7 +66,7 @@ class TensorFlowLingvoASR(SpeechRecognizerMixin, TensorFlowV2Estimator): # Note: Support for the estimator is pinned to Lingvo version 0.6.4. Some additional source files that are not # provided by pip package need to be downloaded. 
Those downloads are pinned to the following commit:
     # https://github.com/tensorflow/lingvo/commit/9961306adf66f7340e27f109f096c9322d4f9636
-    _LINGVO_CFG: Dict[str, Any] = {
+    _LINGVO_CFG: dict[str, Any] = {
         "path": os.path.join(config.ART_DATA_PATH, "lingvo"),
         "model_data": {
             "uri": "http://cseweb.ucsd.edu/~yaq007/ckpt-00908156.data-00000-of-00001",
@@ -106,13 +106,13 @@ class TensorFlowLingvoASR(SpeechRecognizerMixin, TensorFlowV2Estimator):
     def __init__(
         self,
-        clip_values: Optional["CLIP_VALUES_TYPE"] = None,
-        channels_first: Optional[bool] = None,
-        preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None,
-        postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None,
+        clip_values: "CLIP_VALUES_TYPE" | None = None,
+        channels_first: bool | None = None,
+        preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None,
+        postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None,
         preprocessing: "PREPROCESSING_TYPE" = None,
-        random_seed: Optional[int] = None,
-        sess: Optional["Session"] = None,
+        random_seed: int | None = None,
+        sess: "Session" | None = None,
     ):
         """
         Initialization.
 
@@ -183,16 +183,16 @@ def __init__(
         self._model = model
         self._task = task
         self._cluster = cluster
-        self._metrics: Optional[Tuple[Union[Dict[str, "Tensor"], Dict[str, Tuple["Tensor", "Tensor"]]], ...]] = None
+        self._metrics: tuple[dict[str, "Tensor"] | dict[str, tuple["Tensor", "Tensor"]], ...] | None = None
 
         # add prediction and loss gradient ops to graph
-        self._predict_batch_op: Dict[str, "Tensor"] = self._predict_batch(
+        self._predict_batch_op: dict[str, "Tensor"] = self._predict_batch(
             self._x_padded, self._y_target, self._mask_frequency
         )
         self._loss_gradient_op: "Tensor" = self._loss_gradient(self._x_padded, self._y_target, self._mask_frequency)
 
     @property
-    def input_shape(self) -> Tuple[int, ...]:
+    def input_shape(self) -> tuple[int, ...]:
         """
         Return the shape of one input sample.
 
@@ -241,7 +241,7 @@ def _load_model(self): from lingvo.tasks.asr import decoder from asr import decoder_patched - decoder.AsrDecoderBase._ComputeMetrics = decoder_patched.AsrDecoderBase._ComputeMetrics # pylint: disable=W0212 + decoder.AsrDecoderBase._ComputeMetrics = decoder_patched.AsrDecoderBase._ComputeMetrics # check and download Lingvo ASR vocab # vocab_path = self._check_and_download_vocab() @@ -255,7 +255,7 @@ def _load_model(self): # register model params model_name = "asr.librispeech.Librispeech960Wpm" model_imports.ImportParams(model_name) - params = model_registry._ModelRegistryHelper.GetParams(model_name, "Test") # pylint: disable=W0212 + params = model_registry._ModelRegistryHelper.GetParams(model_name, "Test") # set random seed parameter if self.random_seed is not None: @@ -355,7 +355,7 @@ def _create_asr_frontend(): return features @staticmethod - def _pad_audio_input(x: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + def _pad_audio_input(x: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """Apply padding to a batch of audio samples such that it has shape of (batch_size, max_length).""" max_length = max(map(len, x)) batch_size = x.shape[0] @@ -375,7 +375,7 @@ def _pad_audio_input(x: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray] mask_frequency[i, : frequency_length[i], :] = 1 return x_padded, x_mask, mask_frequency - def _predict_batch(self, x: "Tensor", y: "Tensor", mask_frequency: "Tensor") -> Dict[str, "Tensor"]: + def _predict_batch(self, x: "Tensor", y: "Tensor", mask_frequency: "Tensor") -> dict[str, "Tensor"]: """Create prediction operation for a batch of padded inputs.""" import tensorflow.compat.v1 as tf1 @@ -390,9 +390,7 @@ def _predict_batch(self, x: "Tensor", y: "Tensor", mask_frequency: "Tensor") -> return predictions - def predict( - self, x: np.ndarray, batch_size: int = 128, **kwargs - ) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]: + def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> tuple[np.ndarray, np.ndarray] | np.ndarray: """ Perform batch-wise prediction for given inputs. @@ -461,9 +459,7 @@ def _loss_gradient(self, x: "Tensor", y: "Tensor", mask: "Tensor") -> "Tensor": loss_gradient = tf1.gradients(loss, [x])[0] return loss_gradient - def loss_gradient( # pylint: disable=W0221 - self, x: np.ndarray, y: np.ndarray, batch_mode: bool = False, **kwargs - ) -> np.ndarray: + def loss_gradient(self, x: np.ndarray, y: np.ndarray, batch_mode: bool = False, **kwargs) -> np.ndarray: """ Compute the gradient of the loss function w.r.t. `x`. @@ -554,9 +550,7 @@ def _loss_gradient_per_sequence(self, x: np.ndarray, y: np.ndarray) -> np.ndarra dtype = np.float32 if x.ndim != 1 else object return np.array(gradients, dtype=dtype) - def get_activations( - self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False - ) -> np.ndarray: + def get_activations(self, x: np.ndarray, layer: int | str, batch_size: int, framework: bool = False) -> np.ndarray: raise NotImplementedError def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: diff --git a/art/estimators/tensorflow.py b/art/estimators/tensorflow.py index 14bdffc0a7..0a5aebfed7 100644 --- a/art/estimators/tensorflow.py +++ b/art/estimators/tensorflow.py @@ -19,7 +19,7 @@ This module implements the abstract estimators `TensorFlowEstimator` and `TensorFlowV2Estimator` for TensorFlow models. 
""" import logging -from typing import Any, Tuple, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np @@ -150,7 +150,7 @@ def _check_params(self) -> None: (isinstance(p, PreprocessorTensorFlowV2) for p in self.preprocessing_operations) ) - def _apply_preprocessing(self, x, y, fit: bool = False) -> Tuple[Any, Any]: + def _apply_preprocessing(self, x, y, fit: bool = False) -> tuple[Any, Any]: """ Apply all preprocessing defences of the estimator on the raw inputs `x` and `y`. This function is should only be called from function `_apply_preprocessing`. diff --git a/art/evaluations/security_curve/security_curve.py b/art/evaluations/security_curve/security_curve.py index 7e21ae33fe..3ab0283440 100644 --- a/art/evaluations/security_curve/security_curve.py +++ b/art/evaluations/security_curve/security_curve.py @@ -20,7 +20,9 @@ Examples of Security Curves can be found in Figure 6 of Madry et al., 2017 (https://arxiv.org/abs/1706.06083). """ -from typing import List, Optional, Tuple, TYPE_CHECKING, Union +from __future__ import annotations + +from typing import TYPE_CHECKING import numpy as np @@ -38,7 +40,7 @@ class SecurityCurve(Evaluation): Examples of Security Curves can be found in Figure 6 of Madry et al., 2017 (https://arxiv.org/abs/1706.06083). """ - def __init__(self, eps: Union[int, List[float], List[int]]): + def __init__(self, eps: int | list[float] | list[int]): """ Create an instance of a Security Curve evaluation. @@ -46,18 +48,17 @@ def __init__(self, eps: Union[int, List[float], List[int]]): """ self.eps = eps - self.eps_list: List[float] = [] - self.accuracy_adv_list: List[float] = [] - self.accuracy: Optional[float] = None + self.eps_list: list[float] = [] + self.accuracy_adv_list: list[float] = [] + self.accuracy: float | None = None - # pylint: disable=W0221 def evaluate( # type: ignore self, classifier: "CLASSIFIER_LOSS_GRADIENTS_TYPE", x: np.ndarray, y: np.ndarray, - **kwargs: Union[str, bool, int, float], - ) -> Tuple[List[float], List[float], float]: + **kwargs: str | bool | int | float, + ) -> tuple[list[float], list[float], float]: """ Evaluate the Security Curve of a classifier using Projected Gradient Descent. @@ -66,7 +67,7 @@ def evaluate( # type: ignore :param y: True labels for input data `x`. :param kwargs: Keyword arguments for the Projected Gradient Descent attack used for evaluation, except keywords `classifier` and `eps`. - :return: List of evaluated `eps` values, List of adversarial accuracies, and benign accuracy. + :return: List of evaluated `eps` values, list of adversarial accuracies, and benign accuracy. """ kwargs.pop("classifier", None) @@ -119,7 +120,7 @@ def _check_gradient( classifier: "CLASSIFIER_LOSS_GRADIENTS_TYPE", x: np.ndarray, y: np.ndarray, - **kwargs: Union[str, bool, int, float], + **kwargs: str | bool | int | float, ) -> None: """ Check if potential gradient obfuscation can be detected. Projected Gradient Descent with 100 iterations is run diff --git a/art/exceptions.py b/art/exceptions.py index 6d4e605191..13bd4f8d46 100644 --- a/art/exceptions.py +++ b/art/exceptions.py @@ -18,7 +18,9 @@ """ Module containing ART's exceptions. """ -from typing import List, Tuple, Type, Union +from __future__ import annotations + +from typing import Type class EstimatorError(TypeError): @@ -26,7 +28,7 @@ class EstimatorError(TypeError): Basic exception for errors raised by unexpected estimator types. 
""" - def __init__(self, this_class, class_expected_list: List[Union[Type, Tuple[Type]]], classifier_given) -> None: + def __init__(self, this_class, class_expected_list: list[Type | tuple[Type]], classifier_given) -> None: super().__init__() self.this_class = this_class self.class_expected_list = class_expected_list diff --git a/art/experimental/estimators/classification/jax.py b/art/experimental/estimators/classification/jax.py index 4c7310307c..f2b2caa980 100644 --- a/art/experimental/estimators/classification/jax.py +++ b/art/experimental/estimators/classification/jax.py @@ -18,11 +18,12 @@ """ This module implements the classifier `JaxClassifier` for Jax models. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from collections.abc import Callable import logging import random -from typing import Any, Callable, Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np @@ -58,16 +59,16 @@ class JaxClassifier(ClassGradientsMixin, ClassifierMixin, JaxEstimator): def __init__( self, - model: List, + model: list, predict_func: Callable, loss_func: Callable, update_func: Callable, - input_shape: Tuple[int, ...], + input_shape: tuple[int, ...], nb_classes: int, channels_first: bool = False, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, - preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, - postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, + preprocessing_defences: "Preprocessor" | list["Preprocessor"] | None = None, + postprocessing_defences: "Postprocessor" | list["Postprocessor"] | None = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: """ @@ -106,11 +107,11 @@ def __init__( self._input_shape = input_shape @property - def model(self) -> List: + def model(self) -> list: return self._model @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. @@ -235,9 +236,7 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg """ raise NotImplementedError - def class_gradient( # pylint: disable=W0221 - self, x: np.ndarray, label: Optional[Union[int, List[int], np.ndarray]] = None, **kwargs - ) -> np.ndarray: + def class_gradient(self, x: np.ndarray, label: int | list[int] | np.ndarray | None = None, **kwargs) -> np.ndarray: """ Compute per-class derivatives w.r.t. `x`. @@ -255,7 +254,7 @@ def class_gradient( # pylint: disable=W0221 def get_activations( self, x: np.ndarray, - layer: Optional[Union[int, str]] = None, + layer: int | str | None = None, batch_size: int = 128, framework: bool = False, ) -> np.ndarray: @@ -272,7 +271,7 @@ def get_activations( """ raise NotImplementedError - def save(self, filename: str, path: Optional[str] = None) -> None: + def save(self, filename: str, path: str | None = None) -> None: """ Save a model to file in the format specific to the backend framework. @@ -282,7 +281,7 @@ def save(self, filename: str, path: Optional[str] = None) -> None: """ raise NotImplementedError - def __getstate__(self) -> Dict[str, Any]: + def __getstate__(self) -> dict[str, Any]: """ Use to ensure `JaxClassifier` can be pickled. 
@@ -290,7 +289,7 @@ def __getstate__(self) -> Dict[str, Any]: """ raise NotImplementedError - def __setstate__(self, state: Dict[str, Any]) -> None: + def __setstate__(self, state: dict[str, Any]) -> None: """ Use to ensure `JaxClassifier` can be unpickled. diff --git a/art/experimental/estimators/jax.py b/art/experimental/estimators/jax.py index 9a300a7687..f168257e8f 100644 --- a/art/experimental/estimators/jax.py +++ b/art/experimental/estimators/jax.py @@ -88,6 +88,3 @@ def set_params(self, **kwargs) -> None: """ super().set_params(**kwargs) self._check_params() - - def _check_params(self) -> None: - super()._check_params() diff --git a/art/metrics/metrics.py b/art/metrics/metrics.py index 97c84afa50..f6d46ed24b 100644 --- a/art/metrics/metrics.py +++ b/art/metrics/metrics.py @@ -19,11 +19,11 @@ Module implementing varying metrics for assessing model robustness. These fall mainly under two categories: attack-dependent and attack-independent. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations from functools import reduce import logging -from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING +from typing import Any, TYPE_CHECKING import numpy as np import numpy.linalg as la @@ -43,7 +43,7 @@ logger = logging.getLogger(__name__) -SUPPORTED_METHODS: Dict[str, Dict[str, Any]] = { +SUPPORTED_METHODS: dict[str, dict[str, Any]] = { "auto": { "class": AutoAttack, "params": {"eps_step": 0.1}, @@ -64,7 +64,7 @@ } -def get_crafter(classifier: "CLASSIFIER_TYPE", attack: str, params: Optional[Dict[str, Any]] = None) -> "EvasionAttack": +def get_crafter(classifier: "CLASSIFIER_TYPE", attack: str, params: dict[str, Any] | None = None) -> "EvasionAttack": """ Create an attack instance to craft adversarial samples. @@ -90,10 +90,10 @@ def get_crafter(classifier: "CLASSIFIER_TYPE", attack: str, params: Optional[Dic def adversarial_accuracy( classifier: "CLASSIFIER_TYPE", x: np.ndarray, - y: Optional[np.ndarray] = None, - attack_name: Optional[str] = None, - attack_params: Optional[Dict[str, Any]] = None, - attack_crafter: Optional[EvasionAttack] = None, + y: np.ndarray | None = None, + attack_name: str | None = None, + attack_params: dict[str, Any] | None = None, + attack_crafter: EvasionAttack | None = None, ) -> float: """ Compute the adversarial accuracy of a classifier object over the sample `x` for a given adversarial crafting @@ -147,8 +147,8 @@ def empirical_robustness( classifier: "CLASSIFIER_TYPE", x: np.ndarray, attack_name: str, - attack_params: Optional[Dict[str, Any]] = None, -) -> Union[float, np.ndarray]: + attack_params: dict[str, Any] | None = None, +) -> float | np.ndarray: """ Compute the Empirical Robustness of a classifier object over the sample `x` for a given adversarial crafting method `attack`. This is equivalent to computing the minimal perturbation that the attacker must introduce for a @@ -247,12 +247,12 @@ def clever( batch_size: int, radius: float, norm: int, - target: Union[int, List[int], None] = None, + target: int | list[int] | None = None, target_sort: bool = False, c_init: float = 1.0, pool_factor: int = 10, verbose: bool = True, -) -> Optional[np.ndarray]: +) -> np.ndarray | None: """ Compute CLEVER score for an untargeted attack. 
@@ -286,7 +286,7 @@ def clever( else: # Assume it's iterable target_classes = target - score_list: List[Optional[float]] = [] + score_list: list[float | None] = [] for j in tqdm(target_classes, desc="CLEVER untargeted", disable=not verbose): if j == pred_class: score_list.append(None) @@ -440,8 +440,8 @@ def clever_t( def wasserstein_distance( u_values: np.ndarray, v_values: np.ndarray, - u_weights: Optional[np.ndarray] = None, - v_weights: Optional[np.ndarray] = None, + u_weights: np.ndarray | None = None, + v_weights: np.ndarray | None = None, ) -> np.ndarray: """ Compute the first Wasserstein distance between two 1D distributions. diff --git a/art/metrics/privacy/membership_leakage.py b/art/metrics/privacy/membership_leakage.py index 1a4defc427..ceff04165d 100644 --- a/art/metrics/privacy/membership_leakage.py +++ b/art/metrics/privacy/membership_leakage.py @@ -18,8 +18,8 @@ """ This module implements membership leakage metrics. """ -from __future__ import absolute_import, division, print_function, unicode_literals -from typing import TYPE_CHECKING, Optional, Tuple +from __future__ import absolute_import, division, print_function, unicode_literals, annotations +from typing import TYPE_CHECKING from enum import Enum, auto import numpy as np @@ -42,15 +42,15 @@ class ComparisonType(Enum): DIFFERENCE = auto() -def PDTP( # pylint: disable=C0103 +def PDTP( # pylint: disable=invalid-name target_estimator: "CLASSIFIER_TYPE", extra_estimator: "CLASSIFIER_TYPE", x: np.ndarray, y: np.ndarray, - indexes: Optional[np.ndarray] = None, + indexes: np.ndarray | None = None, num_iter: int = 10, - comparison_type: Optional[ComparisonType] = ComparisonType.RATIO, -) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + comparison_type: ComparisonType | None = ComparisonType.RATIO, +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Compute the pointwise differential training privacy metric for the given classifier and training set. @@ -148,13 +148,13 @@ def PDTP( # pylint: disable=C0103 return avg_per_sample, worse_per_sample, std_dev_per_sample -def SHAPr( # pylint: disable=C0103 +def SHAPr( # pylint: disable=invalid-name target_estimator: "CLASSIFIER_TYPE", x_train: np.ndarray, y_train: np.ndarray, x_test: np.ndarray, y_test: np.ndarray, - knn_metric: Optional[str] = None, + knn_metric: str | None = None, ) -> np.ndarray: """ Compute the SHAPr membership privacy risk metric for the given classifier and training set. diff --git a/art/metrics/privacy/worst_case_mia_score.py b/art/metrics/privacy/worst_case_mia_score.py index 27af32ab29..72588ae1a1 100644 --- a/art/metrics/privacy/worst_case_mia_score.py +++ b/art/metrics/privacy/worst_case_mia_score.py @@ -18,10 +18,10 @@ """ This module implements a metric for inference attack worst case accuracy measurement. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, List, Tuple, Union + import numpy as np from sklearn.metrics import roc_curve @@ -32,13 +32,13 @@ THR = float # Threshold of the binary decision -def _calculate_roc_for_fpr(y_true: np.ndarray, y_proba: np.ndarray, targeted_fpr: float) -> Tuple[FPR, TPR, THR]: +def _calculate_roc_for_fpr(y_true: np.ndarray, y_proba: np.ndarray, targeted_fpr: float) -> tuple[FPR, TPR, THR]: """ Get FPR, TPR and, THRESHOLD based on the targeted_fpr (such that FPR <= targeted_fpr) :param y_true: True attack labels. 
:param y_proba: Predicted attack probabilities. :param targeted_fpr: the targeted False Positive Rate, ROC will be calculated based on this FPR. - :return: tuple that contains (Achieved FPR, TPR, Threshold). + :return: Tuple that contains (Achieved FPR, TPR, Threshold). """ fpr, tpr, thr = roc_curve(y_true=y_true, y_score=y_proba) @@ -54,9 +54,9 @@ def _calculate_roc_for_fpr(y_true: np.ndarray, y_proba: np.ndarray, targeted_fpr def get_roc_for_fpr( attack_proba: np.ndarray, attack_true: np.ndarray, - target_model_labels: Optional[np.ndarray] = None, + target_model_labels: np.ndarray | None = None, targeted_fpr: float = 0.001, -) -> Union[List[Tuple[FPR, TPR, THR]], List[Tuple[int, FPR, TPR, THR]]]: +) -> list[tuple[FPR, TPR, THR]] | list[tuple[int, FPR, TPR, THR]]: """ Compute the attack TPR, THRESHOLD and achieved FPR based on the targeted FPR. This implementation supports only binary attack prediction labels {0,1}. The returned THRESHOLD defines the decision threshold on the attack @@ -98,7 +98,7 @@ def get_roc_for_multi_fprs( attack_proba: np.ndarray, attack_true: np.ndarray, targeted_fprs: np.ndarray, -) -> Tuple[List[FPR], List[TPR], List[THR]]: +) -> tuple[list[FPR], list[TPR], list[THR]]: """ Compute the attack ROC based on the targeted FPRs. This implementation supports only binary attack prediction labels. The returned list of THRESHOLDs defines the decision threshold on the attack diff --git a/art/metrics/verification_decisions_trees.py b/art/metrics/verification_decisions_trees.py index 15fc582f38..0c7d7f5c01 100644 --- a/art/metrics/verification_decisions_trees.py +++ b/art/metrics/verification_decisions_trees.py @@ -18,10 +18,10 @@ """ This module implements robustness verifications for decision-tree-based models. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Dict, List, Optional, Tuple, Union, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import trange @@ -55,7 +55,7 @@ class Box: Representation of a box of intervals bounds. """ - def __init__(self, intervals: Optional[Dict[int, Interval]] = None) -> None: + def __init__(self, intervals: dict[int, Interval] | None = None) -> None: """ A box of intervals. @@ -119,9 +119,9 @@ class LeafNode: def __init__( self, - tree_id: Optional[int], + tree_id: int | None, class_label: int, - node_id: Optional[int], + node_id: int | None, box: Box, value: float, ) -> None: @@ -150,7 +150,7 @@ class Tree: Representation of a decision tree. """ - def __init__(self, class_id: Optional[int], leaf_nodes: List[LeafNode]) -> None: + def __init__(self, class_id: int | None, leaf_nodes: list[LeafNode]) -> None: """ Create a decision tree representation. @@ -189,7 +189,7 @@ def verify( nb_search_steps: int = 10, max_clique: int = 2, max_level: int = 2, - ) -> Tuple[float, float]: + ) -> tuple[float, float]: """ Verify the robustness of the classifier on the dataset `(x, y)`. 
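A related cleanup in these hunks swaps numeric pylint message codes for their symbolic names: `C0103` becomes `invalid-name` in the membership-leakage metrics above, and `R1702`/`R0916` become `too-many-nested-blocks`/`too-many-boolean-expressions` in the verification and EoT modules that follow. The suppression itself is unchanged, but the reader no longer needs the pylint code table. A small sketch of the style, loosely modelled on the `_check_params` validators in this patch (the function is illustrative only):

    def _check_delta(delta):
        # previously written as "# pylint: disable=R0916"; same suppression, readable name:
        # pylint: disable=too-many-boolean-expressions
        if (
            not isinstance(delta, (int, float, tuple))
            or (isinstance(delta, tuple) and len(delta) != 2)
            or (isinstance(delta, tuple) and len(delta) == 2 and delta[0] > delta[1])
            or (isinstance(delta, (int, float)) and delta < 0)
        ):
            raise ValueError("delta must be a non-negative number or an ordered 2-tuple.")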
@@ -220,17 +220,17 @@ def verify( num_initial_successes: int = 0 num_samples: int = x.shape[0] - # pylint: disable=R1702 + # pylint: disable=too-many-nested-blocks pbar = trange(num_samples, desc="Decision tree verification", disable=not self.verbose) for i_sample in pbar: eps: float = eps_init - robust_log: List[bool] = [] + robust_log: list[bool] = [] i_robust = None i_not_robust = None eps_robust: float = 0.0 eps_not_robust: float = 0.0 - best_score: Optional[float] + best_score: float | None for i_step in range(nb_search_steps): logger.info("Search step %d: eps = %.4g", i_step, eps) @@ -293,14 +293,14 @@ def verify( def _get_k_partite_clique( self, - accessible_leaves: List[List[LeafNode]], + accessible_leaves: list[list[LeafNode]], label: int, - target_label: Optional[int], - ) -> Tuple[float, List]: + target_label: int | None, + ) -> tuple[float, list]: """ Find the K partite cliques among the accessible leaf nodes. - :param accessible_leaves: List of lists of accessible leaf nodes. + :param accessible_leaves: list of lists of accessible leaf nodes. :param label: The try label of the current sample. :param target_label: The target label. :return: The best score and a list of new cliques. @@ -308,10 +308,10 @@ def _get_k_partite_clique( new_nodes_list = [] best_scores_sum = 0.0 - # pylint: disable=R1702 + # pylint: disable=too-many-nested-blocks for start_tree in range(0, len(accessible_leaves), self.max_clique): - cliques_old: List[Dict[str, Union[Box, float]]] = [] - cliques_new: List[Dict[str, Union[Box, float]]] = [] + cliques_old: list[dict[str, Box | float]] = [] + cliques_new: list[dict[str, Box | float]] = [] # Start searching for cliques for accessible_leaf in accessible_leaves[start_tree]: @@ -381,7 +381,7 @@ def _get_k_partite_clique( return best_scores_sum, new_nodes_list - def _get_best_score(self, i_sample: int, eps: float, norm: float, target_label: Optional[int]) -> float: + def _get_best_score(self, i_sample: int, eps: float, norm: float, target_label: int | None) -> float: """ Get the list of best scores. @@ -444,8 +444,8 @@ def _get_distance(self, box: Box, i_sample: int, norm: float) -> float: return resulting_distance def _get_accessible_leaves( - self, i_sample: int, eps: float, norm: float, target_label: Optional[int] - ) -> List[List[LeafNode]]: + self, i_sample: int, eps: float, norm: float, target_label: int | None + ) -> list[list[LeafNode]]: """ Determine the leaf nodes accessible within the attack budget. diff --git a/art/preprocessing/audio/l_filter/numpy.py b/art/preprocessing/audio/l_filter/numpy.py index d0353cae01..9535786f13 100644 --- a/art/preprocessing/audio/l_filter/numpy.py +++ b/art/preprocessing/audio/l_filter/numpy.py @@ -20,10 +20,10 @@ finite impulse response (FIR) filter. This implementation is a wrapper around the `scipy.signal.lfilter` function in the `scipy` package. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING from scipy.signal import lfilter import numpy as np @@ -51,8 +51,8 @@ def __init__( numerator_coef: np.ndarray = np.array([1.0]), denominator_coef: np.ndarray = np.array([1.0]), axis: int = -1, - initial_cond: Optional[np.ndarray] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + initial_cond: np.ndarray | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = False, apply_predict: bool = True, verbose: bool = False, @@ -83,7 +83,7 @@ def __init__( self.verbose = verbose self._check_params() - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply filter to sample `x`. diff --git a/art/preprocessing/audio/l_filter/pytorch.py b/art/preprocessing/audio/l_filter/pytorch.py index f3c4def7a2..ec292e46c3 100644 --- a/art/preprocessing/audio/l_filter/pytorch.py +++ b/art/preprocessing/audio/l_filter/pytorch.py @@ -20,10 +20,10 @@ (IIR) or finite impulse response (FIR) filter. This implementation is a wrapper around the `torchaudio.functional.lfilter` function in the `torchaudio` package. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -31,7 +31,7 @@ from art.preprocessing.preprocessing import PreprocessorPyTorch if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE @@ -51,7 +51,7 @@ def __init__( self, numerator_coef: np.ndarray = np.array([1.0]), denominator_coef: np.ndarray = np.array([1.0]), - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = False, apply_predict: bool = True, verbose: bool = False, @@ -86,8 +86,8 @@ def __init__( self._check_params() def forward( - self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None - ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]: + self, x: "torch.Tensor", y: "torch.Tensor" | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Apply filter to a single sample `x`. @@ -117,7 +117,7 @@ def forward( return x_preprocess, y - def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarray, np.ndarray | None]: """ Apply filter to sample `x`. diff --git a/art/preprocessing/expectation_over_transformation/image_center_crop/pytorch.py b/art/preprocessing/expectation_over_transformation/image_center_crop/pytorch.py index 12e23f74b1..22061be225 100644 --- a/art/preprocessing/expectation_over_transformation/image_center_crop/pytorch.py +++ b/art/preprocessing/expectation_over_transformation/image_center_crop/pytorch.py @@ -19,14 +19,14 @@ This module implements Expectation over Transformation preprocessing for image center crop in PyTorch. 
""" import logging -from typing import Dict, List, Optional, TYPE_CHECKING, Tuple, Union +from typing import TYPE_CHECKING import numpy as np from art.preprocessing.expectation_over_transformation.pytorch import EoTPyTorch if TYPE_CHECKING: - # pylint: disable=C0412 + import torch logger = logging.getLogger(__name__) @@ -44,7 +44,7 @@ class EoTImageCenterCropPyTorch(EoTPyTorch): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], + clip_values: tuple[float, float], size: int = 5, label_type: str = "classification", apply_fit: bool = False, @@ -71,8 +71,8 @@ def __init__( self._check_params() def _transform( - self, x: "torch.Tensor", y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]], **kwargs - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + self, x: "torch.Tensor", y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None, **kwargs + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Center crop an input image and its labels by randomly sampled crop size. @@ -107,11 +107,11 @@ def _transform( max=self.clip_values[1], ) - y_preprocess: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]] + y_preprocess: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None if self.label_type == "object_detection" and y is not None: - y_od: List[Dict[str, "torch.Tensor"]] = [{}] + y_od: list[dict[str, "torch.Tensor"]] = [{}] if isinstance(y, list): if isinstance(y[0], dict): diff --git a/art/preprocessing/expectation_over_transformation/image_rotation/pytorch.py b/art/preprocessing/expectation_over_transformation/image_rotation/pytorch.py index 4656d6c33c..d9763c4a65 100644 --- a/art/preprocessing/expectation_over_transformation/image_rotation/pytorch.py +++ b/art/preprocessing/expectation_over_transformation/image_rotation/pytorch.py @@ -18,15 +18,17 @@ """ This module implements Expectation over Transformation preprocessing for image rotation in PyTorch. """ +from __future__ import annotations + import logging -from typing import Dict, List, Optional, TYPE_CHECKING, Tuple, Union +from typing import TYPE_CHECKING import numpy as np from art.preprocessing.expectation_over_transformation.pytorch import EoTPyTorch if TYPE_CHECKING: - # pylint: disable=C0412 + import torch logger = logging.getLogger(__name__) @@ -44,8 +46,8 @@ class EoTImageRotationPyTorch(EoTPyTorch): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - angles: Union[float, Tuple[float, float], List[float]] = 45.0, + clip_values: tuple[float, float], + angles: float | tuple[float, float] | list[float] = 45.0, label_type: str = "classification", apply_fit: bool = False, apply_predict: bool = True, @@ -75,8 +77,8 @@ def __init__( self._check_params() def _transform( - self, x: "torch.Tensor", y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]], **kwargs - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + self, x: "torch.Tensor", y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None, **kwargs + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Transformation of an input image and its label by randomly sampled rotation. 
@@ -113,11 +115,11 @@ def _transform( max=self.clip_values[1], ) - y_preprocess: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]] + y_preprocess: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None if self.label_type == "object_detection" and y is not None: - y_od: List[Dict[str, "torch.Tensor"]] = [{}] + y_od: list[dict[str, "torch.Tensor"]] = [{}] if isinstance(y, list): if isinstance(y[0], dict): @@ -183,7 +185,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if ( self.label_type == "classification" and not isinstance(self.angles, (int, float, tuple, list)) diff --git a/art/preprocessing/expectation_over_transformation/image_rotation/tensorflow.py b/art/preprocessing/expectation_over_transformation/image_rotation/tensorflow.py index ceda12b269..ab67029c01 100644 --- a/art/preprocessing/expectation_over_transformation/image_rotation/tensorflow.py +++ b/art/preprocessing/expectation_over_transformation/image_rotation/tensorflow.py @@ -18,15 +18,17 @@ """ This module implements Expectation over Transformation preprocessing for image rotation in TensorFlow. """ +from __future__ import annotations + import logging -from typing import Optional, TYPE_CHECKING, Tuple, Union +from typing import TYPE_CHECKING import numpy as np from art.preprocessing.expectation_over_transformation.tensorflow import EoTTensorFlowV2 if TYPE_CHECKING: - # pylint: disable=C0412 + import tensorflow as tf logger = logging.getLogger(__name__) @@ -44,8 +46,8 @@ class EoTImageRotationTensorFlow(EoTTensorFlowV2): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - angles: Union[float, Tuple[float, float]] = 45.0, + clip_values: tuple[float, float], + angles: float | tuple[float, float] = 45.0, label_type: str = "classification", apply_fit: bool = False, apply_predict: bool = True, @@ -71,9 +73,7 @@ def __init__( self.label_type = label_type self._check_params() - def _transform( - self, x: "tf.Tensor", y: Optional["tf.Tensor"], **kwargs - ) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def _transform(self, x: "tf.Tensor", y: "tf.Tensor" | None, **kwargs) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Transformation of an input image and its label by randomly sampled rotation. @@ -84,7 +84,6 @@ def _transform( import tensorflow as tf import tensorflow_addons as tfa - # pylint: disable=E1120,E1123 angles = tf.random.uniform(shape=(), minval=self.angles_range[0], maxval=self.angles_range[1]) angles = angles / 360.0 * 2.0 * np.pi x_preprocess = tfa.image.rotate(images=x, angles=angles, interpolation="NEAREST", name=None) @@ -95,7 +94,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.angles, (int, float, tuple)) or ( isinstance(self.angles, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/natural_corruptions/brightness/pytorch.py b/art/preprocessing/expectation_over_transformation/natural_corruptions/brightness/pytorch.py index b210dd20c4..22c46356f8 100644 --- a/art/preprocessing/expectation_over_transformation/natural_corruptions/brightness/pytorch.py +++ b/art/preprocessing/expectation_over_transformation/natural_corruptions/brightness/pytorch.py @@ -18,8 +18,10 @@ """ This module implements EoT of changes in brightness by addition of uniformly sampled delta. 
""" +from __future__ import annotations + import logging -from typing import Dict, List, Tuple, Union, TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -39,8 +41,8 @@ class EoTBrightnessPyTorch(EoTPyTorch): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - delta: Union[float, Tuple[float, float]], + clip_values: tuple[float, float], + delta: float | tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -64,8 +66,8 @@ def __init__( self._check_params() def _transform( - self, x: "torch.Tensor", y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]], **kwargs - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + self, x: "torch.Tensor", y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None, **kwargs + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Transformation of an image with randomly sampled brightness. @@ -80,7 +82,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.delta, (int, float, tuple)) or ( isinstance(self.delta, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/natural_corruptions/brightness/tensorflow.py b/art/preprocessing/expectation_over_transformation/natural_corruptions/brightness/tensorflow.py index 18b34e0417..0a4fd9c1e5 100644 --- a/art/preprocessing/expectation_over_transformation/natural_corruptions/brightness/tensorflow.py +++ b/art/preprocessing/expectation_over_transformation/natural_corruptions/brightness/tensorflow.py @@ -18,8 +18,10 @@ """ This module implements EoT of changes in brightness by addition of uniformly sampled delta. """ +from __future__ import annotations + import logging -from typing import Tuple, Union, TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -39,8 +41,8 @@ class EoTBrightnessTensorFlow(EoTTensorFlowV2): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - delta: Union[float, Tuple[float, float]], + clip_values: tuple[float, float], + delta: float | tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -63,9 +65,7 @@ def __init__( self.delta_range = (-delta, delta) if isinstance(delta, (int, float)) else delta self._check_params() - def _transform( - self, x: "tf.Tensor", y: Optional["tf.Tensor"], **kwargs - ) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def _transform(self, x: "tf.Tensor", y: "tf.Tensor" | None, **kwargs) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Transformation of an image with randomly sampled brightness. @@ -80,7 +80,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.delta, (int, float, tuple)) or ( isinstance(self.delta, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/natural_corruptions/contrast/pytorch.py b/art/preprocessing/expectation_over_transformation/natural_corruptions/contrast/pytorch.py index 9ad2464144..34cb4b71ef 100644 --- a/art/preprocessing/expectation_over_transformation/natural_corruptions/contrast/pytorch.py +++ b/art/preprocessing/expectation_over_transformation/natural_corruptions/contrast/pytorch.py @@ -18,8 +18,10 @@ """ This module implements EoT of changes in contrast with uniformly sampled factor. 
""" +from __future__ import annotations + import logging -from typing import Dict, List, Union, Tuple, TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -39,8 +41,8 @@ class EoTContrastPyTorch(EoTPyTorch): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - contrast_factor: Union[float, Tuple[float, float]], + clip_values: tuple[float, float], + contrast_factor: float | tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -66,8 +68,8 @@ def __init__( self._check_params() def _transform( - self, x: "torch.Tensor", y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]], **kwargs - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + self, x: "torch.Tensor", y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None, **kwargs + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Transformation of an image with randomly sampled contrast. @@ -98,7 +100,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.contrast_factor, (int, float, tuple)) or ( isinstance(self.contrast_factor, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/natural_corruptions/contrast/tensorflow.py b/art/preprocessing/expectation_over_transformation/natural_corruptions/contrast/tensorflow.py index cfe27e264a..bece6cd017 100644 --- a/art/preprocessing/expectation_over_transformation/natural_corruptions/contrast/tensorflow.py +++ b/art/preprocessing/expectation_over_transformation/natural_corruptions/contrast/tensorflow.py @@ -18,8 +18,10 @@ """ This module implements EoT of changes in contrast with uniformly sampled factor. """ +from __future__ import annotations + import logging -from typing import Tuple, Union, TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -39,8 +41,8 @@ class EoTContrastTensorFlow(EoTTensorFlowV2): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - contrast_factor: Union[float, Tuple[float, float]], + clip_values: tuple[float, float], + contrast_factor: float | tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -65,9 +67,7 @@ def __init__( ) self._check_params() - def _transform( - self, x: "tf.Tensor", y: Optional["tf.Tensor"], **kwargs - ) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def _transform(self, x: "tf.Tensor", y: "tf.Tensor" | None, **kwargs) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Transformation of an image with randomly sampled contrast. @@ -98,7 +98,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.contrast_factor, (int, float, tuple)) or ( isinstance(self.contrast_factor, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/natural_corruptions/gaussian_noise/pytorch.py b/art/preprocessing/expectation_over_transformation/natural_corruptions/gaussian_noise/pytorch.py index 1e344926ae..09f8a9ffad 100644 --- a/art/preprocessing/expectation_over_transformation/natural_corruptions/gaussian_noise/pytorch.py +++ b/art/preprocessing/expectation_over_transformation/natural_corruptions/gaussian_noise/pytorch.py @@ -18,8 +18,10 @@ """ This module implements EoT of adding Gaussian noise with uniformly sampled standard deviation. 
""" +from __future__ import annotations + import logging -from typing import Dict, List, Tuple, Union, TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -39,8 +41,8 @@ class EoTGaussianNoisePyTorch(EoTPyTorch): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - std: Union[float, Tuple[float, float]], + clip_values: tuple[float, float], + std: float | tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -64,8 +66,8 @@ def __init__( self._check_params() def _transform( - self, x: "torch.Tensor", y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]], **kwargs - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + self, x: "torch.Tensor", y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None, **kwargs + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Transformation of an image with randomly sampled Gaussian noise. @@ -81,7 +83,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.std, (int, float, tuple)) or ( isinstance(self.std, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/natural_corruptions/gaussian_noise/tensorflow.py b/art/preprocessing/expectation_over_transformation/natural_corruptions/gaussian_noise/tensorflow.py index d86016bcf6..a88162989c 100644 --- a/art/preprocessing/expectation_over_transformation/natural_corruptions/gaussian_noise/tensorflow.py +++ b/art/preprocessing/expectation_over_transformation/natural_corruptions/gaussian_noise/tensorflow.py @@ -18,8 +18,10 @@ """ This module implements EoT of adding Gaussian noise with uniformly sampled standard deviation. """ +from __future__ import annotations + import logging -from typing import Tuple, Union, TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -39,8 +41,8 @@ class EoTGaussianNoiseTensorFlow(EoTTensorFlowV2): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - std: Union[float, Tuple[float, float]], + clip_values: tuple[float, float], + std: float | tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -63,9 +65,7 @@ def __init__( self.std_range = (0.0, std) if isinstance(std, (int, float)) else std self._check_params() - def _transform( - self, x: "tf.Tensor", y: Optional["tf.Tensor"], **kwargs - ) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def _transform(self, x: "tf.Tensor", y: "tf.Tensor" | None, **kwargs) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Transformation of an image with randomly sampled Gaussian noise. @@ -81,7 +81,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.std, (int, float, tuple)) or ( isinstance(self.std, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/natural_corruptions/shot_noise/pytorch.py b/art/preprocessing/expectation_over_transformation/natural_corruptions/shot_noise/pytorch.py index 76987cfe22..98fcc873f2 100644 --- a/art/preprocessing/expectation_over_transformation/natural_corruptions/shot_noise/pytorch.py +++ b/art/preprocessing/expectation_over_transformation/natural_corruptions/shot_noise/pytorch.py @@ -18,8 +18,10 @@ """ This module implements EoT of adding shot noise (Poisson) with uniformly sampled rate parameter. 
""" +from __future__ import annotations + import logging -from typing import Dict, List, Tuple, Union, TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -39,8 +41,8 @@ class EoTShotNoisePyTorch(EoTPyTorch): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - lam: Union[float, Tuple[float, float]], + clip_values: tuple[float, float], + lam: float | tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -64,8 +66,8 @@ def __init__( self._check_params() def _transform( - self, x: "torch.Tensor", y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]], **kwargs - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + self, x: "torch.Tensor", y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None, **kwargs + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Transformation of an image with randomly sampled shot (Poisson) noise. @@ -81,7 +83,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.lam, (int, float, tuple)) or ( isinstance(self.lam, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/natural_corruptions/shot_noise/tensorflow.py b/art/preprocessing/expectation_over_transformation/natural_corruptions/shot_noise/tensorflow.py index 0c7ba0fe52..492c9e8047 100644 --- a/art/preprocessing/expectation_over_transformation/natural_corruptions/shot_noise/tensorflow.py +++ b/art/preprocessing/expectation_over_transformation/natural_corruptions/shot_noise/tensorflow.py @@ -18,8 +18,10 @@ """ This module implements EoT of adding shot noise (Poisson) with uniformly sampled rate parameter. """ +from __future__ import annotations + import logging -from typing import Tuple, Union, TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -39,8 +41,8 @@ class EoTShotNoiseTensorFlow(EoTTensorFlowV2): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - lam: Union[float, Tuple[float, float]], + clip_values: tuple[float, float], + lam: float | tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -63,9 +65,7 @@ def __init__( self.lam_range = (0.0, lam) if isinstance(lam, (int, float)) else lam self._check_params() - def _transform( - self, x: "tf.Tensor", y: Optional["tf.Tensor"], **kwargs - ) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def _transform(self, x: "tf.Tensor", y: "tf.Tensor" | None, **kwargs) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Transformation of an image with randomly sampled shot (Poisson) noise. 
@@ -76,13 +76,13 @@ def _transform( import tensorflow as tf lam_i = np.random.uniform(low=self.lam_range[0], high=self.lam_range[1]) - # pylint: disable=E1123,E1120 + delta_i = tf.random.poisson(shape=x.shape, lam=lam_i, seed=None) / lam_i * self.clip_values[1] return tf.clip_by_value(x + delta_i, clip_value_min=self.clip_values[0], clip_value_max=self.clip_values[1]), y def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.lam, (int, float, tuple)) or ( isinstance(self.lam, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/natural_corruptions/zoom_blur/pytorch.py b/art/preprocessing/expectation_over_transformation/natural_corruptions/zoom_blur/pytorch.py index 4d588ec648..1f84334cc2 100644 --- a/art/preprocessing/expectation_over_transformation/natural_corruptions/zoom_blur/pytorch.py +++ b/art/preprocessing/expectation_over_transformation/natural_corruptions/zoom_blur/pytorch.py @@ -18,8 +18,10 @@ """ This module implements EoT of zoom blur with uniformly sampled zoom factor. """ +from __future__ import annotations + import logging -from typing import Dict, List, Tuple, Union, TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -39,8 +41,8 @@ class EoTZoomBlurPyTorch(EoTPyTorch): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - zoom: Union[float, Tuple[float, float]], + clip_values: tuple[float, float], + zoom: float | tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -64,8 +66,8 @@ def __init__( self._check_params() def _transform( - self, x: "torch.Tensor", y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]], **kwargs - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + self, x: "torch.Tensor", y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None, **kwargs + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Transformation of an image with randomly sampled zoom blur. @@ -102,7 +104,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.zoom, (int, float, tuple)) or ( isinstance(self.zoom, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/natural_corruptions/zoom_blur/tensorflow.py b/art/preprocessing/expectation_over_transformation/natural_corruptions/zoom_blur/tensorflow.py index 9dc8ac0d48..21927dd2db 100644 --- a/art/preprocessing/expectation_over_transformation/natural_corruptions/zoom_blur/tensorflow.py +++ b/art/preprocessing/expectation_over_transformation/natural_corruptions/zoom_blur/tensorflow.py @@ -18,8 +18,10 @@ """ This module implements EoT of zoom blur with uniformly sampled zoom factor. 
""" +from __future__ import annotations + import logging -from typing import Tuple, Union, TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import numpy as np @@ -39,8 +41,8 @@ class EoTZoomBlurTensorFlow(EoTTensorFlowV2): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], - zoom: Union[float, Tuple[float, float]], + clip_values: tuple[float, float], + zoom: float | tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -63,9 +65,7 @@ def __init__( self.zoom_range = (1.0, zoom) if isinstance(zoom, (int, float)) else zoom self._check_params() - def _transform( - self, x: "tf.Tensor", y: Optional["tf.Tensor"], **kwargs - ) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def _transform(self, x: "tf.Tensor", y: "tf.Tensor" | None, **kwargs) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Transformation of an image with randomly sampled zoom blur. @@ -104,7 +104,7 @@ def _transform( def _check_params(self) -> None: - # pylint: disable=R0916 + # pylint: disable=too-many-boolean-expressions if not isinstance(self.zoom, (int, float, tuple)) or ( isinstance(self.zoom, tuple) and ( diff --git a/art/preprocessing/expectation_over_transformation/pytorch.py b/art/preprocessing/expectation_over_transformation/pytorch.py index 310a9209bc..209a2ed5f6 100644 --- a/art/preprocessing/expectation_over_transformation/pytorch.py +++ b/art/preprocessing/expectation_over_transformation/pytorch.py @@ -20,7 +20,7 @@ """ from abc import abstractmethod import logging -from typing import Dict, List, Optional, Tuple, TYPE_CHECKING, Union +from typing import TYPE_CHECKING from art.preprocessing.preprocessing import PreprocessorPyTorch @@ -38,7 +38,7 @@ class EoTPyTorch(PreprocessorPyTorch): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], + clip_values: tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -58,8 +58,8 @@ def __init__( @abstractmethod def _transform( - self, x: "torch.Tensor", y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]], **kwargs - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + self, x: "torch.Tensor", y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None, **kwargs + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Internal method implementing the transformation per input sample. @@ -70,8 +70,8 @@ def _transform( raise NotImplementedError def forward( - self, x: "torch.Tensor", y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]] = None - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + self, x: "torch.Tensor", y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Apply transformations to inputs `x` and labels `y`. 
@@ -82,13 +82,13 @@ def forward( import torch x_preprocess_list = [] - y_preprocess_list_classification: List[torch.Tensor] = [] - y_preprocess_list_object_detection: List[List[Dict[str, torch.Tensor]]] = [] + y_preprocess_list_classification: list[torch.Tensor] = [] + y_preprocess_list_object_detection: list[list[dict[str, torch.Tensor]]] = [] for i_image in range(x.shape[0]): for _ in range(self.nb_samples): x_i = x[[i_image]] - y_i: Optional[Union[torch.Tensor, List[Dict[str, torch.Tensor]]]] + y_i: torch.Tensor | list[dict[str, torch.Tensor]] | None if y is not None: if isinstance(y, list): y_i = [y[i_image]] @@ -106,7 +106,7 @@ def forward( y_preprocess_list_object_detection.append(y_preprocess_i) x_preprocess = torch.stack(x_preprocess_list, dim=0) - y_preprocess: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]] + y_preprocess: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None if y is None: y_preprocess = y else: diff --git a/art/preprocessing/expectation_over_transformation/tensorflow.py b/art/preprocessing/expectation_over_transformation/tensorflow.py index 8e645b16b4..b6b42b1ab7 100644 --- a/art/preprocessing/expectation_over_transformation/tensorflow.py +++ b/art/preprocessing/expectation_over_transformation/tensorflow.py @@ -18,9 +18,11 @@ """ This module defines a base class for EoT in TensorFlow v2. """ +from __future__ import annotations + from abc import abstractmethod import logging -from typing import Dict, List, Optional, Tuple, TYPE_CHECKING, Union +from typing import TYPE_CHECKING import numpy as np @@ -40,7 +42,7 @@ class EoTTensorFlowV2(PreprocessorTensorFlowV2): def __init__( self, nb_samples: int, - clip_values: Tuple[float, float], + clip_values: tuple[float, float], apply_fit: bool = False, apply_predict: bool = True, ) -> None: @@ -59,9 +61,7 @@ def __init__( EoTTensorFlowV2._check_params(self) @abstractmethod - def _transform( - self, x: "tf.Tensor", y: Optional["tf.Tensor"], **kwargs - ) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def _transform(self, x: "tf.Tensor", y: "tf.Tensor" | None, **kwargs) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Internal method implementing the transformation per input sample. @@ -71,7 +71,7 @@ def _transform( """ raise NotImplementedError - def forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def forward(self, x: "tf.Tensor", y: "tf.Tensor" | None = None) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Apply transformations to inputs `x` and labels `y`. @@ -87,7 +87,7 @@ def forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> Tuple["tf. for i_image in range(x.shape[0]): for _ in range(self.nb_samples): x_i = x[[i_image]] - y_i: Optional[Union[tf.Tensor, List[Dict[str, tf.Tensor]]]] + y_i: tf.Tensor | list[dict[str, tf.Tensor]] | None if y is not None: if isinstance(y, list): y_i = [y[i_image]] diff --git a/art/preprocessing/image/image_resize/numpy.py b/art/preprocessing/image/image_resize/numpy.py index 44c6b3a798..46a5f02545 100644 --- a/art/preprocessing/image/image_resize/numpy.py +++ b/art/preprocessing/image/image_resize/numpy.py @@ -18,8 +18,10 @@ """ This module implements resizing for images and object detection bounding boxes. 
""" +from __future__ import annotations + import logging -from typing import Dict, List, Optional, TYPE_CHECKING, Tuple, Union +from typing import TYPE_CHECKING import numpy as np import cv2 @@ -49,7 +51,7 @@ def __init__( channels_first: bool = False, label_type: str = "classification", interpolation: int = cv2.INTER_LINEAR, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = True, apply_predict: bool = False, verbose: bool = False, @@ -79,8 +81,8 @@ def __init__( self._check_params() def __call__( - self, x: np.ndarray, y: Optional[Union[np.ndarray, List[Dict[str, np.ndarray]]]] = None - ) -> Tuple[np.ndarray, Optional[Union[np.ndarray, List[Dict[str, np.ndarray]]]]]: + self, x: np.ndarray, y: np.ndarray | list[dict[str, np.ndarray]] | None = None + ) -> tuple[np.ndarray, np.ndarray | list[dict[str, np.ndarray]] | None]: """ Resize `x` and adjust bounding boxes for labels `y` accordingly. @@ -89,7 +91,7 @@ def __call__( :return: Transformed samples and labels. """ x_preprocess_list = [] - y_preprocess: Optional[Union[np.ndarray, List[Dict[str, np.ndarray]]]] + y_preprocess: np.ndarray | list[dict[str, np.ndarray]] | None if y is not None and self.label_type == "object_detection": y_preprocess = [] else: @@ -108,7 +110,7 @@ def __call__( x_preprocess_list.append(x_resized) if self.label_type == "object_detection" and y is not None: - y_resized: Dict[str, np.ndarray] = {} + y_resized: dict[str, np.ndarray] = {} # Copy labels and ensure types if isinstance(y, list) and isinstance(y_preprocess, list): diff --git a/art/preprocessing/image/image_resize/pytorch.py b/art/preprocessing/image/image_resize/pytorch.py index 0aa65cbde5..89b3555fa2 100644 --- a/art/preprocessing/image/image_resize/pytorch.py +++ b/art/preprocessing/image/image_resize/pytorch.py @@ -18,15 +18,17 @@ """ This module implements resizing for images and object detection bounding boxes in PyTorch. """ +from __future__ import annotations + import logging -from typing import Dict, List, Optional, TYPE_CHECKING, Tuple, Union +from typing import TYPE_CHECKING from tqdm.auto import tqdm from art.preprocessing.preprocessing import PreprocessorPyTorch if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE @@ -49,7 +51,7 @@ def __init__( channels_first: bool = True, label_type: str = "classification", interpolation: str = "bilinear", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = True, apply_predict: bool = False, verbose: bool = False, @@ -82,8 +84,8 @@ def __init__( def forward( self, x: "torch.Tensor", - y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]] = None, - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None = None, + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Resize `x` and adjust bounding boxes for labels `y` accordingly. 
@@ -94,7 +96,7 @@ def forward( import torch x_preprocess_list = [] - y_preprocess: Optional[Union[torch.Tensor, List[Dict[str, torch.Tensor]]]] + y_preprocess: torch.Tensor | list[dict[str, torch.Tensor]] | None if y is not None and self.label_type == "object_detection": y_preprocess = [] else: @@ -115,7 +117,7 @@ def forward( x_preprocess_list.append(x_resized) if self.label_type == "object_detection" and y is not None: - y_resized: Dict[str, torch.Tensor] = {} + y_resized: dict[str, torch.Tensor] = {} # Copy labels and ensure types if isinstance(y, list) and isinstance(y_preprocess, list): diff --git a/art/preprocessing/image/image_resize/tensorflow.py b/art/preprocessing/image/image_resize/tensorflow.py index aeb919df8c..2f036b48a3 100644 --- a/art/preprocessing/image/image_resize/tensorflow.py +++ b/art/preprocessing/image/image_resize/tensorflow.py @@ -18,8 +18,10 @@ """ This module implements resizing for images and object detection bounding boxes in TensorFlow v2. """ +from __future__ import annotations + import logging -from typing import Dict, List, Optional, TYPE_CHECKING, Tuple, Union +from typing import TYPE_CHECKING from tqdm.auto import tqdm @@ -48,7 +50,7 @@ def __init__( channels_first: bool = False, label_type: str = "classification", interpolation: str = "bilinear", - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = True, apply_predict: bool = False, verbose: bool = False, @@ -81,8 +83,8 @@ def __init__( def forward( self, x: "tf.Tensor", - y: Optional[Union["tf.Tensor", List[Dict[str, "tf.Tensor"]]]] = None, - ) -> Tuple["tf.Tensor", Optional[Union["tf.Tensor", List[Dict[str, "tf.Tensor"]]]]]: + y: "tf.Tensor" | list[dict[str, "tf.Tensor"]] | None = None, + ) -> tuple["tf.Tensor", "tf.Tensor" | list[dict[str, "tf.Tensor"]] | None]: """ Resize `x` and adjust bounding boxes for labels `y` accordingly. @@ -93,7 +95,7 @@ def forward( import tensorflow as tf x_preprocess_list = [] - y_preprocess: Optional[Union[tf.Tensor, List[Dict[str, tf.Tensor]]]] + y_preprocess: tf.Tensor | list[dict[str, tf.Tensor]] | None if y is not None and self.label_type == "object_detection": y_preprocess = [] else: @@ -112,7 +114,7 @@ def forward( x_preprocess_list.append(x_resized) if self.label_type == "object_detection" and y is not None: - y_resized: Dict[str, tf.Tensor] = {} + y_resized: dict[str, tf.Tensor] = {} # Copy labels and ensure types if isinstance(y, list) and isinstance(y_preprocess, list): diff --git a/art/preprocessing/image/image_square_pad/numpy.py b/art/preprocessing/image/image_square_pad/numpy.py index a04422d72c..2c1ef463c8 100644 --- a/art/preprocessing/image/image_square_pad/numpy.py +++ b/art/preprocessing/image/image_square_pad/numpy.py @@ -18,8 +18,10 @@ """ This module implements square padding for images and object detection bounding boxes. 
""" +from __future__ import annotations + import logging -from typing import Dict, List, Any, Optional, TYPE_CHECKING, Tuple, Union +from typing import Any, TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -46,8 +48,8 @@ def __init__( channels_first: bool = False, label_type: str = "classification", pad_mode: str = "constant", - pad_kwargs: Optional[Dict[str, Any]] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + pad_kwargs: dict[str, Any] | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = True, apply_predict: bool = False, verbose: bool = False, @@ -78,8 +80,8 @@ def __init__( self._check_params() def __call__( - self, x: np.ndarray, y: Optional[Union[np.ndarray, List[Dict[str, np.ndarray]]]] = None - ) -> Tuple[np.ndarray, Optional[Union[np.ndarray, List[Dict[str, np.ndarray]]]]]: + self, x: np.ndarray, y: np.ndarray | list[dict[str, np.ndarray]] | None = None + ) -> tuple[np.ndarray, np.ndarray | list[dict[str, np.ndarray]] | None]: """ Square pad `x` and adjust bounding boxes for labels `y` accordingly. @@ -88,7 +90,7 @@ def __call__( :return: Transformed samples and labels. """ x_preprocess = [] - y_preprocess: Optional[Union[np.ndarray, List[Dict[str, np.ndarray]]]] + y_preprocess: np.ndarray | list[dict[str, np.ndarray]] | None if y is not None and self.label_type == "object_detection": y_preprocess = [] else: @@ -124,7 +126,7 @@ def __call__( x_preprocess.append(x_pad) if self.label_type == "object_detection" and y is not None: - y_pad: Dict[str, np.ndarray] = {} + y_pad: dict[str, np.ndarray] = {} # Copy labels and ensure types if isinstance(y, list) and isinstance(y_preprocess, list): diff --git a/art/preprocessing/image/image_square_pad/pytorch.py b/art/preprocessing/image/image_square_pad/pytorch.py index beb5d29e80..dea1c33120 100644 --- a/art/preprocessing/image/image_square_pad/pytorch.py +++ b/art/preprocessing/image/image_square_pad/pytorch.py @@ -18,8 +18,10 @@ """ This module implements square padding for images and object detection bounding boxes in PyTorch. """ +from __future__ import annotations + import logging -from typing import Dict, List, Any, Optional, TYPE_CHECKING, Tuple, Union +from typing import Any, TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -27,7 +29,7 @@ from art.preprocessing.preprocessing import PreprocessorPyTorch if TYPE_CHECKING: - # pylint: disable=C0412 + import torch from art.utils import CLIP_VALUES_TYPE @@ -48,8 +50,8 @@ def __init__( channels_first: bool = True, label_type: str = "classification", pad_mode: str = "constant", - pad_kwargs: Optional[Dict[str, Any]] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + pad_kwargs: dict[str, Any] | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = True, apply_predict: bool = False, verbose: bool = False, @@ -82,8 +84,8 @@ def __init__( def forward( self, x: "torch.Tensor", - y: Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]] = None, - ) -> Tuple["torch.Tensor", Optional[Union["torch.Tensor", List[Dict[str, "torch.Tensor"]]]]]: + y: "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None = None, + ) -> tuple["torch.Tensor", "torch.Tensor" | list[dict[str, "torch.Tensor"]] | None]: """ Square pad `x` and adjust bounding boxes for labels `y` accordingly. 
@@ -94,7 +96,7 @@ def forward( import torch x_preprocess = [] - y_preprocess: Optional[Union[torch.Tensor, List[Dict[str, torch.Tensor]]]] + y_preprocess: torch.Tensor | list[dict[str, torch.Tensor]] | None if y is not None and self.label_type == "object_detection": y_preprocess = [] else: @@ -130,7 +132,7 @@ def forward( x_preprocess.append(x_pad) if self.label_type == "object_detection" and y is not None: - y_pad: Dict[str, torch.Tensor] = {} + y_pad: dict[str, torch.Tensor] = {} # Copy labels and ensure types if isinstance(y, list) and isinstance(y_preprocess, list): diff --git a/art/preprocessing/image/image_square_pad/tensorflow.py b/art/preprocessing/image/image_square_pad/tensorflow.py index c2a5507ba5..b65e5de3b2 100644 --- a/art/preprocessing/image/image_square_pad/tensorflow.py +++ b/art/preprocessing/image/image_square_pad/tensorflow.py @@ -18,8 +18,10 @@ """ This module implements square padding for images and object detection bounding boxes in TensorFlow v2. """ +from __future__ import annotations + import logging -from typing import Dict, List, Any, Optional, TYPE_CHECKING, Tuple, Union +from typing import Any, TYPE_CHECKING import numpy as np from tqdm.auto import tqdm @@ -47,8 +49,8 @@ def __init__( channels_first: bool = False, label_type: str = "classification", pad_mode: str = "CONSTANT", - pad_kwargs: Optional[Dict[str, Any]] = None, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, + pad_kwargs: dict[str, Any] | None = None, + clip_values: "CLIP_VALUES_TYPE" | None = None, apply_fit: bool = True, apply_predict: bool = False, verbose: bool = False, @@ -80,8 +82,8 @@ def __init__( def forward( self, x: "tf.Tensor", - y: Optional[Union["tf.Tensor", List[Dict[str, "tf.Tensor"]]]] = None, - ) -> Tuple["tf.Tensor", Optional[Union["tf.Tensor", List[Dict[str, "tf.Tensor"]]]]]: + y: "tf.Tensor" | list[dict[str, "tf.Tensor"]] | None = None, + ) -> tuple["tf.Tensor", "tf.Tensor" | list[dict[str, "tf.Tensor"]] | None]: """ Square pad `x` and adjust bounding boxes for labels `y` accordingly. @@ -92,7 +94,7 @@ def forward( import tensorflow as tf x_preprocess = [] - y_preprocess: Optional[Union[tf.Tensor, List[Dict[str, tf.Tensor]]]] + y_preprocess: tf.Tensor | list[dict[str, tf.Tensor]] | None if y is not None and self.label_type == "object_detection": y_preprocess = [] else: @@ -128,7 +130,7 @@ def forward( x_preprocess.append(x_pad) if self.label_type == "object_detection" and y is not None: - y_pad: Dict[str, tf.Tensor] = {} + y_pad: dict[str, tf.Tensor] = {} # Copy labels and ensure types if isinstance(y, list) and isinstance(y_preprocess, list): diff --git a/art/preprocessing/preprocessing.py b/art/preprocessing/preprocessing.py index 9c23d904ca..7c1afa7470 100644 --- a/art/preprocessing/preprocessing.py +++ b/art/preprocessing/preprocessing.py @@ -18,7 +18,7 @@ """ This module contains the Preprocessor API. """ -# pylint: disable=W0611 +# pylint: disable=unused-import from art.defences.preprocessor.preprocessor import Preprocessor from art.defences.preprocessor.preprocessor import PreprocessorPyTorch from art.defences.preprocessor.preprocessor import PreprocessorTensorFlowV2 diff --git a/art/preprocessing/standardisation_mean_std/numpy.py b/art/preprocessing/standardisation_mean_std/numpy.py index 5c9749a5e2..8e91f53658 100644 --- a/art/preprocessing/standardisation_mean_std/numpy.py +++ b/art/preprocessing/standardisation_mean_std/numpy.py @@ -18,8 +18,10 @@ """ This module implements the standardisation with mean and standard deviation. 
""" +from __future__ import annotations + import logging -from typing import Optional, Tuple, Union + import numpy as np @@ -39,8 +41,8 @@ class StandardisationMeanStd(Preprocessor): def __init__( self, - mean: Union[float, np.ndarray] = 0.0, - std: Union[float, np.ndarray] = 1.0, + mean: float | np.ndarray = 0.0, + std: float | np.ndarray = 1.0, apply_fit: bool = True, apply_predict: bool = True, ): @@ -56,14 +58,14 @@ def __init__( self._check_params() # init broadcastable mean and std for lazy loading - self._broadcastable_mean: Optional[np.ndarray] = None - self._broadcastable_std: Optional[np.ndarray] = None + self._broadcastable_mean: np.ndarray | None = None + self._broadcastable_std: np.ndarray | None = None def __call__( self, x: np.ndarray, - y: Optional[np.ndarray] = None, - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: + y: np.ndarray | None = None, + ) -> tuple[np.ndarray, np.ndarray | None]: """ Apply StandardisationMeanStd inputs `x`. diff --git a/art/preprocessing/standardisation_mean_std/pytorch.py b/art/preprocessing/standardisation_mean_std/pytorch.py index fb3c07a283..5704ab7a11 100644 --- a/art/preprocessing/standardisation_mean_std/pytorch.py +++ b/art/preprocessing/standardisation_mean_std/pytorch.py @@ -18,8 +18,10 @@ """ This module implements the standardisation with mean and standard deviation. """ +from __future__ import annotations + import logging -from typing import TYPE_CHECKING, Optional, Tuple, Union +from typing import TYPE_CHECKING import numpy as np @@ -42,8 +44,8 @@ class StandardisationMeanStdPyTorch(PreprocessorPyTorch): def __init__( self, - mean: Union[float, np.ndarray] = 0.0, - std: Union[float, np.ndarray] = 1.0, + mean: float | np.ndarray = 0.0, + std: float | np.ndarray = 1.0, apply_fit: bool = True, apply_predict: bool = True, device_type: str = "gpu", @@ -66,12 +68,12 @@ def __init__( self._check_params() # init broadcastable mean and std for lazy loading - self._broadcastable_mean: Optional[np.ndarray] = None - self._broadcastable_std: Optional[np.ndarray] = None + self._broadcastable_mean: np.ndarray | None = None + self._broadcastable_std: np.ndarray | None = None def forward( - self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None - ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]: + self, x: "torch.Tensor", y: "torch.Tensor" | None = None + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Apply standardisation with mean and standard deviation to input `x`. diff --git a/art/preprocessing/standardisation_mean_std/tensorflow.py b/art/preprocessing/standardisation_mean_std/tensorflow.py index ee8fc6c424..d821f2a0df 100644 --- a/art/preprocessing/standardisation_mean_std/tensorflow.py +++ b/art/preprocessing/standardisation_mean_std/tensorflow.py @@ -18,8 +18,10 @@ """ This module implements the standardisation with mean and standard deviation. 
""" +from __future__ import annotations + import logging -from typing import TYPE_CHECKING, Optional, Tuple, Union +from typing import TYPE_CHECKING import numpy as np @@ -42,8 +44,8 @@ class StandardisationMeanStdTensorFlow(PreprocessorTensorFlowV2): def __init__( self, - mean: Union[float, np.ndarray] = 0.0, - std: Union[float, np.ndarray] = 1.0, + mean: float | np.ndarray = 0.0, + std: float | np.ndarray = 1.0, apply_fit: bool = True, apply_predict: bool = True, ): @@ -59,10 +61,10 @@ def __init__( self._check_params() # init broadcastable mean and std for lazy loading - self._broadcastable_mean: Optional[np.ndarray] = None - self._broadcastable_std: Optional[np.ndarray] = None + self._broadcastable_mean: np.ndarray | None = None + self._broadcastable_std: np.ndarray | None = None - def forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]: + def forward(self, x: "tf.Tensor", y: "tf.Tensor" | None = None) -> tuple["tf.Tensor", "tf.Tensor" | None]: """ Apply standardisation with mean and standard deviation to input `x`. @@ -77,7 +79,7 @@ def forward(self, x: "tf.Tensor", y: Optional["tf.Tensor"] = None) -> Tuple["tf. x_norm = x - self._broadcastable_mean x_norm = x_norm / self._broadcastable_std - x_norm = tf.cast(x_norm, dtype=ART_NUMPY_DTYPE) # pylint: disable=E1123,E1120 + x_norm = tf.cast(x_norm, dtype=ART_NUMPY_DTYPE) return x_norm, y diff --git a/art/preprocessing/standardisation_mean_std/utils.py b/art/preprocessing/standardisation_mean_std/utils.py index b2a5d64931..87c1392212 100644 --- a/art/preprocessing/standardisation_mean_std/utils.py +++ b/art/preprocessing/standardisation_mean_std/utils.py @@ -19,7 +19,7 @@ This module implements utilities for standardisation with mean and standard deviation. """ -from typing import Tuple, TYPE_CHECKING, Union +from typing import TYPE_CHECKING import numpy as np @@ -29,8 +29,8 @@ def broadcastable_mean_std( - x: Union[np.ndarray, "torch.Tensor", "tf.Tensor"], mean: np.ndarray, std: np.ndarray -) -> Tuple[np.ndarray, np.ndarray]: + x: np.ndarray | "torch.Tensor" | "tf.Tensor", mean: np.ndarray, std: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: """ Ensure that the mean and standard deviation are broadcastable with respect to input `x`. diff --git a/art/summary_writer.py b/art/summary_writer.py index e996aef81a..bf0e186308 100644 --- a/art/summary_writer.py +++ b/art/summary_writer.py @@ -18,10 +18,10 @@ """ This module defines and implements the summary writers for TensorBoard output. """ +from __future__ import annotations from abc import ABC, abstractmethod from math import sqrt -from typing import Dict, List, Optional, Union import numpy as np @@ -31,7 +31,7 @@ class SummaryWriter(ABC): This abstract base class defines the API for summary writers. """ - def __init__(self, summary_writer: Union[str, bool]): + def __init__(self, summary_writer: str | bool): """ Create summary writer. 
@@ -109,7 +109,7 @@ class SummaryWriterDefault(SummaryWriter): def __init__( self, - summary_writer: Union[str, bool], + summary_writer: str | bool, ind_1: bool = False, ind_2: bool = False, ind_3: bool = False, @@ -123,21 +123,21 @@ def __init__( self.ind_4 = ind_4 self.loss = None - self.loss_prev: Dict[str, np.ndarray] = {} - self.losses: Dict[str, List[np.ndarray]] = {} + self.loss_prev: dict[str, np.ndarray] = {} + self.losses: dict[str, list[np.ndarray]] = {} - self.i_3: Dict[str, np.ndarray] = {} - self.i_4: Dict[str, np.ndarray] = {} + self.i_3: dict[str, np.ndarray] = {} + self.i_4: dict[str, np.ndarray] = {} def update( self, batch_id: int, global_step: int, - grad: Optional[np.ndarray] = None, - patch: Optional[np.ndarray] = None, + grad: np.ndarray | None = None, + patch: np.ndarray | None = None, estimator=None, - x: Optional[np.ndarray] = None, - y: Optional[np.ndarray] = None, + x: np.ndarray | None = None, + y: np.ndarray | None = None, targeted: bool = False, **kwargs, ): diff --git a/art/utils.py b/art/utils.py index 6c0261cccc..fa12a24310 100644 --- a/art/utils.py +++ b/art/utils.py @@ -18,9 +18,10 @@ """ Module providing convenience functions. """ -# pylint: disable=C0302 -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations + +from collections.abc import Callable import logging import math import os @@ -31,11 +32,11 @@ import zipfile from functools import wraps from inspect import signature -from typing import TYPE_CHECKING, Callable, List, Dict, Optional, Tuple, Union +from typing import TYPE_CHECKING, Optional, Union import numpy as np import six -from scipy.special import gammainc # pylint: disable=E0611 +from scipy.special import gammainc from tqdm.auto import tqdm from art import config @@ -49,18 +50,18 @@ # ------------------------------------------------------------------------------------------------- CONSTANTS AND TYPES -DATASET_TYPE = Tuple[ # pylint: disable=C0103 - Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray], float, float +DATASET_TYPE = tuple[ # pylint: disable=invalid-name + tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray], float, float ] -CLIP_VALUES_TYPE = Tuple[Union[int, float, np.ndarray], Union[int, float, np.ndarray]] # pylint: disable=C0103 +CLIP_VALUES_TYPE = tuple[Union[int, float, np.ndarray], Union[int, float, np.ndarray]] # pylint: disable=invalid-name if TYPE_CHECKING: - # pylint: disable=R0401,C0412 + # pylint: disable=cyclic-import from art.defences.preprocessor.preprocessor import Preprocessor - PREPROCESSING_TYPE = Optional[ # pylint: disable=C0103 + PREPROCESSING_TYPE = Optional[ # pylint: disable=invalid-name Union[ - Tuple[Union[int, float, np.ndarray], Union[int, float, np.ndarray]], Preprocessor, Tuple[Preprocessor, ...] + tuple[Union[int, float, np.ndarray], Union[int, float, np.ndarray]], Preprocessor, tuple[Preprocessor, ...] 
] ] @@ -118,7 +119,7 @@ from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR from art.estimators.tensorflow import TensorFlowV2Estimator - CLASSIFIER_LOSS_GRADIENTS_TYPE = Union[ # pylint: disable=C0103 + CLASSIFIER_LOSS_GRADIENTS_TYPE = Union[ # pylint: disable=invalid-name ClassifierLossGradients, EnsembleClassifier, GPyGaussianProcessClassifier, @@ -133,7 +134,7 @@ QueryEfficientGradientEstimationClassifier, ] - CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE = Union[ # pylint: disable=C0103 + CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE = Union[ # pylint: disable=invalid-name ClassifierClassLossGradients, EnsembleClassifier, GPyGaussianProcessClassifier, @@ -146,7 +147,7 @@ TensorFlowV2Classifier, ] - CLASSIFIER_NEURALNETWORK_TYPE = Union[ # pylint: disable=C0103 + CLASSIFIER_NEURALNETWORK_TYPE = Union[ # pylint: disable=invalid-name ClassifierNeuralNetwork, DetectorClassifier, EnsembleClassifier, @@ -157,7 +158,7 @@ TensorFlowV2Classifier, ] - CLASSIFIER_DECISION_TREE_TYPE = Union[ # pylint: disable=C0103 + CLASSIFIER_DECISION_TREE_TYPE = Union[ # pylint: disable=invalid-name ClassifierDecisionTree, LightGBMClassifier, ScikitlearnDecisionTreeClassifier, @@ -167,7 +168,7 @@ XGBoostClassifier, ] - CLASSIFIER_TYPE = Union[ # pylint: disable=C0103 + CLASSIFIER_TYPE = Union[ # pylint: disable=invalid-name Classifier, BlackBoxClassifier, CatBoostARTClassifier, @@ -195,13 +196,13 @@ CLASSIFIER_NEURALNETWORK_TYPE, ] - GENERATOR_TYPE = Union[TensorFlowGenerator, TensorFlowV2Generator] # pylint: disable=C0103 + GENERATOR_TYPE = Union[TensorFlowGenerator, TensorFlowV2Generator] # pylint: disable=invalid-name - REGRESSOR_TYPE = Union[ # pylint: disable=C0103 + REGRESSOR_TYPE = Union[ # pylint: disable=invalid-name ScikitlearnRegressor, ScikitlearnDecisionTreeRegressor, PyTorchRegressor, KerasRegressor, BlackBoxRegressor ] - OBJECT_DETECTOR_TYPE = Union[ # pylint: disable=C0103 + OBJECT_DETECTOR_TYPE = Union[ # pylint: disable=invalid-name ObjectDetector, PyTorchObjectDetector, PyTorchFasterRCNN, @@ -210,12 +211,12 @@ TensorFlowV2FasterRCNN, ] - SPEECH_RECOGNIZER_TYPE = Union[ # pylint: disable=C0103 + SPEECH_RECOGNIZER_TYPE = Union[ # pylint: disable=invalid-name PyTorchDeepSpeech, TensorFlowLingvoASR, ] - PYTORCH_ESTIMATOR_TYPE = Union[ # pylint: disable=C0103 + PYTORCH_ESTIMATOR_TYPE = Union[ # pylint: disable=invalid-name PyTorchClassifier, PyTorchDeepSpeech, PyTorchEstimator, @@ -225,21 +226,21 @@ PyTorchRegressor, ] - PYTORCH_OBJECT_DETECTOR_TYPE = Union[PyTorchObjectDetector] # pylint: disable=C0103 + PYTORCH_OBJECT_DETECTOR_TYPE = Union[PyTorchObjectDetector] # pylint: disable=invalid-name - KERAS_ESTIMATOR_TYPE = Union[ # pylint: disable=C0103 + KERAS_ESTIMATOR_TYPE = Union[ # pylint: disable=invalid-name KerasClassifier, KerasEstimator, KerasRegressor, ] - TENSORFLOWV2_ESTIMATOR_TYPE = Union[ # pylint: disable=C0103 + TENSORFLOWV2_ESTIMATOR_TYPE = Union[ # pylint: disable=invalid-name TensorFlowV2Classifier, TensorFlowV2Estimator, TensorFlowV2FasterRCNN, ] - ESTIMATOR_TYPE = Union[ # pylint: disable=C0103 + ESTIMATOR_TYPE = Union[ # pylint: disable=invalid-name CLASSIFIER_TYPE, REGRESSOR_TYPE, OBJECT_DETECTOR_TYPE, @@ -249,7 +250,7 @@ TENSORFLOWV2_ESTIMATOR_TYPE, ] - CLONABLE = Union[ # pylint: disable=C0103 + CLONABLE = Union[ # pylint: disable=invalid-name ScikitlearnClassifier, PyTorchClassifier, TensorFlowV2Classifier, @@ -259,10 +260,10 @@ XGBoostClassifier, ] - ABLATOR_TYPE = Union[BlockAblator, ColumnAblator] # pylint: disable=C0103 + ABLATOR_TYPE = Union[BlockAblator, 
ColumnAblator] # pylint: disable=invalid-name - CERTIFIER_TYPE = Union[PytorchDeepZ] # pylint: disable=C0103 - IBP_CERTIFIER_TYPE = Union[PyTorchIBPClassifier] # pylint: disable=C0103 + CERTIFIER_TYPE = Union[PytorchDeepZ] # pylint: disable=invalid-name + IBP_CERTIFIER_TYPE = Union[PyTorchIBPClassifier] # pylint: disable=invalid-name # --------------------------------------------------------------------------------------------------------- DEPRECATION @@ -383,7 +384,7 @@ def wrapper(*args, **kwargs): # ----------------------------------------------------------------------------------------------------- MATH OPERATIONS -def projection_l1_1(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> np.ndarray: +def projection_l1_1(values: np.ndarray, eps: int | float | np.ndarray) -> np.ndarray: """ This function computes the orthogonal projections of a batch of points on L1-balls of given radii The batch size is m = values.shape[0]. The points are flattened to dimension @@ -397,7 +398,7 @@ def projection_l1_1(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> n :param eps: The radii of the respective L1-balls :return: projections """ - # pylint: disable=C0103 + # pylint: disable=invalid-name shp = values.shape a = values.copy() @@ -468,7 +469,7 @@ def projection_l1_1(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> n return proj -def projection_l1_2(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> np.ndarray: +def projection_l1_2(values: np.ndarray, eps: int | float | np.ndarray) -> np.ndarray: """ This function computes the orthogonal projections of a batch of points on L1-balls of given radii The batch size is m = values.shape[0]. The points are flattened to dimension @@ -484,7 +485,7 @@ def projection_l1_2(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> n :param eps: The radii of the respective L1-balls :return: projections """ - # pylint: disable=C0103 + # pylint: disable=invalid-name shp = values.shape a = values.copy() n = np.prod(a.shape[1:]) @@ -523,8 +524,8 @@ def projection_l1_2(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> n def projection( values: np.ndarray, - eps: Union[int, float, np.ndarray], - norm_p: Union[int, float, str], + eps: int | float | np.ndarray, + norm_p: int | float | str, *, suboptimal: bool = True, ) -> np.ndarray: @@ -581,8 +582,8 @@ def projection( def random_sphere( nb_points: int, nb_dims: int, - radius: Union[int, float, np.ndarray], - norm: Union[int, float, str], + radius: int | float | np.ndarray, + norm: int | float | str, ) -> np.ndarray: """ Generate uniformly at random `m x n`-dimension points in the `norm`-norm ball with radius `radius` and centered @@ -632,9 +633,9 @@ def random_sphere( def uniform_sample_from_sphere_or_ball( nb_points: int, nb_dims: int, - radius: Union[int, float, np.ndarray], + radius: int | float | np.ndarray, sample_space: str = "ball", - norm: Union[int, float, str] = 2, + norm: int | float | str = 2, ) -> np.ndarray: """ Generate a sample of distributed independently and uniformly on the sphere (with respect to the given @@ -703,8 +704,8 @@ def uniform_sample_from_sphere_or_ball( def original_to_tanh( x_original: np.ndarray, - clip_min: Union[float, np.ndarray], - clip_max: Union[float, np.ndarray], + clip_min: float | np.ndarray, + clip_max: float | np.ndarray, tanh_smoother: float = 0.999999, ) -> np.ndarray: """ @@ -724,8 +725,8 @@ def original_to_tanh( def tanh_to_original( x_tanh: np.ndarray, - clip_min: Union[float, np.ndarray], - clip_max: Union[float, 
np.ndarray], + clip_min: float | np.ndarray, + clip_max: float | np.ndarray, ) -> np.ndarray: """ Transform input from tanh to original space. @@ -741,7 +742,7 @@ def tanh_to_original( # --------------------------------------------------------------------------------------------------- LABELS OPERATIONS -def to_categorical(labels: Union[np.ndarray, List[float]], nb_classes: Optional[int] = None) -> np.ndarray: +def to_categorical(labels: Union[np.ndarray, list[float]], nb_classes: int | None = None) -> np.ndarray: """ Convert an array of labels to binary class matrix. @@ -757,7 +758,7 @@ def to_categorical(labels: Union[np.ndarray, List[float]], nb_classes: Optional[ return categorical -def float_to_categorical(labels: np.ndarray, nb_classes: Optional[int] = None): +def float_to_categorical(labels: np.ndarray, nb_classes: int | None = None): """ Convert an array of floating point labels to binary class matrix. @@ -786,7 +787,7 @@ def floats_to_one_hot(labels: np.ndarray): :rtype: `np.ndarray` """ labels = np.array(labels) - for feature in labels.T: # pylint: disable=E1133 + for feature in labels.T: unique = np.unique(feature) unique.sort() for index, value in enumerate(unique): @@ -795,7 +796,7 @@ def floats_to_one_hot(labels: np.ndarray): def check_and_transform_label_format( - labels: np.ndarray, nb_classes: Optional[int], return_one_hot: bool = True + labels: np.ndarray, nb_classes: int | None, return_one_hot: bool = True ) -> np.ndarray: """ Check label format and transform to one-hot-encoded labels if necessary @@ -895,7 +896,7 @@ def second_most_likely_class(x: np.ndarray, classifier: "CLASSIFIER_TYPE") -> np ) -def get_label_conf(y_vec: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: +def get_label_conf(y_vec: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Returns the confidence and the label of the most probable class given a vector of class confidences @@ -944,7 +945,7 @@ def get_feature_values(x: np.ndarray, single_index_feature: bool) -> list: return values -def get_feature_index(feature: Union[int, slice]) -> Union[int, slice]: +def get_feature_index(feature: int | slice) -> int | slice: """ Returns a modified feature index: in case of a slice of size 1, returns the corresponding integer. In case of a slice with missing params, tries to fill them. Otherwise, returns the same value (integer or slice) as passed. @@ -968,7 +969,7 @@ def get_feature_index(feature: Union[int, slice]) -> Union[int, slice]: return slice(start, stop, step) -def remove_attacked_feature(attack_feature: Union[int, slice], non_numerical_features: Optional[List[int]]): +def remove_attacked_feature(attack_feature: int | slice, non_numerical_features: list[int] | None): """ Removes the attacked feature from the list of non-numeric features to encode. @@ -1044,7 +1045,7 @@ def compute_success( return np.sum(attack_success) / x_adv.shape[0] -def compute_accuracy(preds: np.ndarray, labels: np.ndarray, abstain: bool = True) -> Tuple[float, float]: +def compute_accuracy(preds: np.ndarray, labels: np.ndarray, abstain: bool = True) -> tuple[float, float]: """ Compute the accuracy rate and coverage rate of predictions In the case where predictions are abstained, those samples are ignored. 
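
One nuance in the `art/utils.py` hunks above: the module-level type aliases (`DATASET_TYPE`, `CLIP_VALUES_TYPE`, `CLASSIFIER_TYPE`, ...) keep `typing.Union` even though function signatures switch to `|`. A type alias is an ordinary assignment evaluated at import time, so the `annotations` future import does not apply to it, and quoted forward references cannot be joined with `|` on any interpreter. A small illustrative sketch (the alias and function names here are made up):

    from __future__ import annotations

    from typing import Union

    import numpy as np

    # Evaluated at import time: keep Union here. Something like
    # "SomeClass" | "OtherClass" raises a TypeError, and int | np.ndarray
    # only works at runtime on Python 3.10 or newer.
    ARRAY_OR_SCALAR = Union[int, float, np.ndarray]


    def clip_scaled(
        x: np.ndarray, low: ARRAY_OR_SCALAR, high: ARRAY_OR_SCALAR, scale: float | None = None
    ) -> np.ndarray:
        # Annotations, by contrast, stay unevaluated strings under the future
        # import, so "float | None" is safe even on older interpreters.
        if scale is not None:
            x = x * scale
        return np.clip(x, low, high)
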
@@ -1115,12 +1116,12 @@ def intersection_over_area(bbox_1: np.ndarray, bbox_2: np.ndarray) -> float: def non_maximum_suppression( - preds: Dict[str, np.ndarray], iou_threshold: float, confidence_threshold: Optional[float] = None -) -> Dict[str, np.ndarray]: + preds: dict[str, np.ndarray], iou_threshold: float, confidence_threshold: float | None = None +) -> dict[str, np.ndarray]: """ Perform non-maximum suppression on the predicted object detection labels of a single image. - :param preds: Predicted labels of format `Dict[str, np.ndarray]` for a single image. The fields of the Dict are + :param preds: Predicted labels of format `dict[str, np.ndarray]` for a single image. The fields of the dict are as follows: - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. @@ -1190,7 +1191,7 @@ def load_cifar10( :return: `(x_train, y_train), (x_test, y_test), min, max` """ - def load_batch(fpath: str) -> Tuple[np.ndarray, np.ndarray]: + def load_batch(fpath: str) -> tuple[np.ndarray, np.ndarray]: """ Utility function for loading CIFAR batches, as written in Keras. @@ -1564,7 +1565,7 @@ def _extract(full_path: str, path: str) -> bool: archive = tarfile.open(full_path, "r:gz") elif full_path.endswith("zip"): # pragma: no cover if zipfile.is_zipfile(full_path): - archive = zipfile.ZipFile(full_path) # pylint: disable=R1732 + archive = zipfile.ZipFile(full_path) # pylint: disable=consider-using-with else: return False else: @@ -1583,7 +1584,7 @@ def _extract(full_path: str, path: str) -> bool: def get_file( - filename: str, url: Union[str, List[str]], path: Optional[str] = None, extract: bool = False, verbose: bool = False + filename: str, url: Union[str, list[str]], path: str | None = None, extract: bool = False, verbose: bool = False ) -> str: """ Downloads a file from a URL if it not already in the cache. The file at indicated by `url` is downloaded to the @@ -1633,14 +1634,14 @@ def get_file( # [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:847) import ssl - ssl._create_default_https_context = ssl._create_unverified_context # pylint: disable=W0212 + ssl._create_default_https_context = ssl._create_unverified_context if verbose: with tqdm() as t_bar: - # pylint: disable=W0640 + # pylint: disable=cell-var-from-loop last_block = [0] - def progress_bar(blocks: int = 1, block_size: int = 1, total_size: Optional[int] = None): + def progress_bar(blocks: int = 1, block_size: int = 1, total_size: int | None = None): """ :param blocks: Number of blocks transferred so far [default: 1]. :param block_size: Size of each block (in tqdm units) [default: 1]. @@ -1684,7 +1685,7 @@ def make_directory(dir_path: str) -> None: os.makedirs(dir_path) -def clip_and_round(x: np.ndarray, clip_values: Optional["CLIP_VALUES_TYPE"], round_samples: float) -> np.ndarray: +def clip_and_round(x: np.ndarray, clip_values: "CLIP_VALUES_TYPE" | None, round_samples: float) -> np.ndarray: """ Rounds the input to the correct level of granularity. Useful to ensure data passed to classifier can be represented @@ -1709,8 +1710,8 @@ def preprocess( x: np.ndarray, y: np.ndarray, nb_classes: int = 10, - clip_values: Optional["CLIP_VALUES_TYPE"] = None, -) -> Tuple[np.ndarray, np.ndarray]: + clip_values: "CLIP_VALUES_TYPE" | None = None, +) -> tuple[np.ndarray, np.ndarray]: """ Scales `x` to [0, 1] and converts `y` to class categorical confidences. 
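
For the re-typed `non_maximum_suppression` helper above, a short usage sketch (assuming ART is installed; the box coordinates and scores are made-up values, and the detection dictionary is assumed to carry the usual `boxes`, `labels` and `scores` arrays):

    import numpy as np

    from art.utils import non_maximum_suppression

    # Two heavily overlapping detections of the same class; boxes are in
    # [x1, y1, x2, y2] format, as described in the docstring above.
    preds = {
        "boxes": np.array([[10.0, 10.0, 60.0, 60.0], [12.0, 11.0, 61.0, 59.0]], dtype=np.float32),
        "labels": np.array([1, 1], dtype=np.int64),
        "scores": np.array([0.90, 0.40], dtype=np.float32),
    }

    filtered = non_maximum_suppression(preds, iou_threshold=0.5, confidence_threshold=0.3)
    # The lower-scoring duplicate is suppressed; the result keeps the same keys.
    print(filtered["boxes"].shape)
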
@@ -1732,7 +1733,7 @@ def preprocess( return normalized_x, categorical_y -def segment_by_class(data: Union[np.ndarray, List[int]], classes: np.ndarray, num_classes: int) -> List[np.ndarray]: +def segment_by_class(data: Union[np.ndarray, list[int]], classes: np.ndarray, num_classes: int) -> list[np.ndarray]: """ Returns segmented data according to specified features. @@ -1742,7 +1743,7 @@ def segment_by_class(data: Union[np.ndarray, List[int]], classes: np.ndarray, nu :param num_classes: How many features. :return: Segmented data according to specified features. """ - by_class: List[List[int]] = [[] for _ in range(num_classes)] + by_class: list[list[int]] = [[] for _ in range(num_classes)] for indx, feature in enumerate(classes): if len(classes.shape) == 2 and classes.shape[1] > 1: assigned = int(np.argmax(feature)) @@ -1832,7 +1833,7 @@ def is_probability_array(array: np.ndarray) -> bool: return is_sum_1 and is_smaller_1 and is_larger_0 -def pad_sequence_input(x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: +def pad_sequence_input(x: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Apply padding to a batch of 1-dimensional samples such that it has shape of (batch_size, max_length). diff --git a/art/visualization.py b/art/visualization.py index 7cae41fb30..5975a76333 100644 --- a/art/visualization.py +++ b/art/visualization.py @@ -18,11 +18,11 @@ """ Module providing visualization functions. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import os.path -from typing import List, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -111,7 +111,7 @@ def save_image(image_array: np.ndarray, f_name: str) -> None: def plot_3d( points: np.ndarray, labels: np.ndarray, - colors: Optional[List[str]] = None, + colors: list[str] | None = None, save: bool = True, f_name: str = "", ) -> "matplotlib.figure.Figure": # pragma: no cover @@ -130,7 +130,7 @@ def plot_3d( :return: A figure object. """ # Disable warnings of unused import because all imports in this block are required - # pylint: disable=W0611 + # pylint: disable=unused-import # import matplotlib import matplotlib.pyplot as plt diff --git a/examples/adversarial_training_cifar10.py b/examples/adversarial_training_cifar10.py index 4cced09113..827659e21c 100644 --- a/examples/adversarial_training_cifar10.py +++ b/examples/adversarial_training_cifar10.py @@ -3,6 +3,7 @@ Trains a convolutional neural network on the CIFAR-10 dataset, then generated adversarial images using the DeepFool attack and retrains the network on the training set augmented with the adversarial images. 
""" +# noqa: E402 from __future__ import absolute_import, division, print_function, unicode_literals import logging diff --git a/examples/inverse_gan_author_utils.py b/examples/inverse_gan_author_utils.py index 7924caac8a..48f0a616a6 100644 --- a/examples/inverse_gan_author_utils.py +++ b/examples/inverse_gan_author_utils.py @@ -773,7 +773,7 @@ def _initialize_saver(self, prefixes=None, force=False, max_to_keep=5): if self.saver is not None and not force: return else: - if prefixes is None or not (type(prefixes) != list or type(prefixes) != tuple): + if prefixes is None or not (type(prefixes) != list or type(prefixes) != tuple): # noqa: E721 raise ValueError("Prefix of variables that needs saving are not defined") prefixes_str = "" diff --git a/pyproject.toml b/pyproject.toml index 42c0f19f31..d07b8ba895 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,3 +3,5 @@ line-length=120 [tool.ruff] exclude = [".venv", "contrib"] lint.ignore = ["F401"] +[tool.ruff.lint.per-file-ignores] + "examples/*" = ["E402"] diff --git a/tests/attacks/test_functionally_equivalent_extraction.py b/tests/attacks/test_functionally_equivalent_extraction.py index 8ad7209925..91a34510b2 100644 --- a/tests/attacks/test_functionally_equivalent_extraction.py +++ b/tests/attacks/test_functionally_equivalent_extraction.py @@ -18,23 +18,22 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging +from os.path import dirname, join import unittest -from os.path import dirname, join import numpy as np - import tensorflow as tf tf.compat.v1.disable_eager_execution() -from tensorflow.keras.models import load_model +from tensorflow.keras.models import load_model # noqa: E402 -from art.attacks.extraction.functionally_equivalent_extraction import FunctionallyEquivalentExtraction -from art.estimators.classification.keras import KerasClassifier -from art.estimators.estimator import BaseEstimator, NeuralNetworkMixin -from art.estimators.classification.classifier import ClassifierMixin +from art.attacks.extraction.functionally_equivalent_extraction import FunctionallyEquivalentExtraction # noqa: E402 +from art.estimators.classification.keras import KerasClassifier # noqa: E402 +from art.estimators.estimator import BaseEstimator, NeuralNetworkMixin # noqa: E402 +from art.estimators.classification.classifier import ClassifierMixin # noqa: E402 -from tests.utils import TestBase, master_seed -from tests.attacks.utils import backend_test_classifier_type_check_fail +from tests.utils import TestBase, master_seed # noqa: E402 +from tests.attacks.utils import backend_test_classifier_type_check_fail # noqa: E402 logger = logging.getLogger(__name__) From d4d4e5b01349ba2313786db24d189846aa44172a Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Tue, 6 Aug 2024 01:18:18 +0200 Subject: [PATCH 06/27] Fix warnings introduced by upgrades Signed-off-by: Beat Buesser --- .../evasion/adversarial_patch/adversarial_patch.py | 2 +- art/attacks/evasion/brendel_bethge.py | 3 ++- art/attacks/evasion/laser_attack/laser_attack.py | 4 ++-- art/estimators/certification/deep_z/pytorch.py | 4 +++- .../certification/derandomized_smoothing/pytorch.py | 8 ++++---- art/estimators/classification/blackbox.py | 4 ++-- art/estimators/keras.py | 2 +- art/estimators/regression/keras.py | 3 ++- art/estimators/speech_recognition/tensorflow_lingvo.py | 2 +- 9 files changed, 18 insertions(+), 14 deletions(-) diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch.py 
b/art/attacks/evasion/adversarial_patch/adversarial_patch.py index 3e0cb0a9ce..0c7ef6c933 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch.py @@ -71,7 +71,7 @@ def __init__( learning_rate: float = 5.0, max_iter: int = 500, batch_size: int = 16, - patch_shape: tuple[int | int | int] | None = None, + patch_shape: tuple[int, int, int] | None = None, targeted: bool = True, verbose: bool = True, ): diff --git a/art/attacks/evasion/brendel_bethge.py b/art/attacks/evasion/brendel_bethge.py index 69ed3f7ef3..bdf1622db0 100644 --- a/art/attacks/evasion/brendel_bethge.py +++ b/art/attacks/evasion/brendel_bethge.py @@ -44,7 +44,8 @@ """ from __future__ import annotations -# pylint: disable=invalid-name,missing-class-docstring,missing-function-docstring,old-non-ascii-name,unused-variable,unused-argument,chained-comparison,no-else-return,no-else-break,no-else-raise +# pylint: disable=invalid-name,missing-class-docstring,missing-function-docstring,old-non-ascii-name,unused-variable +# pylint: disable=unused-argument,chained-comparison,no-else-return,no-else-break,no-else-raise from typing import TYPE_CHECKING import logging diff --git a/art/attacks/evasion/laser_attack/laser_attack.py b/art/attacks/evasion/laser_attack/laser_attack.py index 60ab83c6fa..9eed01e1ea 100644 --- a/art/attacks/evasion/laser_attack/laser_attack.py +++ b/art/attacks/evasion/laser_attack/laser_attack.py @@ -133,7 +133,7 @@ def generate_parameters( def _generate_params_for_single_input( self, x: np.ndarray, y: int | None = None - ) -> tuple[tuple[AdversarialObject | None, int | None]]: + ) -> tuple[AdversarialObject | None, int | None]: """ Generate adversarial example params for a single image. @@ -170,7 +170,7 @@ def _check_params(self) -> None: def _attack_single_image( self, x: np.ndarray, y: int, confidence: float - ) -> tuple[tuple[AdversarialObject | None, int | None]]: + ) -> tuple[AdversarialObject | None, int | None]: """ Attack particular image with given class. diff --git a/art/estimators/certification/deep_z/pytorch.py b/art/estimators/certification/deep_z/pytorch.py index daeae79157..47536a1191 100644 --- a/art/estimators/certification/deep_z/pytorch.py +++ b/art/estimators/certification/deep_z/pytorch.py @@ -131,7 +131,9 @@ def forward( raise ValueError("for abstract forward mode, please provide both cent and eps") raise ValueError("forward_mode must be set to abstract or concrete") - def abstract_forward(self, cent: np.ndarray, eps: np.ndarray) -> tuple["torch.Tensor", "torch.Tensor"]: + def abstract_forward( + self, cent: np.ndarray, eps: np.ndarray + ) -> tuple["torch.Tensor", "torch.Tensor"]: # typing: ignore """ Do the forward pass through the NN with the given error terms and zonotope center. diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index d250d64fbe..0597de42d4 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -60,12 +60,12 @@ class PyTorchDeRandomizedSmoothing(DeRandomizedSmoothingMixin, PyTorchClassifier): """ - Interface class for the two De-randomized smoothing approaches supported by ART for pytorch. + Interface class for the two De-randomized smoothing approaches supported by ART for PyTorch. If a regular pytorch neural network is fed in then (De)Randomized Smoothing as introduced in Levine et al. (2020) is used. 
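
The `patch_shape` hunk above is a behaviourally silent but meaningful typing fix: `tuple[int | int | int]` collapses to `tuple[int]`, i.e. a 1-tuple, whereas the intended type is a fixed 3-tuple of integers. A hypothetical validation helper makes the distinction concrete (not ART code; the `(3, 224, 224)` value is only an example):

    from __future__ import annotations


    def check_patch_shape(patch_shape: tuple[int, int, int] | None) -> None:
        # tuple[int, int, int] means exactly three integer elements;
        # tuple[int | int | int] would have described a tuple holding a single int.
        if patch_shape is None:
            return
        if len(patch_shape) != 3 or not all(isinstance(dim, int) for dim in patch_shape):
            raise ValueError(f"patch_shape must be three integers, got {patch_shape!r}")


    check_patch_shape((3, 224, 224))  # OK
    check_patch_shape(None)           # OK - None is allowed by the annotation
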
- Otherwise, if a timm vision transfomer is fed in then Certified Patch Robustness via Smoothed Vision Transformers + Otherwise, if a timm vision transformer is fed in then Certified Patch Robustness via Smoothed Vision Transformers as introduced in Salman et al. (2021) is used. """ @@ -155,7 +155,7 @@ def __init__( # temporarily assign the original method to tmp_func tmp_func = timm.models.vision_transformer._create_vision_transformer - # overrride with ART's ViT creation function + # override with ART's ViT creation function timm.models.vision_transformer._create_vision_transformer = self.create_vision_transformer if isinstance(model, str): model = timm.create_model( @@ -383,7 +383,7 @@ def get_models(cls, generate_from_null: bool = False) -> list[str]: _ = cls( model=model, loss=torch.nn.CrossEntropyLoss(), - optimizer=torch.optim.SGD, + optimizer=torch.optim.SGD, # type: ignore optimizer_params={"lr": 0.01}, input_shape=(3, 32, 32), nb_classes=10, diff --git a/art/estimators/classification/blackbox.py b/art/estimators/classification/blackbox.py index 88b1df27d4..cbeb69fa77 100644 --- a/art/estimators/classification/blackbox.py +++ b/art/estimators/classification/blackbox.py @@ -47,7 +47,7 @@ class BlackBoxClassifier(ClassifierMixin, BaseEstimator): def __init__( self, - predict_fn: Callable[np.ndarray, np.ndarray], + predict_fn: Callable[[np.ndarray], np.ndarray], input_shape: tuple[int, ...], nb_classes: int, clip_values: "CLIP_VALUES_TYPE" | None = None, @@ -175,7 +175,7 @@ class BlackBoxClassifierNeuralNetwork(NeuralNetworkMixin, ClassifierMixin, BaseE def __init__( self, - predict_fn: Callable[np.ndarray, np.ndarray], + predict_fn: Callable[[np.ndarray], np.ndarray], input_shape: tuple[int, ...], nb_classes: int, channels_first: bool = True, diff --git a/art/estimators/keras.py b/art/estimators/keras.py index 2815dbc25d..76f389f2c9 100644 --- a/art/estimators/keras.py +++ b/art/estimators/keras.py @@ -123,7 +123,7 @@ def clone_for_refitting( run_eagerly=self.model.run_eagerly, ) - clone = type(self)(model=model) + clone = type(self)(model=model, channels_first=self.channels_first) params = self.get_params() del params["model"] clone.set_params(**params) diff --git a/art/estimators/regression/keras.py b/art/estimators/regression/keras.py index 028c0ea97a..f83f85de61 100644 --- a/art/estimators/regression/keras.py +++ b/art/estimators/regression/keras.py @@ -28,6 +28,7 @@ Any, Iterator, TYPE_CHECKING, + Union, ) import numpy as np @@ -49,7 +50,7 @@ logger = logging.getLogger(__name__) -KERAS_MODEL_TYPE = "keras.models.Model" | "tf.keras.models.Model" +KERAS_MODEL_TYPE = Union["keras.models.Model", "tf.keras.models.Model"] # pylint: disable=C0103 class KerasRegressor(RegressorMixin, KerasEstimator): diff --git a/art/estimators/speech_recognition/tensorflow_lingvo.py b/art/estimators/speech_recognition/tensorflow_lingvo.py index 73e08a5ff1..6d9bf31be9 100644 --- a/art/estimators/speech_recognition/tensorflow_lingvo.py +++ b/art/estimators/speech_recognition/tensorflow_lingvo.py @@ -183,7 +183,7 @@ def __init__( self._model = model self._task = task self._cluster = cluster - self._metrics: tuple[dict[str, "Tensor"] | dict[str, tuple["Tensor", "Tensor"]] | ...] | None = None + self._metrics: tuple[dict[str, "Tensor"] | dict[str, tuple["Tensor", "Tensor"]], ...] 
| None = None # add prediction and loss gradient ops to graph self._predict_batch_op: dict[str, "Tensor"] = self._predict_batch( From ba0bbfa2144e876cfbfdd5d92ca1753394f1c7a6 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Thu, 15 Aug 2024 15:28:33 +0200 Subject: [PATCH 07/27] Fix warnings introduced by upgrades Signed-off-by: Beat Buesser --- .../evasion/adversarial_patch/adversarial_patch_tensorflow.py | 2 +- art/attacks/poisoning/poisoning_attack_svm.py | 2 +- art/estimators/certification/deep_z/pytorch.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py index 80a773c0d2..4b67f73658 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py @@ -78,7 +78,7 @@ def __init__( learning_rate: float = 5.0, max_iter: int = 500, batch_size: int = 16, - patch_shape: tuple[int | int | int] | None = None, + patch_shape: tuple[int, int, int] | None = None, optimizer: str = "Adam", targeted: bool = True, summary_writer: str | bool | SummaryWriter = False, diff --git a/art/attacks/poisoning/poisoning_attack_svm.py b/art/attacks/poisoning/poisoning_attack_svm.py index 2b4fed5a6b..9ab3f629d4 100644 --- a/art/attacks/poisoning/poisoning_attack_svm.py +++ b/art/attacks/poisoning/poisoning_attack_svm.py @@ -18,7 +18,7 @@ """ This module implements poisoning attacks on Support Vector Machines. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/estimators/certification/deep_z/pytorch.py b/art/estimators/certification/deep_z/pytorch.py index 47536a1191..f42310da19 100644 --- a/art/estimators/certification/deep_z/pytorch.py +++ b/art/estimators/certification/deep_z/pytorch.py @@ -113,7 +113,7 @@ def forward_hook(input_module, hook_input, hook_output): def forward( self, cent: np.ndarray, eps: np.ndarray | None = None - ) -> "torch.Tensor" | tuple["torch.Tensor" | "torch.Tensor"]: + ) -> "torch.Tensor" | tuple["torch.Tensor", "torch.Tensor"]: """ Performs the neural network forward pass, either using abstract operations or concrete ones depending on the value of self.forward_mode From 584d0369672469306cb11e1e9702e68ec28fdca8 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Thu, 15 Aug 2024 16:06:34 +0200 Subject: [PATCH 08/27] Update future imports Signed-off-by: Beat Buesser --- art/attacks/evasion/adversarial_asr.py | 2 +- art/attacks/evasion/carlini.py | 2 +- art/attacks/evasion/composite_adversarial_attack.py | 2 +- art/attacks/evasion/deepfool.py | 2 +- art/attacks/evasion/elastic_net.py | 2 +- art/attacks/evasion/frame_saliency.py | 2 +- art/attacks/evasion/hclu.py | 2 +- art/attacks/evasion/newtonfool.py | 2 +- art/attacks/evasion/overload/overload.py | 2 +- art/attacks/evasion/saliency_map.py | 2 +- art/attacks/evasion/simba.py | 2 +- art/attacks/evasion/virtual_adversarial.py | 2 +- art/attacks/evasion/wasserstein.py | 2 +- art/attacks/evasion/zoo.py | 2 +- art/attacks/extraction/copycat_cnn.py | 2 +- art/attacks/extraction/knockoff_nets.py | 2 +- art/attacks/inference/attribute_inference/meminf_based.py | 2 +- .../inference/membership_inference/black_box_rule_based.py | 2 +- art/attacks/inference/model_inversion/mi_face.py | 2 +- art/attacks/inference/reconstruction/white_box.py 
| 2 +- .../poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py | 2 +- art/attacks/poisoning/bad_det/bad_det_gma.py | 2 +- art/attacks/poisoning/bad_det/bad_det_oda.py | 2 +- art/attacks/poisoning/bad_det/bad_det_oga.py | 2 +- art/attacks/poisoning/bad_det/bad_det_rma.py | 2 +- art/attacks/poisoning/gradient_matching_attack.py | 2 +- art/defences/detector/evasion/binary_input_detector.py | 2 +- art/defences/detector/evasion/evasion_detector.py | 2 +- art/defences/detector/poison/clustering_analyzer.py | 2 +- art/defences/detector/poison/ground_truth_evaluator.py | 2 +- art/defences/detector/poison/poison_filtering_defence.py | 2 +- art/defences/detector/poison/provenance_defense.py | 2 +- art/defences/detector/poison/spectral_signature_defense.py | 2 +- art/defences/postprocessor/postprocessor.py | 2 +- art/defences/preprocessor/cutmix/cutmix.py | 2 +- art/defences/preprocessor/cutmix/cutmix_pytorch.py | 2 +- art/defences/preprocessor/cutout/cutout.py | 2 +- art/defences/preprocessor/cutout/cutout_pytorch.py | 2 +- art/defences/preprocessor/feature_squeezing.py | 2 +- art/defences/preprocessor/jpeg_compression.py | 2 +- art/defences/preprocessor/label_smoothing.py | 2 +- art/defences/preprocessor/mixup/mixup.py | 2 +- art/defences/preprocessor/mixup/mixup_pytorch.py | 2 +- art/defences/preprocessor/mp3_compression.py | 2 +- art/defences/preprocessor/mp3_compression_pytorch.py | 2 +- art/defences/preprocessor/resample.py | 3 ++- art/defences/preprocessor/thermometer_encoding.py | 2 +- art/defences/preprocessor/video_compression.py | 2 +- art/defences/preprocessor/video_compression_pytorch.py | 2 +- art/defences/trainer/trainer.py | 2 +- art/defences/transformer/evasion/defensive_distillation.py | 2 +- art/defences/transformer/poisoning/strip.py | 2 +- art/defences/transformer/transformer.py | 2 +- art/estimators/certification/abstain.py | 2 +- .../certification/derandomized_smoothing/derandomized.py | 2 +- art/estimators/certification/object_seeker/object_seeker.py | 2 +- art/estimators/keras.py | 2 +- art/estimators/mxnet.py | 2 +- art/preprocessing/standardisation_mean_std/utils.py | 1 + 59 files changed, 60 insertions(+), 58 deletions(-) diff --git a/art/attacks/evasion/adversarial_asr.py b/art/attacks/evasion/adversarial_asr.py index 71a5408250..0c7d4d94b7 100644 --- a/art/attacks/evasion/adversarial_asr.py +++ b/art/attacks/evasion/adversarial_asr.py @@ -21,7 +21,7 @@ | Paper link: https://arxiv.org/abs/1801.01944 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/evasion/carlini.py b/art/attacks/evasion/carlini.py index a711126d72..53106f043e 100644 --- a/art/attacks/evasion/carlini.py +++ b/art/attacks/evasion/carlini.py @@ -25,7 +25,7 @@ | Paper link: https://arxiv.org/abs/1608.04644 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/evasion/composite_adversarial_attack.py b/art/attacks/evasion/composite_adversarial_attack.py index 50fb79faee..a72a620135 100644 --- a/art/attacks/evasion/composite_adversarial_attack.py +++ b/art/attacks/evasion/composite_adversarial_attack.py @@ -23,7 +23,7 @@ | Paper link: https://arxiv.org/abs/2202.04235 """ -from __future__ import 
absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/attacks/evasion/deepfool.py b/art/attacks/evasion/deepfool.py index 14da64d8f1..ee385a6783 100644 --- a/art/attacks/evasion/deepfool.py +++ b/art/attacks/evasion/deepfool.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1511.04599 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/evasion/elastic_net.py b/art/attacks/evasion/elastic_net.py index 7c20b322e8..cca14d9586 100644 --- a/art/attacks/evasion/elastic_net.py +++ b/art/attacks/evasion/elastic_net.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1709.04114 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/evasion/frame_saliency.py b/art/attacks/evasion/frame_saliency.py index b5b28fc55a..941ac78e01 100644 --- a/art/attacks/evasion/frame_saliency.py +++ b/art/attacks/evasion/frame_saliency.py @@ -21,7 +21,7 @@ | Paper link: https://arxiv.org/abs/1811.11875 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/evasion/hclu.py b/art/attacks/evasion/hclu.py index fff41d2df1..efe626b5ec 100644 --- a/art/attacks/evasion/hclu.py +++ b/art/attacks/evasion/hclu.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1812.02606 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import copy import logging diff --git a/art/attacks/evasion/newtonfool.py b/art/attacks/evasion/newtonfool.py index 9187f76028..2e1416c2e7 100644 --- a/art/attacks/evasion/newtonfool.py +++ b/art/attacks/evasion/newtonfool.py @@ -20,7 +20,7 @@ | Paper link: http://doi.acm.org/10.1145/3134600.3134635 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/evasion/overload/overload.py b/art/attacks/evasion/overload/overload.py index 19da29451c..0002e8c030 100644 --- a/art/attacks/evasion/overload/overload.py +++ b/art/attacks/evasion/overload/overload.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/2304.05370 """ - +from __future__ import annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/evasion/saliency_map.py b/art/attacks/evasion/saliency_map.py index 9322775b1f..896bbc296b 100644 --- a/art/attacks/evasion/saliency_map.py +++ b/art/attacks/evasion/saliency_map.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1511.07528 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git 
a/art/attacks/evasion/simba.py b/art/attacks/evasion/simba.py index bb4fac4c02..8eb793cd46 100644 --- a/art/attacks/evasion/simba.py +++ b/art/attacks/evasion/simba.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1905.07121 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/evasion/virtual_adversarial.py b/art/attacks/evasion/virtual_adversarial.py index 2441289627..4b3f17460a 100644 --- a/art/attacks/evasion/virtual_adversarial.py +++ b/art/attacks/evasion/virtual_adversarial.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1507.00677 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/evasion/wasserstein.py b/art/attacks/evasion/wasserstein.py index c25d376ddc..9ac9abf065 100644 --- a/art/attacks/evasion/wasserstein.py +++ b/art/attacks/evasion/wasserstein.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1902.07906 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/evasion/zoo.py b/art/attacks/evasion/zoo.py index cc543edc94..9686b93665 100644 --- a/art/attacks/evasion/zoo.py +++ b/art/attacks/evasion/zoo.py @@ -22,7 +22,7 @@ | Paper link: https://arxiv.org/abs/1708.03999 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import Any, TYPE_CHECKING diff --git a/art/attacks/extraction/copycat_cnn.py b/art/attacks/extraction/copycat_cnn.py index 28fa392ab4..dc06f96bf0 100644 --- a/art/attacks/extraction/copycat_cnn.py +++ b/art/attacks/extraction/copycat_cnn.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1806.05476 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/extraction/knockoff_nets.py b/art/attacks/extraction/knockoff_nets.py index 8ba20bb668..5336e74e13 100644 --- a/art/attacks/extraction/knockoff_nets.py +++ b/art/attacks/extraction/knockoff_nets.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1812.02766 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/inference/attribute_inference/meminf_based.py b/art/attacks/inference/attribute_inference/meminf_based.py index f380f48f4c..a540164f78 100644 --- a/art/attacks/inference/attribute_inference/meminf_based.py +++ b/art/attacks/inference/attribute_inference/meminf_based.py @@ -18,7 +18,7 @@ """ This module implements attribute inference attacks using membership inference attacks. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/inference/membership_inference/black_box_rule_based.py b/art/attacks/inference/membership_inference/black_box_rule_based.py index a703accf67..1638763814 100644 --- a/art/attacks/inference/membership_inference/black_box_rule_based.py +++ b/art/attacks/inference/membership_inference/black_box_rule_based.py @@ -19,7 +19,7 @@ This module implements membership inference attacks. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/inference/model_inversion/mi_face.py b/art/attacks/inference/model_inversion/mi_face.py index 26a478e268..44a7c76ef4 100644 --- a/art/attacks/inference/model_inversion/mi_face.py +++ b/art/attacks/inference/model_inversion/mi_face.py @@ -20,7 +20,7 @@ | Paper link: https://dl.acm.org/doi/10.1145/2810103.2813677 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/inference/reconstruction/white_box.py b/art/attacks/inference/reconstruction/white_box.py index e15c663feb..fb4c9178f7 100644 --- a/art/attacks/inference/reconstruction/white_box.py +++ b/art/attacks/inference/reconstruction/white_box.py @@ -18,7 +18,7 @@ """ This module implements reconstruction attacks. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py b/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py index b7f9edb9ac..40de9df3a6 100644 --- a/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py +++ b/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py @@ -18,7 +18,7 @@ """ This module implements poisoning attacks on DGMs. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/attacks/poisoning/bad_det/bad_det_gma.py b/art/attacks/poisoning/bad_det/bad_det_gma.py index 1501094e81..a5d5d48f4d 100644 --- a/art/attacks/poisoning/bad_det/bad_det_gma.py +++ b/art/attacks/poisoning/bad_det/bad_det_gma.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/2205.14497 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/attacks/poisoning/bad_det/bad_det_oda.py b/art/attacks/poisoning/bad_det/bad_det_oda.py index 1d144f14bb..065413cbbf 100644 --- a/art/attacks/poisoning/bad_det/bad_det_oda.py +++ b/art/attacks/poisoning/bad_det/bad_det_oda.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/2205.14497 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/attacks/poisoning/bad_det/bad_det_oga.py b/art/attacks/poisoning/bad_det/bad_det_oga.py index ddb6385d06..4988619e4b 100644 --- a/art/attacks/poisoning/bad_det/bad_det_oga.py +++ b/art/attacks/poisoning/bad_det/bad_det_oga.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/2205.14497 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/attacks/poisoning/bad_det/bad_det_rma.py b/art/attacks/poisoning/bad_det/bad_det_rma.py index 1e11deb847..42302294ae 100644 --- a/art/attacks/poisoning/bad_det/bad_det_rma.py +++ b/art/attacks/poisoning/bad_det/bad_det_rma.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/2205.14497 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/attacks/poisoning/gradient_matching_attack.py b/art/attacks/poisoning/gradient_matching_attack.py index a90c4dff97..fa4ede5699 100644 --- a/art/attacks/poisoning/gradient_matching_attack.py +++ b/art/attacks/poisoning/gradient_matching_attack.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/2009.02276 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import Any, TYPE_CHECKING diff --git a/art/defences/detector/evasion/binary_input_detector.py b/art/defences/detector/evasion/binary_input_detector.py index f1990238ca..35b0d3f1d0 100644 --- a/art/defences/detector/evasion/binary_input_detector.py +++ b/art/defences/detector/evasion/binary_input_detector.py @@ -19,7 +19,7 @@ Module containing different methods for the detection of adversarial examples. All models are considered to be binary detectors. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/defences/detector/evasion/evasion_detector.py b/art/defences/detector/evasion/evasion_detector.py index db6b82c970..0bdf77170b 100644 --- a/art/defences/detector/evasion/evasion_detector.py +++ b/art/defences/detector/evasion/evasion_detector.py @@ -18,7 +18,7 @@ """ This module implements the abstract base class for all evasion detectors. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc from typing import Any diff --git a/art/defences/detector/poison/clustering_analyzer.py b/art/defences/detector/poison/clustering_analyzer.py index ddef6184a2..cc6aaab479 100644 --- a/art/defences/detector/poison/clustering_analyzer.py +++ b/art/defences/detector/poison/clustering_analyzer.py @@ -18,7 +18,7 @@ """ This module implements methodologies to analyze clusters and determine whether they are poisonous. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import Any diff --git a/art/defences/detector/poison/ground_truth_evaluator.py b/art/defences/detector/poison/ground_truth_evaluator.py index 095f52b448..73a033631b 100644 --- a/art/defences/detector/poison/ground_truth_evaluator.py +++ b/art/defences/detector/poison/ground_truth_evaluator.py @@ -18,7 +18,7 @@ """ This module implements classes to evaluate the performance of poison detection methods. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import json import logging diff --git a/art/defences/detector/poison/poison_filtering_defence.py b/art/defences/detector/poison/poison_filtering_defence.py index 685e2d617d..a50d4c22c1 100644 --- a/art/defences/detector/poison/poison_filtering_defence.py +++ b/art/defences/detector/poison/poison_filtering_defence.py @@ -18,7 +18,7 @@ """ This module implements the abstract base class for all poison filtering defences. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc import sys diff --git a/art/defences/detector/poison/provenance_defense.py b/art/defences/detector/poison/provenance_defense.py index 41ffd32d85..c426a23c65 100644 --- a/art/defences/detector/poison/provenance_defense.py +++ b/art/defences/detector/poison/provenance_defense.py @@ -20,7 +20,7 @@ | Paper link: https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8473440 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from copy import deepcopy diff --git a/art/defences/detector/poison/spectral_signature_defense.py b/art/defences/detector/poison/spectral_signature_defense.py index 53fa8f44f8..538f17edbc 100644 --- a/art/defences/detector/poison/spectral_signature_defense.py +++ b/art/defences/detector/poison/spectral_signature_defense.py @@ -23,7 +23,7 @@ | Please keep in mind the limitations of defenses. For more information on the limitations of this specific defense, see https://arxiv.org/abs/1905.13409 . """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations from typing import TYPE_CHECKING diff --git a/art/defences/postprocessor/postprocessor.py b/art/defences/postprocessor/postprocessor.py index 9603a5a2cb..a538e22db3 100644 --- a/art/defences/postprocessor/postprocessor.py +++ b/art/defences/postprocessor/postprocessor.py @@ -18,7 +18,7 @@ """ This module implements the abstract base class for defences that post-process classifier output. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc diff --git a/art/defences/preprocessor/cutmix/cutmix.py b/art/defences/preprocessor/cutmix/cutmix.py index 00d8a67f39..6450be74b9 100644 --- a/art/defences/preprocessor/cutmix/cutmix.py +++ b/art/defences/preprocessor/cutmix/cutmix.py @@ -24,7 +24,7 @@ see https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/defences/preprocessor/cutmix/cutmix_pytorch.py b/art/defences/preprocessor/cutmix/cutmix_pytorch.py index 3f549a90db..471620431c 100644 --- a/art/defences/preprocessor/cutmix/cutmix_pytorch.py +++ b/art/defences/preprocessor/cutmix/cutmix_pytorch.py @@ -24,7 +24,7 @@ see https://arxiv.org/abs/1803.09868 . 
For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/defences/preprocessor/cutout/cutout.py b/art/defences/preprocessor/cutout/cutout.py index d8b9f3fca2..212785f82f 100644 --- a/art/defences/preprocessor/cutout/cutout.py +++ b/art/defences/preprocessor/cutout/cutout.py @@ -24,7 +24,7 @@ see https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/defences/preprocessor/cutout/cutout_pytorch.py b/art/defences/preprocessor/cutout/cutout_pytorch.py index bbcebc86a8..8e36df3988 100644 --- a/art/defences/preprocessor/cutout/cutout_pytorch.py +++ b/art/defences/preprocessor/cutout/cutout_pytorch.py @@ -24,7 +24,7 @@ see https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/defences/preprocessor/feature_squeezing.py b/art/defences/preprocessor/feature_squeezing.py index b4467499ef..ea27e328bb 100644 --- a/art/defences/preprocessor/feature_squeezing.py +++ b/art/defences/preprocessor/feature_squeezing.py @@ -24,7 +24,7 @@ https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/defences/preprocessor/jpeg_compression.py b/art/defences/preprocessor/jpeg_compression.py index 157de04ca5..5a947022cb 100644 --- a/art/defences/preprocessor/jpeg_compression.py +++ b/art/defences/preprocessor/jpeg_compression.py @@ -24,7 +24,7 @@ https://arxiv.org/abs/1802.00420 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations from io import BytesIO import logging diff --git a/art/defences/preprocessor/label_smoothing.py b/art/defences/preprocessor/label_smoothing.py index 6ceae3de55..f990203adc 100644 --- a/art/defences/preprocessor/label_smoothing.py +++ b/art/defences/preprocessor/label_smoothing.py @@ -24,7 +24,7 @@ | Please keep in mind the limitations of defences. For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 . 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/defences/preprocessor/mixup/mixup.py b/art/defences/preprocessor/mixup/mixup.py index ea4bf9b1d6..2749b1ba81 100644 --- a/art/defences/preprocessor/mixup/mixup.py +++ b/art/defences/preprocessor/mixup/mixup.py @@ -24,7 +24,7 @@ see https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/defences/preprocessor/mixup/mixup_pytorch.py b/art/defences/preprocessor/mixup/mixup_pytorch.py index feb9ba08f9..c908263557 100644 --- a/art/defences/preprocessor/mixup/mixup_pytorch.py +++ b/art/defences/preprocessor/mixup/mixup_pytorch.py @@ -24,7 +24,7 @@ see https://arxiv.org/abs/1803.09868 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/defences/preprocessor/mp3_compression.py b/art/defences/preprocessor/mp3_compression.py index 7fc5280079..b1b86bdb09 100644 --- a/art/defences/preprocessor/mp3_compression.py +++ b/art/defences/preprocessor/mp3_compression.py @@ -23,7 +23,7 @@ | Please keep in mind the limitations of defences. For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from io import BytesIO diff --git a/art/defences/preprocessor/mp3_compression_pytorch.py b/art/defences/preprocessor/mp3_compression_pytorch.py index def6629658..50f772119f 100644 --- a/art/defences/preprocessor/mp3_compression_pytorch.py +++ b/art/defences/preprocessor/mp3_compression_pytorch.py @@ -21,7 +21,7 @@ | Please keep in mind the limitations of defences. For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/defences/preprocessor/resample.py b/art/defences/preprocessor/resample.py index 0b8b7324cb..cda43f5090 100644 --- a/art/defences/preprocessor/resample.py +++ b/art/defences/preprocessor/resample.py @@ -23,8 +23,9 @@ | Please keep in mind the limitations of defences. For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705. """ -import logging +from __future__ import annotations +import logging import numpy as np diff --git a/art/defences/preprocessor/thermometer_encoding.py b/art/defences/preprocessor/thermometer_encoding.py index e0038a42d4..aa28f19fe9 100644 --- a/art/defences/preprocessor/thermometer_encoding.py +++ b/art/defences/preprocessor/thermometer_encoding.py @@ -24,7 +24,7 @@ see https://arxiv.org/abs/1802.00420 . 
For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/defences/preprocessor/video_compression.py b/art/defences/preprocessor/video_compression.py index f19f29c483..14744f036c 100644 --- a/art/defences/preprocessor/video_compression.py +++ b/art/defences/preprocessor/video_compression.py @@ -21,7 +21,7 @@ | Please keep in mind the limitations of defences. For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging import os diff --git a/art/defences/preprocessor/video_compression_pytorch.py b/art/defences/preprocessor/video_compression_pytorch.py index 53f8a3bed8..7dbbd34880 100644 --- a/art/defences/preprocessor/video_compression_pytorch.py +++ b/art/defences/preprocessor/video_compression_pytorch.py @@ -21,7 +21,7 @@ | Please keep in mind the limitations of defences. For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/defences/trainer/trainer.py b/art/defences/trainer/trainer.py index 7e0ad326d5..bb6f66d7fe 100644 --- a/art/defences/trainer/trainer.py +++ b/art/defences/trainer/trainer.py @@ -18,7 +18,7 @@ """ This module implements the abstract base class for defences that adversarially train models. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc from typing import TYPE_CHECKING diff --git a/art/defences/transformer/evasion/defensive_distillation.py b/art/defences/transformer/evasion/defensive_distillation.py index 3c2f8eee26..2ba902b49b 100644 --- a/art/defences/transformer/evasion/defensive_distillation.py +++ b/art/defences/transformer/evasion/defensive_distillation.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1511.04508 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/defences/transformer/poisoning/strip.py b/art/defences/transformer/poisoning/strip.py index 45c7e60502..3ffdf57414 100644 --- a/art/defences/transformer/poisoning/strip.py +++ b/art/defences/transformer/poisoning/strip.py @@ -20,7 +20,7 @@ | Paper link: https://arxiv.org/abs/1902.06531 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging from typing import TYPE_CHECKING diff --git a/art/defences/transformer/transformer.py b/art/defences/transformer/transformer.py index da6419db5a..5ceb217b39 100644 --- a/art/defences/transformer/transformer.py +++ b/art/defences/transformer/transformer.py @@ -18,7 +18,7 @@ """ This module implements the abstract base class for defences that transform a classifier into a more robust classifier. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc from typing import TYPE_CHECKING diff --git a/art/estimators/certification/abstain.py b/art/estimators/certification/abstain.py index 78f8b004c6..1fea341443 100644 --- a/art/estimators/certification/abstain.py +++ b/art/estimators/certification/abstain.py @@ -19,7 +19,7 @@ This module implements a mixin to be added to classifier so that they may abstain from classification. 
""" -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/estimators/certification/derandomized_smoothing/derandomized.py b/art/estimators/certification/derandomized_smoothing/derandomized.py index 9e2ee6ca0d..b7a3a0068b 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized.py @@ -23,7 +23,7 @@ | Paper link: https://arxiv.org/abs/2002.10733 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations from abc import ABC, abstractmethod import numpy as np diff --git a/art/estimators/certification/object_seeker/object_seeker.py b/art/estimators/certification/object_seeker/object_seeker.py index 5af59852af..7401121df8 100644 --- a/art/estimators/certification/object_seeker/object_seeker.py +++ b/art/estimators/certification/object_seeker/object_seeker.py @@ -42,7 +42,7 @@ | Paper link: https://arxiv.org/abs/2202.01811 """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import abc import logging diff --git a/art/estimators/keras.py b/art/estimators/keras.py index 76f389f2c9..22551e6dbb 100644 --- a/art/estimators/keras.py +++ b/art/estimators/keras.py @@ -18,7 +18,7 @@ """ This module implements the abstract estimator `KerasEstimator` for Keras models. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/estimators/mxnet.py b/art/estimators/mxnet.py index 7d7daf8c77..e293ef7969 100644 --- a/art/estimators/mxnet.py +++ b/art/estimators/mxnet.py @@ -18,7 +18,7 @@ """ This module implements the abstract estimator `MXEstimator` for MXNet Gluon models. """ -from __future__ import absolute_import, division, print_function, unicode_literals +from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/preprocessing/standardisation_mean_std/utils.py b/art/preprocessing/standardisation_mean_std/utils.py index 87c1392212..04fe15be87 100644 --- a/art/preprocessing/standardisation_mean_std/utils.py +++ b/art/preprocessing/standardisation_mean_std/utils.py @@ -18,6 +18,7 @@ """ This module implements utilities for standardisation with mean and standard deviation. 
""" +from __future__ import annotations from typing import TYPE_CHECKING From 35b9e094870b6a569c4791fce2920078eeea90e3 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Thu, 15 Aug 2024 16:12:31 +0200 Subject: [PATCH 09/27] Add workflow for Python 3.11 and PyTorch 2.3 Signed-off-by: Beat Buesser --- .github/workflows/ci-pytorch.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci-pytorch.yml b/.github/workflows/ci-pytorch.yml index fd0f47675e..de0414c664 100644 --- a/.github/workflows/ci-pytorch.yml +++ b/.github/workflows/ci-pytorch.yml @@ -40,6 +40,12 @@ jobs: torch: 2.2.1 torchvision: 0.17.1+cpu torchaudio: 2.2.1 + - name: PyTorch 2.3.1 (Python 3.11) + framework: pytorch + python: '3.11' + torch: 2.3.1 + torchvision: 0.18.1+cpu + torchaudio: 2.3.1 name: ${{ matrix.name }} steps: From 15b55d19e42dbde4630e252a330203d1e80d7ae8 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Thu, 15 Aug 2024 17:03:49 +0200 Subject: [PATCH 10/27] Fix new typing Signed-off-by: Beat Buesser --- .../expectation_over_transformation/pytorch.py | 2 ++ art/utils.py | 10 +++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/art/preprocessing/expectation_over_transformation/pytorch.py b/art/preprocessing/expectation_over_transformation/pytorch.py index 209a2ed5f6..7d4f821e81 100644 --- a/art/preprocessing/expectation_over_transformation/pytorch.py +++ b/art/preprocessing/expectation_over_transformation/pytorch.py @@ -18,6 +18,8 @@ """ This module defines a base class for EoT in PyTorch. """ +from __future__ import annotations + from abc import abstractmethod import logging from typing import TYPE_CHECKING diff --git a/art/utils.py b/art/utils.py index fa12a24310..fa84c37a97 100644 --- a/art/utils.py +++ b/art/utils.py @@ -32,7 +32,7 @@ import zipfile from functools import wraps from inspect import signature -from typing import TYPE_CHECKING, Optional, Union +from typing import TYPE_CHECKING, Optional, Tuple, Union import numpy as np import six @@ -50,10 +50,10 @@ # ------------------------------------------------------------------------------------------------- CONSTANTS AND TYPES -DATASET_TYPE = tuple[ # pylint: disable=invalid-name - tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray], float, float +DATASET_TYPE = Tuple[ # pylint: disable=invalid-name + Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray], float, float ] -CLIP_VALUES_TYPE = tuple[Union[int, float, np.ndarray], Union[int, float, np.ndarray]] # pylint: disable=invalid-name +CLIP_VALUES_TYPE = Tuple[Union[int, float, np.ndarray], Union[int, float, np.ndarray]] # pylint: disable=invalid-name if TYPE_CHECKING: # pylint: disable=cyclic-import @@ -61,7 +61,7 @@ PREPROCESSING_TYPE = Optional[ # pylint: disable=invalid-name Union[ - tuple[Union[int, float, np.ndarray], Union[int, float, np.ndarray]], Preprocessor, tuple[Preprocessor, ...] + Tuple[Union[int, float, np.ndarray], Union[int, float, np.ndarray]], Preprocessor, Tuple[Preprocessor, ...] 
] ] From 21197dc89bbefd47f8e84b5a99fe66de29f85567 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Thu, 15 Aug 2024 17:19:24 +0200 Subject: [PATCH 11/27] Fix new typing Signed-off-by: Beat Buesser --- art/estimators/classification/pytorch.py | 1 - art/estimators/classification/scikitlearn.py | 1 - art/estimators/classification/tensorflow.py | 1 - art/estimators/encoding/encoder.py | 1 + art/estimators/generation/generator.py | 2 ++ art/estimators/object_detection/object_detector.py | 1 + art/estimators/object_tracking/object_tracker.py | 1 + art/estimators/pytorch.py | 2 ++ art/estimators/regression/pytorch.py | 1 - art/estimators/regression/regressor.py | 2 ++ art/estimators/speech_recognition/speech_recognizer.py | 2 ++ art/estimators/tensorflow.py | 2 ++ requirements_test.txt | 2 +- 13 files changed, 14 insertions(+), 5 deletions(-) diff --git a/art/estimators/classification/pytorch.py b/art/estimators/classification/pytorch.py index 48ef96fb4c..a14558f221 100644 --- a/art/estimators/classification/pytorch.py +++ b/art/estimators/classification/pytorch.py @@ -18,7 +18,6 @@ """ This module implements the classifier `PyTorchClassifier` for PyTorch models. """ - from __future__ import absolute_import, division, print_function, unicode_literals, annotations import copy diff --git a/art/estimators/classification/scikitlearn.py b/art/estimators/classification/scikitlearn.py index 0ba012ea05..b72cd94015 100644 --- a/art/estimators/classification/scikitlearn.py +++ b/art/estimators/classification/scikitlearn.py @@ -18,7 +18,6 @@ """ This module implements the classifiers for scikit-learn models. """ - from __future__ import absolute_import, division, print_function, unicode_literals, annotations from collections.abc import Callable diff --git a/art/estimators/classification/tensorflow.py b/art/estimators/classification/tensorflow.py index 272174066e..88629213b3 100644 --- a/art/estimators/classification/tensorflow.py +++ b/art/estimators/classification/tensorflow.py @@ -18,7 +18,6 @@ """ This module implements the classifier `TensorFlowClassifier` for TensorFlow models. """ - from __future__ import absolute_import, division, print_function, unicode_literals, annotations from collections.abc import Callable diff --git a/art/estimators/encoding/encoder.py b/art/estimators/encoding/encoder.py index b9c0225004..8be23c3790 100644 --- a/art/estimators/encoding/encoder.py +++ b/art/estimators/encoding/encoder.py @@ -18,6 +18,7 @@ """ This module implements mixin abstract base classes defining properties for all encoders in ART. """ +from __future__ import annotations import abc diff --git a/art/estimators/generation/generator.py b/art/estimators/generation/generator.py index 6d9dc004bf..62057bfd77 100644 --- a/art/estimators/generation/generator.py +++ b/art/estimators/generation/generator.py @@ -18,6 +18,8 @@ """ This module implements mixin abstract base classes defining properties for all generators in ART. """ +from __future__ import annotations + import abc diff --git a/art/estimators/object_detection/object_detector.py b/art/estimators/object_detection/object_detector.py index dbe43e8fa4..dbf78e0d42 100644 --- a/art/estimators/object_detection/object_detector.py +++ b/art/estimators/object_detection/object_detector.py @@ -18,6 +18,7 @@ """ This module implements mixin abstract base class for all object detectors in ART. 
""" +from __future__ import annotations from abc import ABC, abstractmethod diff --git a/art/estimators/object_tracking/object_tracker.py b/art/estimators/object_tracking/object_tracker.py index b0a9bc8e65..3ac2e2c1f2 100644 --- a/art/estimators/object_tracking/object_tracker.py +++ b/art/estimators/object_tracking/object_tracker.py @@ -18,6 +18,7 @@ """ This module implements mixin abstract base class for all object trackers in ART. """ +from __future__ import annotations from abc import ABC, abstractmethod diff --git a/art/estimators/pytorch.py b/art/estimators/pytorch.py index 970e24503c..cc264afef6 100644 --- a/art/estimators/pytorch.py +++ b/art/estimators/pytorch.py @@ -18,6 +18,8 @@ """ This module implements the abstract estimator `PyTorchEstimator` for PyTorch models. """ +from __future__ import annotations + import logging from typing import TYPE_CHECKING, Any diff --git a/art/estimators/regression/pytorch.py b/art/estimators/regression/pytorch.py index ba4486465b..ee96bdea8d 100644 --- a/art/estimators/regression/pytorch.py +++ b/art/estimators/regression/pytorch.py @@ -18,7 +18,6 @@ """ This module implements the regressor `PyTorchRegressor` for PyTorch models. """ - from __future__ import absolute_import, division, print_function, unicode_literals, annotations import copy diff --git a/art/estimators/regression/regressor.py b/art/estimators/regression/regressor.py index 9b6deb6c1d..3bf9fb3bc7 100644 --- a/art/estimators/regression/regressor.py +++ b/art/estimators/regression/regressor.py @@ -18,8 +18,10 @@ """ This module implements mixin abstract base class for all regressors in ART. """ +from __future__ import annotations from abc import ABC + from art.estimators.estimator import BaseEstimator diff --git a/art/estimators/speech_recognition/speech_recognizer.py b/art/estimators/speech_recognition/speech_recognizer.py index 0778a70eb6..17a710d098 100644 --- a/art/estimators/speech_recognition/speech_recognizer.py +++ b/art/estimators/speech_recognition/speech_recognizer.py @@ -19,6 +19,8 @@ This module implements mixin abstract base class and mixin abstract framework-specific classes for all speech recognizers in ART. """ +from __future__ import annotations + from abc import ABC, abstractmethod from typing import TYPE_CHECKING diff --git a/art/estimators/tensorflow.py b/art/estimators/tensorflow.py index 0a5aebfed7..664237d2e4 100644 --- a/art/estimators/tensorflow.py +++ b/art/estimators/tensorflow.py @@ -18,6 +18,8 @@ """ This module implements the abstract estimators `TensorFlowEstimator` and `TensorFlowV2Estimator` for TensorFlow models. 
""" +from __future__ import annotations + import logging from typing import Any, TYPE_CHECKING diff --git a/requirements_test.txt b/requirements_test.txt index 6e79d9130a..e9c7f64875 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -14,7 +14,7 @@ ffmpeg-python==0.2.0 cma==3.3.0 pandas==2.2.1 librosa==0.10.1 -numba~=0.56.4 +numba~=0.60.0 opencv-python sortedcontainers==2.4.0 h5py==3.10.0 From 897d2efb6a39a097c0ef6f562ccd710404a30354 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Thu, 15 Aug 2024 17:48:52 +0200 Subject: [PATCH 12/27] Fix new typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 2 +- art/defences/detector/evasion/subsetscanning/detector.py | 1 - art/defences/detector/evasion/subsetscanning/scanner.py | 2 ++ art/defences/detector/evasion/subsetscanning/scanningops.py | 2 ++ .../detector/evasion/subsetscanning/scoring_functions.py | 2 ++ art/defences/postprocessor/class_labels.py | 2 ++ art/defences/postprocessor/gaussian_noise.py | 2 ++ art/defences/postprocessor/high_confidence.py | 2 ++ art/defences/postprocessor/reverse_sigmoid.py | 2 ++ art/defences/postprocessor/rounded.py | 2 ++ 10 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index cb9d8edfb9..3964a137a7 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -50,7 +50,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d' requirements_test.txt) + pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d' requirements_test.txt) pip install scipy==1.5.4 pip install matplotlib==3.3.4 pip install pandas==1.1.5 diff --git a/art/defences/detector/evasion/subsetscanning/detector.py b/art/defences/detector/evasion/subsetscanning/detector.py index da1e64f324..fbcf139892 100644 --- a/art/defences/detector/evasion/subsetscanning/detector.py +++ b/art/defences/detector/evasion/subsetscanning/detector.py @@ -18,7 +18,6 @@ """ This module implements the fast generalized subset scan based detector. 
""" - from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging diff --git a/art/defences/detector/evasion/subsetscanning/scanner.py b/art/defences/detector/evasion/subsetscanning/scanner.py index 0481a9069b..0e8acd643a 100644 --- a/art/defences/detector/evasion/subsetscanning/scanner.py +++ b/art/defences/detector/evasion/subsetscanning/scanner.py @@ -18,6 +18,8 @@ """ Subset scanning based on FGSS """ +from __future__ import annotations + from collections.abc import Callable import numpy as np diff --git a/art/defences/detector/evasion/subsetscanning/scanningops.py b/art/defences/detector/evasion/subsetscanning/scanningops.py index e47bec0a70..9e292e3dcd 100644 --- a/art/defences/detector/evasion/subsetscanning/scanningops.py +++ b/art/defences/detector/evasion/subsetscanning/scanningops.py @@ -18,6 +18,8 @@ """ Scanning operations """ +from __future__ import annotations + from collections.abc import Callable import numpy as np diff --git a/art/defences/detector/evasion/subsetscanning/scoring_functions.py b/art/defences/detector/evasion/subsetscanning/scoring_functions.py index 3a90f831f6..28680e570e 100644 --- a/art/defences/detector/evasion/subsetscanning/scoring_functions.py +++ b/art/defences/detector/evasion/subsetscanning/scoring_functions.py @@ -18,6 +18,8 @@ """ Scanner scoring functions. """ +from __future__ import annotations + import numpy as np diff --git a/art/defences/postprocessor/class_labels.py b/art/defences/postprocessor/class_labels.py index e8679ed11f..9da4ff0da9 100644 --- a/art/defences/postprocessor/class_labels.py +++ b/art/defences/postprocessor/class_labels.py @@ -18,6 +18,8 @@ """ This module implements class labels added to the classifier output. """ +from __future__ import annotations + import logging import numpy as np diff --git a/art/defences/postprocessor/gaussian_noise.py b/art/defences/postprocessor/gaussian_noise.py index b1da672d87..d80573fb7e 100644 --- a/art/defences/postprocessor/gaussian_noise.py +++ b/art/defences/postprocessor/gaussian_noise.py @@ -18,6 +18,8 @@ """ This module implements Gaussian noise added to the classifier output. """ +from __future__ import annotations + import logging import numpy as np diff --git a/art/defences/postprocessor/high_confidence.py b/art/defences/postprocessor/high_confidence.py index 736c872508..bfcd21af30 100644 --- a/art/defences/postprocessor/high_confidence.py +++ b/art/defences/postprocessor/high_confidence.py @@ -18,6 +18,8 @@ """ This module implements confidence added to the classifier output. """ +from __future__ import annotations + import logging import numpy as np diff --git a/art/defences/postprocessor/reverse_sigmoid.py b/art/defences/postprocessor/reverse_sigmoid.py index 06d4a43bf5..20fa5c9389 100644 --- a/art/defences/postprocessor/reverse_sigmoid.py +++ b/art/defences/postprocessor/reverse_sigmoid.py @@ -20,6 +20,8 @@ | Paper link: https://arxiv.org/abs/1806.00054 """ +from __future__ import annotations + import logging import numpy as np diff --git a/art/defences/postprocessor/rounded.py b/art/defences/postprocessor/rounded.py index 91a71c9dd0..1cbe1d65a5 100644 --- a/art/defences/postprocessor/rounded.py +++ b/art/defences/postprocessor/rounded.py @@ -18,6 +18,8 @@ """ This module implements a rounding to the classifier output. 
""" +from __future__ import annotations + import logging import numpy as np From efbcb788b304a1fa2aa46a79a3968d352747a4f0 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Thu, 15 Aug 2024 23:40:38 +0200 Subject: [PATCH 13/27] Fix new typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 2 +- .github/workflows/ci-tensorflow-v1.yml | 3 ++- requirements_test.txt | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index 3964a137a7..af15b833a1 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -50,7 +50,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d' requirements_test.txt) + pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d;/^mypy/d' requirements_test.txt) pip install scipy==1.5.4 pip install matplotlib==3.3.4 pip install pandas==1.1.5 diff --git a/.github/workflows/ci-tensorflow-v1.yml b/.github/workflows/ci-tensorflow-v1.yml index 96291b5b0d..e5036922f9 100644 --- a/.github/workflows/ci-tensorflow-v1.yml +++ b/.github/workflows/ci-tensorflow-v1.yml @@ -48,7 +48,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^pandas/d;/^scipy/d;/^matplotlib/d;/^xgboost/d;/^tensorflow/d;/^keras/d;/^jax/d;/^torch/d;/^Pillow/d;/^h5py/d;/^kornia/d;/^scikit-learn/d;/^pytest-mock/d;/^GPy/d;/^lief/d;/^statsmodels/d;/^ultralytics/d;/^ipython/d' requirements_test.txt) + pip install -q -r <(sed '/^pandas/d;/^scipy/d;/^matplotlib/d;/^xgboost/d;/^tensorflow/d;/^keras/d;/^jax/d;/^torch/d;/^Pillow/d;/^h5py/d;/^kornia/d;/^scikit-learn/d;/^pytest-mock/d;/^GPy/d;/^lief/d;/^statsmodels/d;/^ultralytics/d;/^ipython/d;/^numba/d' requirements_test.txt) pip install pandas==1.3.5 pip install scipy==1.7.2 pip install matplotlib==3.5.3 @@ -68,6 +68,7 @@ jobs: pip install GPy~=1.10.0 pip install lief==0.12.3 pip install statsmodels==0.13.5 + pip install numba==0.56.4 pip list - name: Run Tests run: ./run_tests.sh ${{ matrix.framework }} diff --git a/requirements_test.txt b/requirements_test.txt index e9c7f64875..6e79d9130a 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -14,7 +14,7 @@ ffmpeg-python==0.2.0 cma==3.3.0 pandas==2.2.1 librosa==0.10.1 -numba~=0.60.0 +numba~=0.56.4 opencv-python sortedcontainers==2.4.0 h5py==3.10.0 From baece58f438cd5ab6aee079a14ea27fe758a9b2f Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Fri, 16 Aug 2024 00:59:11 +0200 Subject: [PATCH 14/27] Fix new typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 2 +- .../image_center_crop/pytorch.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml 
index af15b833a1..e3e59fe8c6 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -50,7 +50,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d;/^mypy/d' requirements_test.txt) + pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d;/^mypy/d;/^pycodestyle/d' requirements_test.txt) pip install scipy==1.5.4 pip install matplotlib==3.3.4 pip install pandas==1.1.5 diff --git a/art/preprocessing/expectation_over_transformation/image_center_crop/pytorch.py b/art/preprocessing/expectation_over_transformation/image_center_crop/pytorch.py index 22061be225..39ad3377a6 100644 --- a/art/preprocessing/expectation_over_transformation/image_center_crop/pytorch.py +++ b/art/preprocessing/expectation_over_transformation/image_center_crop/pytorch.py @@ -18,6 +18,8 @@ """ This module implements Expectation over Transformation preprocessing for image center crop in PyTorch. """ +from __future__ import annotations + import logging from typing import TYPE_CHECKING From 04ce4b2b57ea148bc7b5f4d7e712b919a836241c Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Fri, 16 Aug 2024 02:12:24 +0200 Subject: [PATCH 15/27] Fix new typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 2 +- .github/workflows/ci-pytorch.yml | 6 ------ .github/workflows/ci-tensorflow-v1.yml | 2 +- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index e3e59fe8c6..7ab5e3387a 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -50,7 +50,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d;/^mypy/d;/^pycodestyle/d' requirements_test.txt) + pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d;/^mypy/d;/^pycodestyle/d;/^black/d' requirements_test.txt) pip install scipy==1.5.4 pip install matplotlib==3.3.4 pip install pandas==1.1.5 diff --git a/.github/workflows/ci-pytorch.yml b/.github/workflows/ci-pytorch.yml index de0414c664..fd0f47675e 100644 --- a/.github/workflows/ci-pytorch.yml +++ 
b/.github/workflows/ci-pytorch.yml @@ -40,12 +40,6 @@ jobs: torch: 2.2.1 torchvision: 0.17.1+cpu torchaudio: 2.2.1 - - name: PyTorch 2.3.1 (Python 3.11) - framework: pytorch - python: '3.11' - torch: 2.3.1 - torchvision: 0.18.1+cpu - torchaudio: 2.3.1 name: ${{ matrix.name }} steps: diff --git a/.github/workflows/ci-tensorflow-v1.yml b/.github/workflows/ci-tensorflow-v1.yml index e5036922f9..04acf139c9 100644 --- a/.github/workflows/ci-tensorflow-v1.yml +++ b/.github/workflows/ci-tensorflow-v1.yml @@ -48,7 +48,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^pandas/d;/^scipy/d;/^matplotlib/d;/^xgboost/d;/^tensorflow/d;/^keras/d;/^jax/d;/^torch/d;/^Pillow/d;/^h5py/d;/^kornia/d;/^scikit-learn/d;/^pytest-mock/d;/^GPy/d;/^lief/d;/^statsmodels/d;/^ultralytics/d;/^ipython/d;/^numba/d' requirements_test.txt) + pip install -q -r <(sed '/^pandas/d;/^scipy/d;/^matplotlib/d;/^xgboost/d;/^tensorflow/d;/^keras/d;/^jax/d;/^torch/d;/^Pillow/d;/^h5py/d;/^kornia/d;/^scikit-learn/d;/^pytest-mock/d;/^GPy/d;/^lief/d;/^statsmodels/d;/^ultralytics/d;/^ipython/d;/^numba/d;/^pytest/d;/^pylint/d;/^mypy/d;/^pycodestyle/d;/^black/d' requirements_test.txt) pip install pandas==1.3.5 pip install scipy==1.7.2 pip install matplotlib==3.5.3 From 81676f06ce5e29fdefbf93fa15a9f58577e60f7b Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Fri, 16 Aug 2024 23:51:19 +0200 Subject: [PATCH 16/27] Fix new typing Signed-off-by: Beat Buesser --- .github/workflows/ci-deepspeech-v3.yml | 2 +- .github/workflows/ci-lingvo.yml | 2 +- .../poisoning/perturbations/audio_perturbations.py | 2 +- .../speech_recognition/pytorch_deep_speech.py | 12 ++++++------ requirements_test.txt | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci-deepspeech-v3.yml b/.github/workflows/ci-deepspeech-v3.yml index 3b922e2cc3..65de2ba594 100644 --- a/.github/workflows/ci-deepspeech-v3.yml +++ b/.github/workflows/ci-deepspeech-v3.yml @@ -22,7 +22,7 @@ on: - cron: '0 8 * * 0' jobs: - test_deepspeech_v3_torch_1_10: + test_deepspeech_v3_torch_2_1_1: name: PyTorchDeepSpeech v3 / PyTorch 2.1.1 runs-on: ubuntu-latest container: adversarialrobustnesstoolbox/art_testing_envs:deepspeech_v3_torch_2_1_1 diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index 7ab5e3387a..ab4464a978 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -50,7 +50,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d;/^mypy/d;/^pycodestyle/d;/^black/d' requirements_test.txt) + pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d;/^mypy/d;/^pycodestyle/d;/^black/d;/^ruff/d' requirements_test.txt) pip install scipy==1.5.4 pip install 
matplotlib==3.3.4 pip install pandas==1.1.5 diff --git a/art/attacks/poisoning/perturbations/audio_perturbations.py b/art/attacks/poisoning/perturbations/audio_perturbations.py index 9b0ab67cb7..3aa17dcd20 100644 --- a/art/attacks/poisoning/perturbations/audio_perturbations.py +++ b/art/attacks/poisoning/perturbations/audio_perturbations.py @@ -21,7 +21,7 @@ because loading the audio trigger from disk (librosa.load()) is very slow and should be done only once. """ - +from __future__ import annotations import librosa import numpy as np diff --git a/art/estimators/speech_recognition/pytorch_deep_speech.py b/art/estimators/speech_recognition/pytorch_deep_speech.py index 76a220b938..4654ef2c55 100644 --- a/art/estimators/speech_recognition/pytorch_deep_speech.py +++ b/art/estimators/speech_recognition/pytorch_deep_speech.py @@ -24,7 +24,7 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING +from typing import Tuple, TYPE_CHECKING import numpy as np @@ -333,7 +333,7 @@ def __init__( loss_scale=1.0, ) - def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> tuple[np.ndarray, np.ndarray] | np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> Tuple[np.ndarray, np.ndarray] | np.ndarray: """ Perform prediction for a batch of inputs. @@ -597,7 +597,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in def compute_loss_and_decoded_output( self, masked_adv_input: "torch.Tensor", original_output: np.ndarray, **kwargs - ) -> tuple["torch.Tensor", np.ndarray]: + ) -> Tuple["torch.Tensor", np.ndarray]: """ Compute loss function and decoded output. @@ -658,7 +658,7 @@ def _preprocess_transform_model_input( x: "torch.Tensor", y: np.ndarray, real_lengths: np.ndarray, - ) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", list]: + ) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", list]: """ Apply preprocessing and then transform the user input space into the model input space. This function is used by the ASR attack to attack into the PyTorchDeepSpeech estimator whose defences are called with the @@ -704,7 +704,7 @@ def _transform_model_input( compute_gradient: bool = False, tensor_input: bool = False, real_lengths: np.ndarray | None = None, - ) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", list]: + ) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", list]: """ Transform the user input space into the model input space. @@ -834,7 +834,7 @@ def sample_rate(self) -> int: return sample_rate @property - def input_shape(self) -> tuple[int, ...]: + def input_shape(self) -> Tuple[int, ...]: """ Return the shape of one input sample. 
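The typing changes in the surrounding hunks flip signatures between `typing.Tuple[...]` and the built-in `tuple[...]`; the distinction is where the expression gets evaluated. A minimal sketch of that distinction, not part of the patch and with illustrative names (`split`, `PAIR_TYPE`):

    # Minimal sketch (illustrative names, not from the patch).
    from __future__ import annotations  # PEP 563: annotations are kept as strings, never evaluated

    from typing import Tuple

    import numpy as np


    # Inside annotations, PEP 585 built-in generics and PEP 604 unions parse fine
    # even on Python 3.8/3.9, because the annotation expression is never executed.
    def split(x: np.ndarray, ratio: float = 0.5) -> tuple[np.ndarray, np.ndarray] | None:
        if x.size == 0:
            return None
        cut = int(round(len(x) * ratio))
        return x[:cut], x[cut:]


    # A module-level alias is an ordinary assignment evaluated at import time, so it
    # still needs typing.Tuple until Python 3.9+ is the minimum (subscripting the
    # builtin `tuple` raises TypeError on 3.8). This is why aliases such as
    # DATASET_TYPE in art/utils.py keep the typing variants while signatures do not.
    PAIR_TYPE = Tuple[np.ndarray, np.ndarray]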
diff --git a/requirements_test.txt b/requirements_test.txt index 6e79d9130a..45179c8b56 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -63,7 +63,7 @@ mypy==1.11.1 pycodestyle==2.12.0 black==24.4.2 ruff==0.5.5 -types-six==1.16.21.20240513 +types-six==1.16.21.9 types-PyYAML==6.0.12.20240724 types-setuptools==71.1.0.20240726 From f7ad40cae59cad25801d7055b32ff5bb156986a0 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Sat, 24 Aug 2024 00:56:55 +0200 Subject: [PATCH 17/27] Fix new typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 2 +- .github/workflows/ci-tensorflow-v1.yml | 2 +- art/defences/preprocessor/mp3_compression_pytorch.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index ab4464a978..8451ffa6a4 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -50,7 +50,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d;/^mypy/d;/^pycodestyle/d;/^black/d;/^ruff/d' requirements_test.txt) + pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d;/^mypy/d;/^pycodestyle/d;/^black/d;/^ruff/d;/^types-PyYAML/d;/^types-setuptools/d' requirements_test.txt) pip install scipy==1.5.4 pip install matplotlib==3.3.4 pip install pandas==1.1.5 diff --git a/.github/workflows/ci-tensorflow-v1.yml b/.github/workflows/ci-tensorflow-v1.yml index 04acf139c9..8aae5b640e 100644 --- a/.github/workflows/ci-tensorflow-v1.yml +++ b/.github/workflows/ci-tensorflow-v1.yml @@ -48,7 +48,7 @@ jobs: sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^pandas/d;/^scipy/d;/^matplotlib/d;/^xgboost/d;/^tensorflow/d;/^keras/d;/^jax/d;/^torch/d;/^Pillow/d;/^h5py/d;/^kornia/d;/^scikit-learn/d;/^pytest-mock/d;/^GPy/d;/^lief/d;/^statsmodels/d;/^ultralytics/d;/^ipython/d;/^numba/d;/^pytest/d;/^pylint/d;/^mypy/d;/^pycodestyle/d;/^black/d' requirements_test.txt) + pip install -q -r <(sed '/^pandas/d;/^scipy/d;/^matplotlib/d;/^xgboost/d;/^tensorflow/d;/^keras/d;/^jax/d;/^torch/d;/^Pillow/d;/^h5py/d;/^kornia/d;/^scikit-learn/d;/^pytest-mock/d;/^GPy/d;/^lief/d;/^statsmodels/d;/^ultralytics/d;/^ipython/d;/^numba/d;/^pytest/d;/^pylint/d;/^mypy/d;/^pycodestyle/d;/^black/d;/^types-PyYAML/d;/^types-setuptools/d' requirements_test.txt) pip install pandas==1.3.5 pip install scipy==1.7.2 pip install matplotlib==3.5.3 diff --git a/art/defences/preprocessor/mp3_compression_pytorch.py b/art/defences/preprocessor/mp3_compression_pytorch.py index 50f772119f..27bfee0c39 100644 --- a/art/defences/preprocessor/mp3_compression_pytorch.py +++ b/art/defences/preprocessor/mp3_compression_pytorch.py @@ -24,7 +24,7 @@ from __future__ import absolute_import, division, 
print_function, unicode_literals, annotations import logging -from typing import TYPE_CHECKING +from typing import Tuple, TYPE_CHECKING from art.defences.preprocessor.mp3_compression import Mp3Compression from art.defences.preprocessor.preprocessor import PreprocessorPyTorch @@ -106,7 +106,7 @@ def backward(ctx, grad_output): def forward( self, x: "torch.Tensor", y: "torch.Tensor" | None = None - ) -> tuple["torch.Tensor", "torch.Tensor" | None]: + ) -> Tuple["torch.Tensor", "torch.Tensor" | None]: """ Apply MP3 compression to sample `x`. From 433c00c344c1ef1aefbdbed3cb32643328b992cd Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Mon, 26 Aug 2024 01:44:49 +0200 Subject: [PATCH 18/27] Fix new typing Signed-off-by: Beat Buesser --- art/defences/preprocessor/mp3_compression.py | 2 +- art/defences/preprocessor/mp3_compression_pytorch.py | 4 ++-- art/estimators/pytorch.py | 2 +- art/estimators/speech_recognition/pytorch_deep_speech.py | 9 ++++----- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/art/defences/preprocessor/mp3_compression.py b/art/defences/preprocessor/mp3_compression.py index b1b86bdb09..c7516d5e34 100644 --- a/art/defences/preprocessor/mp3_compression.py +++ b/art/defences/preprocessor/mp3_compression.py @@ -150,7 +150,7 @@ def wav_to_mp3(x, sample_rate): if x.dtype != object and self.channels_first: x_mp3 = np.swapaxes(x_mp3, 1, 2) - if x_orig_type is not object and x.dtype is object and x.ndim == 2: + if x_orig_type != object and x.dtype == object and x.ndim == 2: x_mp3 = x_mp3.astype(x_orig_type) return x_mp3, y diff --git a/art/defences/preprocessor/mp3_compression_pytorch.py b/art/defences/preprocessor/mp3_compression_pytorch.py index 27bfee0c39..50f772119f 100644 --- a/art/defences/preprocessor/mp3_compression_pytorch.py +++ b/art/defences/preprocessor/mp3_compression_pytorch.py @@ -24,7 +24,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals, annotations import logging -from typing import Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING from art.defences.preprocessor.mp3_compression import Mp3Compression from art.defences.preprocessor.preprocessor import PreprocessorPyTorch @@ -106,7 +106,7 @@ def backward(ctx, grad_output): def forward( self, x: "torch.Tensor", y: "torch.Tensor" | None = None - ) -> Tuple["torch.Tensor", "torch.Tensor" | None]: + ) -> tuple["torch.Tensor", "torch.Tensor" | None]: """ Apply MP3 compression to sample `x`. diff --git a/art/estimators/pytorch.py b/art/estimators/pytorch.py index cc264afef6..cba467fa68 100644 --- a/art/estimators/pytorch.py +++ b/art/estimators/pytorch.py @@ -289,7 +289,7 @@ def _set_layer(self, train: bool, layerinfo: list["torch.nn.modules.Module"]) -> Set all layers that are an instance of `layerinfo` into training or evaluation mode. :param train: False for evaluation mode. - :param layerinfo: list of module types. + :param layerinfo: List of module types. 
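The `"torch.Tensor" | None` annotations in these hunks rely on the same deferred evaluation: the string forward reference combined with `| None` is never executed at runtime, while a type checker still resolves it through the `TYPE_CHECKING`-guarded import. A minimal sketch of the pattern, not from the patch, with an illustrative function name:

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Seen only by static type checkers (mypy); never imported at runtime.
        import torch


    def to_numpy(x: "torch.Tensor" | None):
        # The annotation above is never evaluated; without the future import,
        # executing `"torch.Tensor" | None` would raise TypeError at definition time.
        return None if x is None else x.detach().cpu().numpy()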
""" import torch diff --git a/art/estimators/speech_recognition/pytorch_deep_speech.py b/art/estimators/speech_recognition/pytorch_deep_speech.py index 4654ef2c55..4a38fea428 100644 --- a/art/estimators/speech_recognition/pytorch_deep_speech.py +++ b/art/estimators/speech_recognition/pytorch_deep_speech.py @@ -24,7 +24,7 @@ from __future__ import annotations import logging -from typing import Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING import numpy as np @@ -34,7 +34,6 @@ from art.utils import get_file if TYPE_CHECKING: - import torch from deepspeech_pytorch.model import DeepSpeech @@ -333,7 +332,7 @@ def __init__( loss_scale=1.0, ) - def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> Tuple[np.ndarray, np.ndarray] | np.ndarray: + def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> tuple[np.ndarray, np.ndarray] | np.ndarray: """ Perform prediction for a batch of inputs. @@ -658,7 +657,7 @@ def _preprocess_transform_model_input( x: "torch.Tensor", y: np.ndarray, real_lengths: np.ndarray, - ) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", list]: + ) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", list]: """ Apply preprocessing and then transform the user input space into the model input space. This function is used by the ASR attack to attack into the PyTorchDeepSpeech estimator whose defences are called with the @@ -704,7 +703,7 @@ def _transform_model_input( compute_gradient: bool = False, tensor_input: bool = False, real_lengths: np.ndarray | None = None, - ) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", list]: + ) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", list]: """ Transform the user input space into the model input space. 
From 9f295a3a838d0f513d2d9ef56dfe3dcdc3a5e5d3 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Mon, 26 Aug 2024 13:41:20 +0200 Subject: [PATCH 19/27] Fix new typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 2 +- .github/workflows/ci-tensorflow-v1.yml | 1 + art/defences/preprocessor/mp3_compression.py | 2 +- art/estimators/speech_recognition/pytorch_deep_speech.py | 4 ++-- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index 8451ffa6a4..c68b910529 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -30,7 +30,7 @@ jobs: include: - name: TensorFlow+Lingvo 2.1.0v1 (Keras 2.3.1 Python 3.6) framework: tensorflow2v1 - python: 3.6 + python: 3.9 tensorflow: 2.1.0 tf_version: v2 keras: 2.3.1 diff --git a/.github/workflows/ci-tensorflow-v1.yml b/.github/workflows/ci-tensorflow-v1.yml index 8aae5b640e..0334e5938e 100644 --- a/.github/workflows/ci-tensorflow-v1.yml +++ b/.github/workflows/ci-tensorflow-v1.yml @@ -69,6 +69,7 @@ jobs: pip install lief==0.12.3 pip install statsmodels==0.13.5 pip install numba==0.56.4 + pip install pytest pip list - name: Run Tests run: ./run_tests.sh ${{ matrix.framework }} diff --git a/art/defences/preprocessor/mp3_compression.py b/art/defences/preprocessor/mp3_compression.py index c7516d5e34..3679082f32 100644 --- a/art/defences/preprocessor/mp3_compression.py +++ b/art/defences/preprocessor/mp3_compression.py @@ -150,7 +150,7 @@ def wav_to_mp3(x, sample_rate): if x.dtype != object and self.channels_first: x_mp3 = np.swapaxes(x_mp3, 1, 2) - if x_orig_type != object and x.dtype == object and x.ndim == 2: + if x_orig_type != object and x.dtype == object and x.ndim == 2: # noqa: E721 x_mp3 = x_mp3.astype(x_orig_type) return x_mp3, y diff --git a/art/estimators/speech_recognition/pytorch_deep_speech.py b/art/estimators/speech_recognition/pytorch_deep_speech.py index 4a38fea428..ba1c17767a 100644 --- a/art/estimators/speech_recognition/pytorch_deep_speech.py +++ b/art/estimators/speech_recognition/pytorch_deep_speech.py @@ -596,7 +596,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in def compute_loss_and_decoded_output( self, masked_adv_input: "torch.Tensor", original_output: np.ndarray, **kwargs - ) -> Tuple["torch.Tensor", np.ndarray]: + ) -> tuple["torch.Tensor", np.ndarray]: """ Compute loss function and decoded output. @@ -833,7 +833,7 @@ def sample_rate(self) -> int: return sample_rate @property - def input_shape(self) -> Tuple[int, ...]: + def input_shape(self) -> tuple[int, ...]: """ Return the shape of one input sample. 
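The Mp3Compression change above from `x.dtype is object` to `x.dtype == object` (and the `# noqa: E721` added on top of it) follows from NumPy dtype semantics: `x.dtype` is a `numpy.dtype` instance, so an identity check against the builtin `object` is always False, whereas dtype equality deliberately accepts type objects. A minimal sketch, not from the patch:

    import numpy as np

    ragged = np.array([np.zeros(3), np.zeros(5)], dtype=object)

    print(ragged.dtype is object)   # False: ragged.dtype is numpy.dtype('O'), not the builtin type
    print(ragged.dtype == object)   # True: numpy.dtype.__eq__ compares against type objects
    # The linting configured in this PR reports the second form as E721
    # ("do not compare types"), which is what the added `# noqa: E721` silences.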
From 1b649e545351e2c34d2647ddd2c9e57899611450 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Mon, 26 Aug 2024 14:43:05 +0200 Subject: [PATCH 20/27] Fix typos and grammar Signed-off-by: Beat Buesser --- art/attacks/attack.py | 14 +++++++------- .../adversarial_patch/adversarial_patch_numpy.py | 2 +- .../adversarial_patch_pytorch.py | 4 ++-- .../adversarial_patch_tensorflow.py | 4 ++-- art/attacks/evasion/auto_attack.py | 2 +- art/attacks/evasion/auto_conjugate_gradient.py | 2 +- art/attacks/evasion/brendel_bethge.py | 14 +++++++------- art/attacks/evasion/carlini.py | 2 +- art/attacks/evasion/dpatch.py | 6 +++--- art/attacks/evasion/elastic_net.py | 6 +++--- art/attacks/evasion/fast_gradient.py | 6 +++--- art/attacks/evasion/frame_saliency.py | 8 ++++---- .../evasion/graphite/graphite_blackbox.py | 6 +++--- .../graphite/graphite_whitebox_pytorch.py | 4 ++-- art/attacks/evasion/graphite/utils.py | 2 +- .../imperceptible_asr/imperceptible_asr.py | 12 ++++++------ .../imperceptible_asr_pytorch.py | 14 +++++++------- art/attacks/evasion/laser_attack/utils.py | 2 +- art/attacks/evasion/lowprofool.py | 6 +++--- .../over_the_air_flickering_pytorch.py | 2 +- art/attacks/evasion/overload/overload.py | 4 ++-- art/attacks/evasion/pe_malware_attack.py | 16 ++++++++-------- art/attacks/evasion/pixel_threshold.py | 6 +++--- .../projected_gradient_descent.py | 6 +++--- .../projected_gradient_descent_numpy.py | 8 ++++---- .../projected_gradient_descent_pytorch.py | 10 +++++----- .../projected_gradient_descent_tensorflow_v2.py | 10 +++++----- art/attacks/evasion/saliency_map.py | 2 +- art/attacks/evasion/square_attack.py | 2 +- .../functionally_equivalent_extraction.py | 4 ++-- .../attribute_inference/meminf_based.py | 4 ++-- .../white_box_decision_tree.py | 2 +- .../poisoning/adversarial_embedding_attack.py | 6 +++--- art/attacks/poisoning/backdoor_attack.py | 2 +- .../backdoor_attack_dgm_red.py | 4 ++-- .../backdoor_attack_dgm_trail.py | 4 ++-- art/attacks/poisoning/bad_det/bad_det_gma.py | 4 ++-- art/attacks/poisoning/bad_det/bad_det_oda.py | 4 ++-- art/attacks/poisoning/bad_det/bad_det_oga.py | 4 ++-- art/attacks/poisoning/bad_det/bad_det_rma.py | 4 ++-- .../poisoning/bullseye_polytope_attack.py | 6 +++--- .../poisoning/clean_label_backdoor_attack.py | 2 +- .../poisoning/feature_collision_attack.py | 8 ++++---- .../poisoning/gradient_matching_attack.py | 4 ++-- .../hidden_trigger_backdoor.py | 10 ++++------ .../hidden_trigger_backdoor_keras.py | 15 +++++++-------- .../hidden_trigger_backdoor_pytorch.py | 11 +++++------ .../hidden_trigger_backdoor/loss_meter.py | 2 +- art/attacks/poisoning/sleeper_agent_attack.py | 8 ++++---- art/config.py | 2 +- .../detector/evasion/subsetscanning/detector.py | 5 +++-- .../evasion/subsetscanning/scanningops.py | 2 +- .../detector/poison/activation_defence.py | 10 +++++----- .../detector/poison/clustering_analyzer.py | 2 +- art/defences/preprocessor/inverse_gan.py | 2 +- art/defences/preprocessor/mp3_compression.py | 6 +++--- art/defences/preprocessor/pixel_defend.py | 2 +- .../preprocessor/spatial_smoothing_tensorflow.py | 2 +- .../trainer/adversarial_trainer_oaat_pytorch.py | 4 ++-- .../certified_adversarial_trainer_pytorch.py | 10 +++++----- .../trainer/ibp_certified_trainer_pytorch.py | 8 ++++---- .../transformer/poisoning/neural_cleanse.py | 4 ++-- art/estimators/certification/deep_z/deep_z.py | 4 ++-- art/estimators/certification/deep_z/pytorch.py | 2 +- .../derandomized_smoothing/ablators/pytorch.py | 4 ++-- .../derandomized_smoothing/derandomized.py | 2 +- 
.../derandomized_smoothing/pytorch.py | 8 ++++---- .../certification/interval/interval.py | 2 +- art/estimators/certification/interval/pytorch.py | 4 ++-- .../certification/object_seeker/object_seeker.py | 2 +- .../certification/object_seeker/pytorch.py | 2 +- .../randomized_smoothing/macer/pytorch.py | 2 +- .../certification/randomized_smoothing/numpy.py | 8 +++++--- .../randomized_smoothing/pytorch.py | 6 +++--- .../randomized_smoothing/randomized_smoothing.py | 2 +- .../randomized_smoothing/smooth_adv/pytorch.py | 2 +- .../randomized_smoothing/smooth_mix/pytorch.py | 2 +- .../randomized_smoothing/tensorflow.py | 6 +++--- art/estimators/classification/blackbox.py | 2 +- .../classification/detector_classifier.py | 4 ++-- art/estimators/classification/ensemble.py | 8 ++++---- art/estimators/classification/hugging_face.py | 9 ++++----- art/estimators/classification/keras.py | 8 ++++---- art/estimators/classification/mxnet.py | 6 +++--- art/estimators/classification/pytorch.py | 12 ++++++------ art/estimators/classification/tensorflow.py | 16 ++++++++-------- art/estimators/estimator.py | 2 +- art/estimators/gan/tensorflow.py | 10 +++++----- art/estimators/generation/tensorflow.py | 6 +++--- art/estimators/object_detection/detr.py | 6 +++--- art/estimators/object_tracking/pytorch_goturn.py | 2 +- .../poison_mitigation/neural_cleanse/keras.py | 10 +++++----- .../neural_cleanse/neural_cleanse.py | 11 +++++++---- art/estimators/pytorch.py | 4 ++-- art/estimators/regression/blackbox.py | 2 +- art/estimators/regression/keras.py | 6 +++--- art/estimators/regression/pytorch.py | 10 +++++----- .../speech_recognition/pytorch_deep_speech.py | 16 ++++++++-------- .../speech_recognition/pytorch_espresso.py | 10 +++++----- .../speech_recognition/speech_recognizer.py | 2 +- .../speech_recognition/tensorflow_lingvo.py | 4 ++-- art/estimators/tensorflow.py | 4 ++-- art/metrics/gradient_check.py | 2 +- art/metrics/metrics.py | 2 +- art/metrics/verification_decisions_trees.py | 2 +- .../image/image_square_pad/pytorch.py | 2 +- .../image/image_square_pad/tensorflow.py | 2 +- art/summary_writer.py | 4 ++-- art/utils.py | 14 +++++++------- 109 files changed, 303 insertions(+), 302 deletions(-) diff --git a/art/attacks/attack.py b/art/attacks/attack.py index 595b5f4941..01cfe82149 100644 --- a/art/attacks/attack.py +++ b/art/attacks/attack.py @@ -256,7 +256,7 @@ def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[ :param x: An array with the original inputs to be attacked. :param y: Target labels for `x`. Untargeted attacks set this value to None. - :return: An tuple holding the (poisoning examples, poisoning labels). + :return: A tuple holding the (poisoning examples, poisoning labels). """ raise NotImplementedError @@ -319,7 +319,7 @@ def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[ :param x: An array with the original inputs to be attacked. :param y: Target labels for `x`. Untargeted attacks set this value to None. - :return: An tuple holding the (poisoning examples, poisoning labels). + :return: A tuple holding the (poisoning examples, poisoning labels). :rtype: `(np.ndarray, np.ndarray)` """ raise NotImplementedError @@ -363,7 +363,7 @@ def poison( - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image - scores [N]: the scores or each prediction. - :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. 
+ :return: A tuple holding the `(poisoning_examples, poisoning_labels)`. """ raise NotImplementedError @@ -387,7 +387,7 @@ def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[ :param x: An array with the original inputs to be attacked. :param y: Target labels for `x`. Untargeted attacks set this value to None. - :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. + :return: A tuple holding the `(poisoning_examples, poisoning_labels)`. """ raise NotImplementedError @@ -404,9 +404,9 @@ def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[ poisoning attack implementations. :param x: An array with the original inputs to be attacked. - :param y: Correct labels or target labels for `x`, depending if the attack is targeted + :param y: Correct labels or target labels for `x`, depending on if the attack is targeted or not. This parameter is only used by some of the attacks. - :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. + :return: A tuple holding the `(poisoning_examples, poisoning_labels)`. """ raise NotImplementedError @@ -423,7 +423,7 @@ def extract(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> "CLAS attack implementations. :param x: An array with the original inputs to be attacked. - :param y: Correct labels or target labels for `x`, depending if the attack is targeted + :param y: Correct labels or target labels for `x`, depending on if the attack is targeted or not. This parameter is only used by some of the attacks. :return: ART classifier of the extracted model. """ diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py index be36d15503..295182a742 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_numpy.py @@ -263,7 +263,7 @@ def apply_patch( :param x: Instances to apply randomly transformed patch. :param scale: Scale of the applied patch in relation to the classifier input shape. :param patch_external: External patch to apply to images `x`. - :param mask: An boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` + :param mask: A boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` (N, H, W) without their channel dimensions. Any features for which the mask is True can be the center location of the patch during sampling. :return: The patched instances. diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py index 0004b324a3..891e872481 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_pytorch.py @@ -481,7 +481,7 @@ def generate( # type: ignore :param x: An array with the original input images of shape NCHW or input videos of shape NFCHW. :param y: An array with the original true labels. - :param mask: An boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` + :param mask: A boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` (N, H, W) without their channel dimensions. Any features for which the mask is True can be the center location of the patch during sampling. 
:type mask: `np.ndarray` @@ -691,7 +691,7 @@ def apply_patch( :param x: Instances to apply randomly transformed patch. :param scale: Scale of the applied patch in relation to the estimator input shape. :param patch_external: External patch to apply to images `x`. - :param mask: An boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` + :param mask: A boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` (N, H, W) without their channel dimensions. Any features for which the mask is True can be the center location of the patch during sampling. :return: The patched samples. diff --git a/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py b/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py index 4b67f73658..60e0b9affc 100644 --- a/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py +++ b/art/attacks/evasion/adversarial_patch/adversarial_patch_tensorflow.py @@ -374,7 +374,7 @@ def _random_overlay( x_origin_delta = x_origin - x_origin_shifted y_origin_delta = y_origin - y_origin_shifted - # Run translation in a second step to position patch exactly inside of the mask + # Run translation in a second step to position patch exactly inside the mask transform_vectors.append([a_0, a_1, x_origin_delta, b_0, b_1, y_origin_delta, 0, 0]) translation_vectors.append([1, 0, -x_shift, 0, 1, -y_shift, 0, 0]) @@ -527,7 +527,7 @@ def apply_patch( :param x: Instances to apply randomly transformed patch. :param scale: Scale of the applied patch in relation to the classifier input shape. :param patch_external: External patch to apply to images `x`. - :param mask: An boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` + :param mask: A boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` (N, H, W) without their channel dimensions. Any features for which the mask is True can be the center location of the patch during sampling. :return: The patched samples. diff --git a/art/attacks/evasion/auto_attack.py b/art/attacks/evasion/auto_attack.py index ea4c5d50e0..01a4046ec7 100644 --- a/art/attacks/evasion/auto_attack.py +++ b/art/attacks/evasion/auto_attack.py @@ -83,7 +83,7 @@ def __init__( """ Create a :class:`.AutoAttack` instance. - :param estimator: An trained estimator. + :param estimator: A trained estimator. :param norm: The norm of the adversarial perturbation. Possible values: "inf", np.inf, 1 or 2. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. diff --git a/art/attacks/evasion/auto_conjugate_gradient.py b/art/attacks/evasion/auto_conjugate_gradient.py index 5f268fb702..950b71ed01 100644 --- a/art/attacks/evasion/auto_conjugate_gradient.py +++ b/art/attacks/evasion/auto_conjugate_gradient.py @@ -102,7 +102,7 @@ def __init__( """ Create a :class:`.AutoConjugateGradient` instance. - :param estimator: An trained estimator. + :param estimator: A trained estimator. :param norm: The norm of the adversarial perturbation. Possible values: "inf", np.inf, 1 or 2. :param eps: Maximum perturbation that the attacker can introduce. :param eps_step: Attack step size (input variation) at each iteration. 
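
For reference, a minimal Python sketch of how the boolean `mask` argument described in the patch hunks above is typically shaped, assuming NumPy arrays and NCHW image inputs; the array names and sizes below are illustrative assumptions, not part of the library API:

import numpy as np

# Hypothetical batch of 8 single-channel 28x28 images in NCHW format.
x = np.random.rand(8, 1, 28, 28).astype(np.float32)

# One mask shared by all samples: shape (1, H, W), i.e. without the channel dimension.
# Positions set to True are the only allowed centre locations for the patch during sampling.
shared_mask = np.zeros((1, 28, 28), dtype=bool)
shared_mask[:, 10:18, 10:18] = True

# Alternatively, one mask per sample: shape (N, H, W).
per_sample_mask = np.zeros((x.shape[0], 28, 28), dtype=bool)
per_sample_mask[:, :14, :] = True  # restrict each sample's patch centre to the upper image half
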
diff --git a/art/attacks/evasion/brendel_bethge.py b/art/attacks/evasion/brendel_bethge.py index bdf1622db0..bfd849f954 100644 --- a/art/attacks/evasion/brendel_bethge.py +++ b/art/attacks/evasion/brendel_bethge.py @@ -399,7 +399,7 @@ def _line_search_wolfe( while 1: # interpolate to find a trial step length between a_lo and a_hi # Need to choose interpolation here. Use cubic interpolation and then if the - # result is within delta * dalpha or outside of the interval bounded by a_lo or a_hi + # result is within delta * dalpha or outside the interval bounded by a_lo or a_hi # then use quadratic interpolation, if the result is still too close, then use bisection dalpha = a_hi - a_lo @@ -508,7 +508,7 @@ def _line_search_wolfe( while 1: # interpolate to find a trial step length between a_lo and a_hi # Need to choose interpolation here. Use cubic interpolation and then if the - # result is within delta * dalpha or outside of the interval bounded by a_lo or a_hi + # result is within delta * dalpha or outside the interval bounded by a_lo or a_hi # then use quadratic interpolation, if the result is still too close, then use bisection dalpha = a_hi - a_lo @@ -661,10 +661,10 @@ def solve(self, x0, x, b, min_, max_, c, r): if np.abs(cmax) < np.abs(c): # problem not solvable (boundary cannot be reached) if np.sqrt(cmaxnorm) < r: - # make largest possible step towards boundary while staying within bounds + # make the largest possible step towards boundary while staying within bounds _delta = self.optimize_boundary_s_t_trustregion(x0, x, b, min_, max_, c, r) else: - # make largest possible step towards boundary while staying within trust region + # make the largest possible step towards boundary while staying within trust region _delta = self.optimize_boundary_s_t_trustregion(x0, x, b, min_, max_, c, r) else: if cmaxnorm < r: @@ -681,7 +681,7 @@ def solve(self, x0, x, b, min_, max_, c, r): _delta = self.optimize_distance_s_t_boundary_and_trustregion(x0, x, b, min_, max_, c, r) else: # problem not solvable (boundary cannot be reached) - # make largest step towards boundary within trust region + # make the largest step towards boundary within trust region _delta = self.optimize_boundary_s_t_trustregion(x0, x, b, min_, max_, c, r) return _delta @@ -720,7 +720,7 @@ def _minimum_norm_to_boundary(self, x, b, _ell, _u, c, bnorm): min ||delta||_2^2 s.t. lower <= x + delta <= upper AND b.dot(delta) = c - Lets forget about the box constraints for a second, i.e. + Let's forget about the box constraints for a second, i.e. min ||delta||_2^2 s.t. b.dot(delta) = c @@ -1284,7 +1284,7 @@ def fun(self, epsilon, x0, x, b, ell, u, c, r, lambda0=None): min ||delta||_2^2 s.t. lower <= x + delta <= upper AND b.dot(delta) = c - Lets forget about the box constraints for a second, i.e. + Let's forget about the box constraints for a second, i.e. min ||delta||_2^2 s.t. b.dot(delta) = c diff --git a/art/attacks/evasion/carlini.py b/art/attacks/evasion/carlini.py index 53106f043e..03670ad1fd 100644 --- a/art/attacks/evasion/carlini.py +++ b/art/attacks/evasion/carlini.py @@ -163,7 +163,7 @@ def _loss( # column, last equation), the maximum is taken over Z_other - Z_target (or Z_target - Z_other respectively) # and -confidence. However, it doesn't seem that that would have the desired effect (loss term is <= 0 if and # only if the difference between the logit of the target and any other class differs by at least confidence). - # Hence the rearrangement here. + # Hence, the rearrangement here. 
if self.targeted: # if targeted, optimize for making the target class most likely diff --git a/art/attacks/evasion/dpatch.py b/art/attacks/evasion/dpatch.py index f0d30d7382..fc7d7183e5 100644 --- a/art/attacks/evasion/dpatch.py +++ b/art/attacks/evasion/dpatch.py @@ -116,7 +116,7 @@ def generate( - labels [N]: the labels for each image - scores [N]: the scores or each prediction. :param target_label: The target label of the DPatch attack. - :param mask: An boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` + :param mask: A boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` (N, H, W) without their channel dimensions. Any features for which the mask is True can be the center location of the patch during sampling. :type mask: `np.ndarray` @@ -276,7 +276,7 @@ def _augment_images_with_patch( :param random_location: If True apply patch at randomly shifted locations, otherwise place patch at origin (top-left corner). :param channels_first: Set channels first or last. - :param mask: An boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` + :param mask: A boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` (N, H, W) without their channel dimensions. Any features for which the mask is True can be the center location of the patch during sampling. :param transforms: Patch transforms, requires `random_location=False`, and `mask=None`. @@ -362,7 +362,7 @@ def apply_patch( :param x: Images to be patched. :param patch_external: External patch to apply to images `x`. If None the attacks patch will be applied. :param random_location: True if patch location should be random. - :param mask: An boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` + :param mask: A boolean array of shape equal to the shape of a single samples (1, H, W) or the shape of `x` (N, H, W) without their channel dimensions. Any features for which the mask is True can be the center location of the patch during sampling. :return: The patched images. diff --git a/art/attacks/evasion/elastic_net.py b/art/attacks/evasion/elastic_net.py index cca14d9586..6598f6a1ad 100644 --- a/art/attacks/evasion/elastic_net.py +++ b/art/attacks/evasion/elastic_net.py @@ -254,7 +254,7 @@ def _generate_batch(self, x_batch: np.ndarray, y_batch: np.ndarray) -> np.ndarra c_lower_bound = np.zeros(x_batch.shape[0]) c_upper_bound = 10e10 * np.ones(x_batch.shape[0]) - # Initialize best distortions and best attacks globally + # Initialize the best distortions and best attacks globally o_best_dist = np.inf * np.ones(x_batch.shape[0]) o_best_attack = x_batch.copy() @@ -329,7 +329,7 @@ def _generate_bss(self, x_batch: np.ndarray, y_batch: np.ndarray, c_batch: np.nd :param x_batch: A batch of original examples. :param y_batch: A batch of targets (0-1 hot). :param c_batch: A batch of constants. 
- :return: A tuple of best elastic distances, best labels, best attacks + :return: A tuple of the best elastic distances, best labels, best attacks """ def compare(o_1, o_2): @@ -337,7 +337,7 @@ def compare(o_1, o_2): return o_1 == o_2 return o_1 != o_2 - # Initialize best distortions and best changed labels and best attacks + # Initialize the best distortions and best changed labels and best attacks best_dist = np.inf * np.ones(x_batch.shape[0]) best_label = [-np.inf] * x_batch.shape[0] best_attack = x_batch.copy() diff --git a/art/attacks/evasion/fast_gradient.py b/art/attacks/evasion/fast_gradient.py index 5db802b864..80571469b7 100644 --- a/art/attacks/evasion/fast_gradient.py +++ b/art/attacks/evasion/fast_gradient.py @@ -149,7 +149,7 @@ def _minimal_perturbation(self, x: np.ndarray, y: np.ndarray, mask: np.ndarray) mask_batch = mask if mask is not None: # Here we need to make a distinction: if the masks are different for each input, we need to index - # those for the current batch. Otherwise (i.e. mask is meant to be broadcasted), keep it as it is. + # those for the current batch. Otherwise, (i.e. mask is meant to be broadcasted), keep it as it is. if len(mask.shape) == len(x.shape): mask_batch = mask[batch_index_1:batch_index_2] @@ -410,7 +410,7 @@ def _compute_perturbation( targeted=self.targeted, ) - # Check for NaN before normalisation an replace with 0 + # Check for NaN before normalisation and replace with 0 if grad.dtype != object and np.isnan(grad).any(): # pragma: no cover logger.warning("Elements of the loss gradient are NaN and have been replaced with 0.0.") grad = np.where(np.isnan(grad), 0.0, grad) @@ -542,7 +542,7 @@ def _compute( mask_batch = mask if mask is not None: # Here we need to make a distinction: if the masks are different for each input, we need to index - # those for the current batch. Otherwise (i.e. mask is meant to be broadcasted), keep it as it is. + # those for the current batch. Otherwise, (i.e. mask is meant to be broadcasted), keep it as it is. if len(mask.shape) == len(x.shape): mask_batch = mask[batch_index_1:batch_index_2] diff --git a/art/attacks/evasion/frame_saliency.py b/art/attacks/evasion/frame_saliency.py index 941ac78e01..024c4d65c2 100644 --- a/art/attacks/evasion/frame_saliency.py +++ b/art/attacks/evasion/frame_saliency.py @@ -77,9 +77,9 @@ def __init__( :param attacker: An adversarial evasion attacker which supports masking. Currently supported: ProjectedGradientDescent, BasicIterativeMethod, FastGradientMethod. :param method: Specifies which method to use: "iterative_saliency" (adds perturbation iteratively to frame - with highest saliency score until attack is successful), "iterative_saliency_refresh" (updates - perturbation after each iteration), "one_shot" (adds all perturbations at once, i.e. defaults to - original attack). + with the highest saliency score until attack is successful), "iterative_saliency_refresh" + (updates perturbation after each iteration), "one_shot" (adds all perturbations at once, i.e. + defaults to original attack). :param frame_index: Index of the axis in input (feature) array `x` representing the frame dimension. :param batch_size: Size of the batch on which adversarial samples are generated. :param verbose: Show progress bars. @@ -143,7 +143,7 @@ def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.n # Generate adversarial perturbations. 
If the method is "iterative_saliency_refresh", we will use a mask so that # only the next frame to be perturbed is considered in the attack; moreover we keep track of the next frames to - # be perturbed so they will not be perturbed again later on. + # be perturbed, so they will not be perturbed again later on. mask = np.ones(x.shape) if self.method == "iterative_saliency_refresh": mask = np.zeros(x.shape) diff --git a/art/attacks/evasion/graphite/graphite_blackbox.py b/art/attacks/evasion/graphite/graphite_blackbox.py index 8bec5156b1..ef803f9c32 100644 --- a/art/attacks/evasion/graphite/graphite_blackbox.py +++ b/art/attacks/evasion/graphite/graphite_blackbox.py @@ -42,7 +42,7 @@ attack that only requires class predictions. | Paper link: https://arxiv.org/abs/2002.07088 -| Original github link: https://github.com/ryan-feng/GRAPHITE +| Original GitHub link: https://github.com/ryan-feng/GRAPHITE """ from __future__ import absolute_import, division, print_function, unicode_literals, annotations @@ -79,7 +79,7 @@ class GRAPHITEBlackbox(EvasionAttack): stickers. | Paper link: https://arxiv.org/abs/2002.07088 - | Original github link: https://github.com/ryan-feng/GRAPHITE + | Original GitHub link: https://github.com/ryan-feng/GRAPHITE """ attack_params = EvasionAttack.attack_params + [ @@ -616,7 +616,7 @@ def _get_coarse_reduced_mask( :param pts: Optional. A set of points that will set the crop size in the perspective transform. :return: mask, adjusted list of patches, adjusted list of indices """ - # binary search leftmost pivot value for which tr exceeeds specificed threshold if one exists + # binary search leftmost pivot value for which `tr` exceeds specified threshold if one exists num_patches = len(patches) if num_patches == 1: pivot = 0 diff --git a/art/attacks/evasion/graphite/graphite_whitebox_pytorch.py b/art/attacks/evasion/graphite/graphite_whitebox_pytorch.py index fe2c8c8788..23d080e187 100644 --- a/art/attacks/evasion/graphite/graphite_whitebox_pytorch.py +++ b/art/attacks/evasion/graphite/graphite_whitebox_pytorch.py @@ -42,7 +42,7 @@ This is a robust physical perturbation attack. | Paper link: https://arxiv.org/abs/2002.07088 -| Original github link: https://github.com/ryan-feng/GRAPHITE +| Original GitHub link: https://github.com/ryan-feng/GRAPHITE """ from __future__ import absolute_import, division, print_function, unicode_literals, annotations @@ -71,7 +71,7 @@ class GRAPHITEWhiteboxPyTorch(EvasionAttack): that generates robust physical perturbations that can be applied as stickers. | Paper link: https://arxiv.org/abs/2002.07088 - | Original github link: https://github.com/ryan-feng/GRAPHITE + | Original GitHub link: https://github.com/ryan-feng/GRAPHITE """ attack_params = EvasionAttack.attack_params + [ diff --git a/art/attacks/evasion/graphite/utils.py b/art/attacks/evasion/graphite/utils.py index ba08efa136..1b21afe3bf 100644 --- a/art/attacks/evasion/graphite/utils.py +++ b/art/attacks/evasion/graphite/utils.py @@ -41,7 +41,7 @@ This module implements helper functions for GRAPHITE attacks. 
| Paper link: https://arxiv.org/abs/2002.07088 -| Original github link: https://github.com/ryan-feng/GRAPHITE +| Original GitHub link: https://github.com/ryan-feng/GRAPHITE """ from __future__ import annotations diff --git a/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py b/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py index ec1d5e6d77..f30f6ee824 100644 --- a/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py +++ b/art/attacks/evasion/imperceptible_asr/imperceptible_asr.py @@ -158,7 +158,7 @@ def __init__( self._masking_threshold_tf = tf1.placeholder( tf1.float32, shape=[None, None, None], name="art_masking_threshold" ) - # TensorFlow loss gradient ops + # TensorFlow - loss gradient operations self._loss_gradient_masking_threshold_op_tf = self._loss_gradient_masking_threshold_tf( self._delta, self._power_spectral_density_maximum_tf, self._masking_threshold_tf ) @@ -172,7 +172,7 @@ def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.n Generate imperceptible, adversarial examples. :param x: An array with the original inputs to be attacked. - :param y: Target values of shape (batch_size,). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (batch_size,). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :return: An array holding the adversarial examples. """ @@ -221,7 +221,7 @@ def _create_adversarial(self, x, y) -> np.ndarray: | Paper link: https://arxiv.org/abs/1801.01944. :param x: An array with the original inputs to be attacked. - :param y: Target values of shape (batch_size,). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (batch_size,). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :return: An array with the adversarial outputs. """ @@ -279,7 +279,7 @@ def _create_imperceptible(self, x: np.ndarray, x_adversarial: np.ndarray, y: np. :param x: An array with the original inputs to be attacked. :param x_adversarial: An array with the adversarial examples. - :param y: Target values of shape (batch_size,). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (batch_size,). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :return: An array with the imperceptible, adversarial outputs. """ @@ -505,7 +505,7 @@ def _approximate_power_spectral_density_tf( Approximate the power spectral density for a perturbation `perturbation` in TensorFlow. Note that a stabilized PSD approximate is returned, where the `10*log10`-term has been canceled out. - Following Qin et al (2019) this mitigates optimization instabilities. + Following Qin et al. (2019) this mitigates optimization instabilities. :param perturbation: Adversarial perturbation. :param psd_maximum_stabilized: Stabilized maximum across frames, i.e. shape is `(batch_size, frame_length)`, of @@ -792,7 +792,7 @@ def find_maskers(psd_vector: np.ndarray) -> tuple[np.ndarray, np.ndarray]: :param psd_vector: PSD vector of shape `(window_size // 2 + 1)`. :return: Possible PSD maskers and indices. """ - # identify maskers. For simplification it is assumed that all maskers are tonal (vs. nontonal). + # identify maskers. 
For simplification, it is assumed that all maskers are tonal (vs. nontonal). masker_idx = ss.argrelmax(psd_vector)[0] # smooth maskers with their direct neighbors diff --git a/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py b/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py index 84e2a8bcfe..e0e586be9a 100644 --- a/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py +++ b/art/attacks/evasion/imperceptible_asr/imperceptible_asr_pytorch.py @@ -124,7 +124,7 @@ def __init__( :param optimizer_2: The optimizer applied for the second stage of the optimization of the attack. If `None` attack will use `torch.optim.Adam`. :param global_max_length: The length of the longest audio signal allowed by this attack. - :param initial_rescale: Initial rescale coefficient to speedup the decrease of the perturbation size during + :param initial_rescale: Initial rescale coefficient to speed up the decrease of the perturbation size during the first stage of the optimization of the attack. :param decrease_factor_eps: The factor to adjust the rescale coefficient during the first stage of the optimization of the attack. @@ -230,7 +230,7 @@ def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.n :param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`. - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. Note that, this class only supports targeted attack. :return: An array holding the adversarial examples. @@ -298,7 +298,7 @@ def _generate_batch(self, x: np.ndarray, y: np.ndarray) -> np.ndarray: :param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`. - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. Note that, this class only supports targeted attack. :return: A batch of adversarial examples. @@ -344,7 +344,7 @@ def _attack_1st_stage(self, x: np.ndarray, y: np.ndarray) -> tuple["torch.Tensor :param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`. - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. Note that, this class only supports targeted attack. :return: A tuple of two tensors: @@ -449,7 +449,7 @@ def _forward_1st_stage( :param original_input: Samples of shape (nb_samples, seq_length). 
Note that, sequences in the batch must have equal lengths. A possible example of `original_input` could be: `original_input = np.array([np.array([0.1, 0.2, 0.1]), np.array([0.3, 0.1, 0.0])])`. - :param original_output: Target values of shape (nb_samples). Each sample in `original_output` is a string and + :param original_output: Target values of shape (nb_samples). Each sample in `original_output` is a string, and it may possess different lengths. A possible example of `original_output` could be: `original_output = np.array(['SIXTY ONE', 'HELLO'])`. :param local_batch_size: Current batch size. @@ -491,7 +491,7 @@ def _attack_2nd_stage( :param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`. - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. Note that, this class only supports targeted attack. :param theta_batch: Original thresholds. @@ -566,7 +566,7 @@ class only supports targeted attack. for local_batch_size_idx in range(local_batch_size): if decoded_output[local_batch_size_idx] == y[local_batch_size_idx]: if loss_2nd_stage[local_batch_size_idx] < best_loss_2nd_stage[local_batch_size_idx]: - # Update best loss at 2nd stage + # Update the best loss at 2nd stage best_loss_2nd_stage[local_batch_size_idx] = ( loss_2nd_stage[local_batch_size_idx].detach().cpu().numpy() ) diff --git a/art/attacks/evasion/laser_attack/utils.py b/art/attacks/evasion/laser_attack/utils.py index 65d36cfac3..cb8cec3e9f 100644 --- a/art/attacks/evasion/laser_attack/utils.py +++ b/art/attacks/evasion/laser_attack/utils.py @@ -289,7 +289,7 @@ def log(self, adv_object: AdversarialObject) -> None: def save_image(self, image: np.ndarray) -> None: """ - Saves images generated during lasting process to the artifacts directory. + Saves images generated during lasting process to the artifact's directory. :param image: Image to save. """ diff --git a/art/attacks/evasion/lowprofool.py b/art/attacks/evasion/lowprofool.py index 59014db9f3..958e748480 100644 --- a/art/attacks/evasion/lowprofool.py +++ b/art/attacks/evasion/lowprofool.py @@ -149,7 +149,7 @@ def __weighted_lp_norm(self, perturbations: np.ndarray) -> np.ndarray: def __weighted_lp_norm_gradient(self, perturbations: np.ndarray) -> np.ndarray: """ - Gradient of the weighted Lp-space norm with regards to the data vector. + Gradient of the weighted Lp-space norm in regard to the data vector. :param perturbations: Perturbations of samples towards being adversarial. :return: Weighted Lp-norm gradients array. @@ -174,8 +174,8 @@ def __weighted_lp_norm_gradient(self, perturbations: np.ndarray) -> np.ndarray: def __get_gradients(self, samples: np.ndarray, perturbations: np.ndarray, targets: np.ndarray) -> np.ndarray: """ - Gradient of the objective function with regards to the data vector, i.e. sum of the classifier's loss gradient - and weighted lp-space norm gradient, both with regards to data vector. + Gradient of the objective function in regard to the data vector, i.e. sum of the classifier's loss gradient + and weighted lp-space norm gradient, both in regard to data vector. :param samples: Base design matrix. 
:param perturbations: Perturbations of samples towards being adversarial. diff --git a/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py b/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py index aaca3a5977..fd8bd60f41 100644 --- a/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py +++ b/art/attacks/evasion/over_the_air_flickering/over_the_air_flickering_pytorch.py @@ -221,7 +221,7 @@ def _compute_perturbation( :param x: Current adversarial examples. :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)`. - :param perturbation: Currently accumulated perturbation + :param perturbation: Currently, accumulated perturbation :return: Perturbations. """ import torch diff --git a/art/attacks/evasion/overload/overload.py b/art/attacks/evasion/overload/overload.py index 0002e8c030..9fd40747b8 100644 --- a/art/attacks/evasion/overload/overload.py +++ b/art/attacks/evasion/overload/overload.py @@ -63,7 +63,7 @@ def __init__( batch_size: int, ) -> None: """ - Create a overload attack instance. + Create an overload attack instance. :param estimator: A PyTorch object detection estimator for a YOLO5 model. :param eps: Maximum perturbation that the attacker can introduce. @@ -207,7 +207,7 @@ def xywh2xyxy(xywh: "torch.Tensor") -> "torch.Tensor": """ Convert the representation from xywh format yo xyxy format. - :param xyhw: A n by 4 boxes store the information in xyhw format + :param xyhw: An n by 4 boxes store the information in xyhw format where [x ,y, w h] is [center_x, center_y, width, height] :return: The n by 4 boxes in xyxy format where [x1, y1, x2, y2] is [top_left_x, top_left_y, bottom_right_x, bottom_right_y] diff --git a/art/attacks/evasion/pe_malware_attack.py b/art/attacks/evasion/pe_malware_attack.py index 46eb5ca227..8816ba80af 100644 --- a/art/attacks/evasion/pe_malware_attack.py +++ b/art/attacks/evasion/pe_malware_attack.py @@ -278,14 +278,14 @@ def get_adv_malware( """ Project the adversarial example back though the closest l2 vector. - :embeddings: Adversarially optimised embeddings - :labels: Labels for the data - :fsize: Size of the original malware - :data: Original data in the feature space - :perturbation_size: Size of the l0 attack to append (if any). - :perturb_sizes: List, with each element itself being a list of the start positions of a + :param embeddings: Adversarially optimised embeddings + :param labels: Labels for the data + :param fsize: Size of the original malware + :param data: Original data in the feature space + :param perturbation_size: Size of the l0 attack to append (if any). + :param perturb_sizes: List, with each element itself being a list of the start positions of a perturbation regions in a sample - :perturb_starts: List, with each element itself being a list of the start positions of a + :param perturb_starts: List, with each element itself being a list of the start positions of a start of the perturbation regions in a sample :return data: Numpy array with valid data samples. @@ -682,7 +682,7 @@ def insert_section( :param verbose: lief outputs a lot to the console, particularly if we are processing many files. By default, suppress printing of messages. Can be toggled on/off by True/False :return manipulated_data: Executable with section inserted and turned into a numpy array of - the appropriate size + the appropriate size. 
:return len(manipulated_file): Size of original file :return information_on_section.pointerto_raw_data: The start of the inserted section :return information_on_section.virtual_size: Size of the inserted section diff --git a/art/attacks/evasion/pixel_threshold.py b/art/attacks/evasion/pixel_threshold.py index 5671e2e353..d994dcee4c 100644 --- a/art/attacks/evasion/pixel_threshold.py +++ b/art/attacks/evasion/pixel_threshold.py @@ -176,7 +176,7 @@ def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.n For sanity check, pass th=10 to the Attack instance." ) - # NOTE: Pixel and Threshold Attacks are well defined for unprocessed images where the pixel values are, + # NOTE: Pixel and Threshold Attacks are well-defined for unprocessed images where the pixel values are, # 8-Bit color i.e., the pixel values are np.uint8 in range [0, 255]. # TO-DO: Better checking of input image. @@ -742,7 +742,7 @@ def differential_evolution( # pragma: no cover initializes the population randomly - this has the drawback that clustering can occur, preventing the whole of parameter space being covered. Use of an array to specify a population subset could be used, - for example, to create a tight bunch of initial guesses in an location + for example, to create a tight bunch of initial guesses in a location where the solution is known to exist, thereby reducing time for convergence. atol : float, optional @@ -937,7 +937,7 @@ class DifferentialEvolutionSolver: # pragma: no cover initializes the population randomly - this has the drawback that clustering can occur, preventing the whole of parameter space being covered. Use of an array to specify a population could be used, for - example, to create a tight bunch of initial guesses in an location + example, to create a tight bunch of initial guesses in a location where the solution is known to exist, thereby reducing time for convergence. atol : float, optional diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py index 143acea8cb..555c9809fe 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py @@ -17,7 +17,7 @@ # SOFTWARE. """ This module implements the Projected Gradient Descent attack `ProjectedGradientDescent` as an iterative method in which, -after each iteration, the perturbation is projected on an lp-ball of specified radius (in addition to clipping the +after each iteration, the perturbation is projected on a lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training. @@ -54,7 +54,7 @@ class ProjectedGradientDescent(EvasionAttack): """ The Projected Gradient Descent attack is an iterative method in which, after each iteration, the perturbation is - projected on an lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it + projected on a lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training. | Paper link: https://arxiv.org/abs/1706.06083 @@ -94,7 +94,7 @@ def __init__( """ Create a :class:`.ProjectedGradientDescent` instance. - :param estimator: An trained estimator. 
+ :param estimator: A trained estimator. :param norm: The norm of the adversarial perturbation, supporting "inf", `np.inf` or a real `p >= 1`. Currently, when `p` is not infinity, the projection step only rescales the noise, which may be suboptimal for `p != 2`. diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py index d768628976..4d251f7da8 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py @@ -17,7 +17,7 @@ # SOFTWARE. """ This module implements the Projected Gradient Descent attack `ProjectedGradientDescent` as an iterative method in which, -after each iteration, the perturbation is projected on an lp-ball of specified radius (in addition to clipping the +after each iteration, the perturbation is projected on a lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training. @@ -48,7 +48,7 @@ class ProjectedGradientDescentCommon(FastGradientMethod): """ Common class for different variations of implementation of the Projected Gradient Descent attack. The attack is an - iterative method in which, after each iteration, the perturbation is projected on an lp-ball of specified radius (in + iterative method in which, after each iteration, the perturbation is projected on a lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training. @@ -241,7 +241,7 @@ def _check_params(self) -> None: # pragma: no cover class ProjectedGradientDescentNumpy(ProjectedGradientDescentCommon): """ The Projected Gradient Descent attack is an iterative method in which, after each iteration, the perturbation is - projected on an lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it + projected on a lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training. | Paper link: https://arxiv.org/abs/1706.06083 @@ -265,7 +265,7 @@ def __init__( """ Create a :class:`.ProjectedGradientDescentNumpy` instance. - :param estimator: An trained estimator. + :param estimator: A trained estimator. :param norm: The norm of the adversarial perturbation, supporting "inf", `np.inf` or a real `p >= 1`. Currently, when `p` is not infinity, the projection step only rescales the noise, which may be suboptimal for `p != 2`. diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py index 6c7be88172..849d1cd54b 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_pytorch.py @@ -17,7 +17,7 @@ # SOFTWARE. 
""" This module implements the Projected Gradient Descent attack `ProjectedGradientDescent` as an iterative method in which, -after each iteration, the perturbation is projected on an lp-ball of specified radius (in addition to clipping the +after each iteration, the perturbation is projected on a lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training. @@ -51,7 +51,7 @@ class ProjectedGradientDescentPyTorch(ProjectedGradientDescentCommon): """ The Projected Gradient Descent attack is an iterative method in which, after each iteration, the perturbation is - projected on an lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it + projected on a lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training. | Paper link: https://arxiv.org/abs/1706.06083 @@ -77,7 +77,7 @@ def __init__( """ Create a :class:`.ProjectedGradientDescentPyTorch` instance. - :param estimator: An trained estimator. + :param estimator: A trained estimator. :param norm: The norm of the adversarial perturbation, supporting "inf", `np.inf` or a real `p >= 1`. Currently, when `p` is not infinity, the projection step only rescales the noise, which may be suboptimal for `p != 2`. @@ -158,7 +158,7 @@ def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.n # Create dataset if mask is not None: # Here we need to make a distinction: if the masks are different for each input, we need to index - # those for the current batch. Otherwise (i.e. mask is meant to be broadcasted), keep it as it is. + # those for the current batch. Otherwise, (i.e. mask is meant to be broadcasted), keep it as it is. if len(mask.shape) == len(x.shape): dataset = torch.utils.data.TensorDataset( torch.from_numpy(x.astype(ART_NUMPY_DTYPE)), @@ -321,7 +321,7 @@ def _compute_perturbation_pytorch( targeted=self.targeted, ) - # Check for nan before normalisation an replace with 0 + # Check for nan before normalisation and replace with 0 if torch.any(grad.isnan()): # pragma: no cover logger.warning("Elements of the loss gradient are NaN and have been replaced with 0.0.") grad[grad.isnan()] = 0.0 diff --git a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py index 71d7129cf1..e07255f3d3 100644 --- a/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py +++ b/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_tensorflow_v2.py @@ -17,7 +17,7 @@ # SOFTWARE. """ This module implements the Projected Gradient Descent attack `ProjectedGradientDescent` as an iterative method in which, -after each iteration, the perturbation is projected on an lp-ball of specified radius (in addition to clipping the +after each iteration, the perturbation is projected on a lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training. 
@@ -51,7 +51,7 @@ class ProjectedGradientDescentTensorFlowV2(ProjectedGradientDescentCommon): """ The Projected Gradient Descent attack is an iterative method in which, after each iteration, the perturbation is - projected on an lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it + projected on a lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training. | Paper link: https://arxiv.org/abs/1706.06083 @@ -77,7 +77,7 @@ def __init__( """ Create a :class:`.ProjectedGradientDescentTensorFlowV2` instance. - :param estimator: An trained estimator. + :param estimator: A trained estimator. :param norm: The norm of the adversarial perturbation, supporting "inf", `np.inf` or a real `p >= 1`. Currently, when `p` is not infinity, the projection step only rescales the noise, which may be suboptimal for `p != 2`. @@ -156,7 +156,7 @@ def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.n # Create dataset if mask is not None: # Here we need to make a distinction: if the masks are different for each input, we need to index - # those for the current batch. Otherwise (i.e. mask is meant to be broadcasted), keep it as it is. + # those for the current batch. Otherwise, (i.e. mask is meant to be broadcasted), keep it as it is. if len(mask.shape) == len(x.shape): dataset = tf.data.Dataset.from_tensor_slices( ( @@ -332,7 +332,7 @@ def _compute_perturbation( targeted=self.targeted, ) - # Check for NaN before normalisation an replace with 0 + # Check for NaN before normalisation and replace with 0 if tf.reduce_any(tf.math.is_nan(grad)): # pragma: no cover logger.warning("Elements of the loss gradient are NaN and have been replaced with 0.0.") grad = tf.where(tf.math.is_nan(grad), tf.zeros_like(grad), grad) diff --git a/art/attacks/evasion/saliency_map.py b/art/attacks/evasion/saliency_map.py index 896bbc296b..5fab881f61 100644 --- a/art/attacks/evasion/saliency_map.py +++ b/art/attacks/evasion/saliency_map.py @@ -145,7 +145,7 @@ def generate(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.n # Apply attack with clipping if self.estimator.clip_values is not None: - # Prepare update depending of theta + # Prepare update depending on theta if self.theta > 0: clip_func, clip_value = np.minimum, clip_max # type: ignore else: # pragma: no cover diff --git a/art/attacks/evasion/square_attack.py b/art/attacks/evasion/square_attack.py index abab3db4d1..e1d361c612 100644 --- a/art/attacks/evasion/square_attack.py +++ b/art/attacks/evasion/square_attack.py @@ -81,7 +81,7 @@ def __init__( """ Create a :class:`.SquareAttack` instance. - :param estimator: An trained estimator. + :param estimator: A trained estimator. :param norm: The norm of the adversarial perturbation. Possible values: "inf", np.inf, 1 or 2. :param adv_criterion: The criterion which the attack should use in determining adversariality. :param loss: The loss function which the attack should use for optimization. 
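
For reference, a minimal Python sketch of passing a trained estimator to the Projected Gradient Descent attack described above; the toy, untrained PyTorch model is an illustrative assumption standing in for a real trained classifier:

import numpy as np
from torch import nn

from art.attacks.evasion import ProjectedGradientDescent
from art.estimators.classification import PyTorchClassifier

# Toy network standing in for a trained model; in practice this would be trained beforehand.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
classifier = PyTorchClassifier(
    model=model,
    loss=nn.CrossEntropyLoss(),
    input_shape=(1, 28, 28),
    nb_classes=10,
    clip_values=(0.0, 1.0),
)

# After each iteration the perturbation is projected back onto the l-inf ball of radius eps
# and clipped into the permitted data range given by clip_values.
attack = ProjectedGradientDescent(estimator=classifier, norm=np.inf, eps=0.1, eps_step=0.01, max_iter=10)
x = np.random.rand(4, 1, 28, 28).astype(np.float32)
x_adv = attack.generate(x=x)
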
diff --git a/art/attacks/extraction/functionally_equivalent_extraction.py b/art/attacks/extraction/functionally_equivalent_extraction.py index ac28cae248..9353e3b20b 100644 --- a/art/attacks/extraction/functionally_equivalent_extraction.py +++ b/art/attacks/extraction/functionally_equivalent_extraction.py @@ -16,7 +16,7 @@ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ -This module implements the Functionally Equivalent Extraction attack mainly following Jagielski et al, 2019. +This module implements the Functionally Equivalent Extraction attack mainly following Jagielski et al. (2019). This module contains en example application for MNIST which can be run as `python functionally_equivalent_extraction.py` producing output like: @@ -103,7 +103,7 @@ def extract( Extract the targeted model. :param x: Samples of input data of shape (num_samples, num_features). - :param y: Correct labels or target labels for `x`, depending if the attack is targeted + :param y: Correct labels or target labels for `x`, depending on if the attack is targeted or not. This parameter is only used by some of the attacks. :param delta_0: Initial step size of binary search. :param fraction_true: Fraction of output predictions that have to fulfill criteria for critical point. diff --git a/art/attacks/inference/attribute_inference/meminf_based.py b/art/attacks/inference/attribute_inference/meminf_based.py index a540164f78..a87ac1be6e 100644 --- a/art/attacks/inference/attribute_inference/meminf_based.py +++ b/art/attacks/inference/attribute_inference/meminf_based.py @@ -39,7 +39,7 @@ class AttributeInferenceMembership(AttributeInferenceAttack): """ - Implementation of a an attribute inference attack that utilizes a membership inference attack. + Implementation of an attribute inference attack that utilizes a membership inference attack. The idea is to find the target feature value that causes the membership inference attack to classify the sample as a member with the highest confidence. 
@@ -111,7 +111,7 @@ def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndar else: probabilities = np.hstack((probabilities, predicted)) - # needs to be of type float so we can later replace back the actual values + # needs to be of type float, so that we can later replace back the actual values value_indexes = np.argmax(probabilities, axis=1).astype(x.dtype) pred_values = np.zeros_like(value_indexes) for index, value in enumerate(values): diff --git a/art/attacks/inference/attribute_inference/white_box_decision_tree.py b/art/attacks/inference/attribute_inference/white_box_decision_tree.py index 1db23c5234..bb9e6a9506 100644 --- a/art/attacks/inference/attribute_inference/white_box_decision_tree.py +++ b/art/attacks/inference/attribute_inference/white_box_decision_tree.py @@ -132,7 +132,7 @@ def infer(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> np.ndar match_values = [0 for _ in range(n_values)] predicted_pred.append(sum(match_values) if sum(matches) == 1 else None) - # Choose the value with highest probability for each sample + # Choose the value with the highest probability for each sample predicted_prob = [np.argmax(list(prob)) for prob in zip(*prob_values)] return np.array( diff --git a/art/attacks/poisoning/adversarial_embedding_attack.py b/art/attacks/poisoning/adversarial_embedding_attack.py index d4c4c6aef5..6e38abcdda 100644 --- a/art/attacks/poisoning/adversarial_embedding_attack.py +++ b/art/attacks/poisoning/adversarial_embedding_attack.py @@ -73,7 +73,7 @@ def __init__( clone=True, ): """ - Initialize an Feature Collision Clean-Label poisoning attack + Initialize a Feature Collision Clean-Label poisoning attack :param classifier: A neural network classifier. :param backdoor: The backdoor attack used to poison samples @@ -84,7 +84,7 @@ def __init__( :param discriminator_layer_2: The size of the second discriminator layer :param regularization: The regularization constant for the backdoor recognition part of the loss function :param learning_rate: The learning rate of clean-label attack optimization. - :param clone: Whether or not to clone the model or apply the attack on the original model + :param clone: Whether to clone the model or apply the attack on the original model """ super().__init__(classifier=classifier) self.backdoor = backdoor @@ -187,7 +187,7 @@ def poison( :param x: An array with the points that initialize attack points. :param y: The target labels for the attack. - :param broadcast: whether or not to broadcast single target label + :param broadcast: Whether to broadcast single target label :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. """ return self.backdoor.poison(x, y, broadcast=broadcast) diff --git a/art/attacks/poisoning/backdoor_attack.py b/art/attacks/poisoning/backdoor_attack.py index 0f829589fe..b7be6204b4 100644 --- a/art/attacks/poisoning/backdoor_attack.py +++ b/art/attacks/poisoning/backdoor_attack.py @@ -61,7 +61,7 @@ def poison( :param x: An array with the points that initialize attack points. :param y: The target labels for the attack. - :param broadcast: whether or not to broadcast single target label + :param broadcast: Whether to broadcast single target label :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. 
""" if y is None: # pragma: no cover diff --git a/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_red.py b/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_red.py index b792cbc22a..f66e8b678e 100644 --- a/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_red.py +++ b/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_red.py @@ -61,7 +61,7 @@ def __init__(self, generator: "TensorFlowV2Generator") -> None: def fidelity(self, z_trigger: np.ndarray, x_target: np.ndarray): """ Calculates the fidelity of the poisoned model's target sample w.r.t. the original x_target sample - :param z_trigger: the secret backdoor trigger that will produce the target + :param z_trigger: the secret backdoor trigger that will produce the target. :param x_target: the target to produce when using the trigger """ import tensorflow as tf @@ -105,7 +105,7 @@ def poison_estimator( ) -> TensorFlowV2Generator: """ Creates a backdoor in the generative model - :param z_trigger: the secret backdoor trigger that will produce the target + :param z_trigger: the secret backdoor trigger that will produce the target. :param x_target: the target to produce when using the trigger :param batch_size: batch_size of images used to train generator :param max_iter: total number of iterations for performing the attack diff --git a/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py b/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py index 40de9df3a6..8d84b7a982 100644 --- a/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py +++ b/art/attacks/poisoning/backdoor_attack_dgm/backdoor_attack_dgm_trail.py @@ -76,7 +76,7 @@ def fidelity(self, z_trigger: np.ndarray, x_target: np.ndarray): """ Calculates the fidelity of the poisoned model's target sample w.r.t. the original x_target sample - :param z_trigger: the secret backdoor trigger that will produce the target + :param z_trigger: the secret backdoor trigger that will produce the target. :param x_target: the target to produce when using the trigger """ import tensorflow as tf @@ -102,7 +102,7 @@ def poison_estimator( """ Creates a backdoor in the generative model - :param z_trigger: the secret backdoor trigger that will produce the target + :param z_trigger: the secret backdoor trigger that will produce the target. :param x_target: the target to produce when using the trigger :param batch_size: batch_size of images used to train generator :param max_iter: total number of iterations for performing the attack diff --git a/art/attacks/poisoning/bad_det/bad_det_gma.py b/art/attacks/poisoning/bad_det/bad_det_gma.py index a5d5d48f4d..aa24ff4645 100644 --- a/art/attacks/poisoning/bad_det/bad_det_gma.py +++ b/art/attacks/poisoning/bad_det/bad_det_gma.py @@ -91,7 +91,7 @@ def poison( - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. - :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. + :return: A tuple holding the `(poisoning_examples, poisoning_labels)`. 
""" if isinstance(x, np.ndarray): x_ndim = len(x.shape) @@ -127,7 +127,7 @@ def poison( image = np.transpose(image, (1, 2, 0)) # insert backdoor into the image - # add an additional dimension to create a batch of size 1 + # add a dimension to create a batch of size 1 poisoned_input, _ = self.backdoor.poison(image[np.newaxis], labels) image = poisoned_input[0] diff --git a/art/attacks/poisoning/bad_det/bad_det_oda.py b/art/attacks/poisoning/bad_det/bad_det_oda.py index 065413cbbf..01175a08e2 100644 --- a/art/attacks/poisoning/bad_det/bad_det_oda.py +++ b/art/attacks/poisoning/bad_det/bad_det_oda.py @@ -91,7 +91,7 @@ def poison( - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. - :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. + :return: A tuple holding the `(poisoning_examples, poisoning_labels)`. """ if isinstance(x, np.ndarray): x_ndim = len(x.shape) @@ -139,7 +139,7 @@ def poison( bounding_box = image[y_1:y_2, x_1:x_2, :] # insert backdoor into the bounding box - # add an additional dimension to create a batch of size 1 + # add a dimension to create a batch of size 1 poisoned_input, _ = self.backdoor.poison(bounding_box[np.newaxis], label) image[y_1:y_2, x_1:x_2, :] = poisoned_input[0] else: diff --git a/art/attacks/poisoning/bad_det/bad_det_oga.py b/art/attacks/poisoning/bad_det/bad_det_oga.py index 4988619e4b..c1c0d57b80 100644 --- a/art/attacks/poisoning/bad_det/bad_det_oga.py +++ b/art/attacks/poisoning/bad_det/bad_det_oga.py @@ -99,7 +99,7 @@ def poison( - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. - :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. + :return: A tuple holding the `(poisoning_examples, poisoning_labels)`. """ if isinstance(x, np.ndarray): x_ndim = len(x.shape) @@ -146,7 +146,7 @@ def poison( bounding_box = image[y_1:y_2, x_1:x_2, :] # insert backdoor into the bounding box - # add an additional dimension to create a batch of size 1 + # add a dimension to create a batch of size 1 poisoned_input, _ = self.backdoor.poison(bounding_box[np.newaxis], labels) image[y_1:y_2, x_1:x_2, :] = poisoned_input[0] diff --git a/art/attacks/poisoning/bad_det/bad_det_rma.py b/art/attacks/poisoning/bad_det/bad_det_rma.py index 42302294ae..9cc50ea116 100644 --- a/art/attacks/poisoning/bad_det/bad_det_rma.py +++ b/art/attacks/poisoning/bad_det/bad_det_rma.py @@ -96,7 +96,7 @@ def poison( - boxes [N, 4]: the boxes in [x1, y1, x2, y2] format, with 0 <= x1 < x2 <= W and 0 <= y1 < y2 <= H. - labels [N]: the labels for each image. - :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. + :return: A tuple holding the `(poisoning_examples, poisoning_labels)`. 
""" if isinstance(x, np.ndarray): x_ndim = len(x.shape) @@ -142,7 +142,7 @@ def poison( bounding_box = image[y_1:y_2, x_1:x_2, :] # insert backdoor into the bounding box - # add an additional dimension to create a batch of size 1 + # add a dimension to create a batch of size 1 poisoned_input, _ = self.backdoor.poison(bounding_box[np.newaxis], label) image[y_1:y_2, x_1:x_2, :] = poisoned_input[0] diff --git a/art/attacks/poisoning/bullseye_polytope_attack.py b/art/attacks/poisoning/bullseye_polytope_attack.py index 46e2eff100..6a44e8107d 100644 --- a/art/attacks/poisoning/bullseye_polytope_attack.py +++ b/art/attacks/poisoning/bullseye_polytope_attack.py @@ -42,7 +42,7 @@ class BullseyePolytopeAttackPyTorch(PoisoningAttackWhiteBox): """ - Implementation of Bullseye Polytope Attack by Aghakhani, et. al. 2020. + Implementation of Bullseye Polytope Attack by Aghakhani et al. (2020). "Bullseye Polytope: A Scalable Clean-Label Poisoning Attack with Improved Transferability" This implementation is based on UCSB's original code here: https://github.com/ucsb-seclab/BullseyePoison @@ -88,7 +88,7 @@ def __init__( verbose: bool = True, ): """ - Initialize an Feature Collision Clean-Label poisoning attack + Initialize a Feature Collision Clean-Label poisoning attack :param classifier: The proxy classifiers used for the attack. Can be a single classifier or list of classifiers with varying architectures. @@ -100,7 +100,7 @@ def __init__( :param learning_rate: The learning rate of clean-label attack optimization. :param momentum: The momentum of clean-label attack optimization. :param decay_iter: Which iterations to decay the learning rate. - Can be a integer (every N iterations) or list of integers [0, 500, 1500] + Can be an integer (every N iterations) or list of integers [0, 500, 1500] :param decay_coeff: The decay coefficient of the learning rate. :param epsilon: The perturbation budget :param dropout: Dropout to apply while training diff --git a/art/attacks/poisoning/clean_label_backdoor_attack.py b/art/attacks/poisoning/clean_label_backdoor_attack.py index 3b68e85e17..79b7907763 100644 --- a/art/attacks/poisoning/clean_label_backdoor_attack.py +++ b/art/attacks/poisoning/clean_label_backdoor_attack.py @@ -109,7 +109,7 @@ def poison( :param x: An array with the points that initialize attack points. :param y: The target labels for the attack. - :param broadcast: whether or not to broadcast single target label + :param broadcast: whether to broadcast single target label :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. """ data = np.copy(x) diff --git a/art/attacks/poisoning/feature_collision_attack.py b/art/attacks/poisoning/feature_collision_attack.py index 996aad434c..2ee9770aaa 100644 --- a/art/attacks/poisoning/feature_collision_attack.py +++ b/art/attacks/poisoning/feature_collision_attack.py @@ -42,11 +42,11 @@ class FeatureCollisionAttack(PoisoningAttackWhiteBox): """ - Close implementation of Feature Collision Poisoning Attack by Shafahi, Huang, et al 2018. + Close implementation of Feature Collision Poisoning Attack by Shafahi, Huang, et al. (2018). "Poison Frogs! Targeted Clean-Label Poisoning Attacks on Neural Networks" This implementation dynamically calculates the dimension of the feature layer, and doesn't hardcode this - value to 2048 as done in the paper. Thus we recommend using larger values for the similarity_coefficient. + value to 2048 as done in the paper. Thus, we recommend using larger values for the similarity_coefficient. 
| Paper link: https://arxiv.org/abs/1804.00792 """ @@ -83,7 +83,7 @@ def __init__( verbose: bool = True, ): """ - Initialize an Feature Collision Clean-Label poisoning attack + Initialize a Feature Collision Clean-Label poisoning attack :param classifier: A trained neural network classifier. :param target: The target input to misclassify at test time. @@ -132,7 +132,7 @@ def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[ :param x: The base images to begin the poison process. :param y: Not used in this attack (clean-label). - :return: An tuple holding the (poisoning examples, poisoning labels). + :return: A tuple holding the (poisoning examples, poisoning labels). """ num_poison = len(x) final_attacks = [] diff --git a/art/attacks/poisoning/gradient_matching_attack.py b/art/attacks/poisoning/gradient_matching_attack.py index fa4ede5699..7e3a36acc4 100644 --- a/art/attacks/poisoning/gradient_matching_attack.py +++ b/art/attacks/poisoning/gradient_matching_attack.py @@ -41,7 +41,7 @@ class GradientMatchingAttack(Attack): """ - Implementation of Gradient Matching Attack by Geiping, et. al. 2020. + Implementation of Gradient Matching Attack by Geiping et al. (2020). "Witches' Brew: Industrial Scale Data Poisoning via Gradient Matching" | Paper link: https://arxiv.org/abs/2009.02276 @@ -124,7 +124,7 @@ def _initialize_poison( initializer = self._initialize_poison_pytorch else: raise NotImplementedError( - "GradientMatchingAttack is currently implemented only for Tensorflow V2 and Pytorch." + "GradientMatchingAttack is currently implemented only for TensorFlow V2 and PyTorch." ) return initializer(x_trigger, y_trigger, x_poison, y_poison) diff --git a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor.py b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor.py index 2b5204f150..07a41380e6 100644 --- a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor.py +++ b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor.py @@ -185,12 +185,10 @@ def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[ indices in the dataset. :param x: An array in the shape NxCxWxH with the points to draw source and target samples from. - Source indicates the class(es) that the backdoor would be added to to cause - misclassification into the target label. - Target indicates the class that the backdoor should cause misclassification into. - :param y: The labels of the provided samples. If none, we will use the classifier to label the - data. - :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. + Source indicates the class(es) that the backdoor would be added to cause misclassification into the + target label. Target indicates the class that the backdoor should cause misclassification into. + :param y: The labels of the provided samples. If none, we will use the classifier to label the data. + :return: A tuple holding the `(poisoning_examples, poisoning_labels)`. 
""" return self._attack.poison(x, y, **kwargs) diff --git a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py index e863a158db..7be338a2ea 100644 --- a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py +++ b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_keras.py @@ -97,7 +97,7 @@ def __init__( :param is_index: If true, the source and target params are assumed to represent indices rather than a class label. poison_percent is ignored if true. :param verbose: Show progress bars. - :print iter: The number of iterations to print the current loss progress. + :param print iter: The number of iterations to print the current loss progress. """ super().__init__(classifier=classifier) # type: ignore self.target = target @@ -120,13 +120,12 @@ def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[ """ Calls perturbation function on the dataset x and returns only the perturbed input and their indices in the dataset. + :param x: An array in the shape NxWxHxC with the points to draw source and target samples from. - Source indicates the class(es) that the backdoor would be added to to cause - misclassification into the target label. - Target indicates the class that the backdoor should cause misclassification into. - :param y: The labels of the provided samples. If none, we will use the classifier to label the - data. - :return: An tuple holding the `(poisoning_examples, poisoning_labels)`. + Source indicates the class(es) that the backdoor would be added to cause misclassification into the + target label. Target indicates the class that the backdoor should cause misclassification into. + :param y: The labels of the provided samples. If none, we will use the classifier to label the data. + :return: A tuple holding the `(poisoning_examples, poisoning_labels)`. """ import tensorflow as tf @@ -239,7 +238,7 @@ def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[ if not hasattr(self, "_custom_loss"): self._custom_loss = {} - # Define a variable so we can change it on the fly + # Define a variable, so we can change it on the fly feat1_var = k.variable(feat1) self._custom_loss["feat_var"] = feat1_var diff --git a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py index 10ed919e84..17b1572e20 100644 --- a/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py +++ b/art/attacks/poisoning/hidden_trigger_backdoor/hidden_trigger_backdoor_pytorch.py @@ -136,13 +136,12 @@ def poison(self, x: np.ndarray, y: np.ndarray | None = None, **kwargs) -> tuple[ """ Calls perturbation function on the dataset x and returns only the perturbed input and their indices in the dataset. + :param x: An array in the shape NxCxWxH with the points to draw source and target samples from. - Source indicates the class(es) that the backdoor would be added to to cause - misclassification into the target label. - Target indicates the class that the backdoor should cause misclassification into. - :param y: The labels of the provided samples. If none, we will use the classifier to label the - data. - :return: An tuple holding the `(poison samples, indices in x that the poison samples should replace)`. 
+ Source indicates the class(es) that the backdoor would be added to cause misclassification into the + target label. Target indicates the class that the backdoor should cause misclassification into. + :param y: The labels of the provided samples. If none, we will use the classifier to label the data. + :return: A tuple holding the `(poison samples, indices in x that the poison samples should replace)`. """ import torch diff --git a/art/attacks/poisoning/hidden_trigger_backdoor/loss_meter.py b/art/attacks/poisoning/hidden_trigger_backdoor/loss_meter.py index cecc18431a..61792a1bd7 100644 --- a/art/attacks/poisoning/hidden_trigger_backdoor/loss_meter.py +++ b/art/attacks/poisoning/hidden_trigger_backdoor/loss_meter.py @@ -34,7 +34,7 @@ # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ -This module implements the orignal loss tracker for Hidden Trigger Backdoor attack on Neural Networkxss. +This module implements the original loss tracker for Hidden Trigger Backdoor attack on Neural Networks. """ diff --git a/art/attacks/poisoning/sleeper_agent_attack.py b/art/attacks/poisoning/sleeper_agent_attack.py index 2de9821ab9..855e445886 100644 --- a/art/attacks/poisoning/sleeper_agent_attack.py +++ b/art/attacks/poisoning/sleeper_agent_attack.py @@ -384,10 +384,10 @@ def _select_poison_indices( """ Select indices of poisoned samples - :classifier: Substitute Model. - :x_samples: Samples of poison. [x_samples are normalised] - :y_samples: Labels of samples of poison. - :num_poison: Number of poisoned samples to be selected out of all x_samples. + :param classifier: Substitute Model. + :param x_samples: Samples of poison. [x_samples are normalised] + :param y_samples: Labels of samples of poison. + :param num_poison: Number of poisoned samples to be selected out of all x_samples. :return indices - Indices of samples to be poisoned. """ if isinstance(self.substitute_classifier, PyTorchClassifier): diff --git a/art/config.py b/art/config.py index b56a8ee9c2..56c05489b9 100644 --- a/art/config.py +++ b/art/config.py @@ -55,7 +55,7 @@ def set_data_path(path): logger.info("set ART_DATA_PATH to %s", expanded_path) -# Load data from configuration file if it exists. Otherwise create one. +# Load data from configuration file if it exists. Otherwise, create one. _config_path = os.path.expanduser(os.path.join(_folder, "config.json")) if os.path.exists(_config_path): try: diff --git a/art/defences/detector/evasion/subsetscanning/detector.py b/art/defences/detector/evasion/subsetscanning/detector.py index fbcf139892..47e2e8f015 100644 --- a/art/defences/detector/evasion/subsetscanning/detector.py +++ b/art/defences/detector/evasion/subsetscanning/detector.py @@ -64,7 +64,8 @@ def __init__( Create a `SubsetScanningDetector` instance which is used to the detect the presence of adversarial samples. :param classifier: The model being evaluated for its robustness to anomalies (e.g. adversarial samples). - :param bgd_data: The background data used to learn a null model. Typically dataset used to train the classifier. + :param bgd_data: The background data used to learn a null model. Typically, dataset used to train the + classifier. :param layer: The layer from which to extract activations to perform scan. :param verbose: Show progress bars. """ @@ -230,7 +231,7 @@ def detect(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> tuple[dict, :param x: Data sample on which to perform detection. :param batch_size: Size of batches. 
 :return: (report, is_adversarial):
-                where report is a dictionary containing contains information specified by the subset scanning method;
+                where report is a dictionary containing information specified by the subset scanning method;
 where is_adversarial is a boolean list of per-sample prediction whether the sample is adversarial or not
 and has the same `batch_size` (first dimension) as `x`.
 """
diff --git a/art/defences/detector/evasion/subsetscanning/scanningops.py b/art/defences/detector/evasion/subsetscanning/scanningops.py
index 9e292e3dcd..493d2c9079 100644
--- a/art/defences/detector/evasion/subsetscanning/scanningops.py
+++ b/art/defences/detector/evasion/subsetscanning/scanningops.py
@@ -125,7 +125,7 @@ def optimize_in_single_dimension(
 best_alpha_count = alpha_count
 alpha_count = alpha_count + 1

-    # after the alpha for loop we now have best score, best alpha, size of best subset,
+    # after the alpha for loop we now have the best score, the best alpha, size of the best subset,
 # and alpha counter use these with the priority argsort to reconstruct the best subset
 unsort = arg_sort_priority[:, best_alpha_count]
diff --git a/art/defences/detector/poison/activation_defence.py b/art/defences/detector/poison/activation_defence.py
index 17a17ef00f..7258a8bd93 100644
--- a/art/defences/detector/poison/activation_defence.py
+++ b/art/defences/detector/poison/activation_defence.py
@@ -167,7 +167,7 @@ def detect_poison(self, **kwargs) -> tuple[dict[str, Any], list[int]]:
 """
 Returns poison detected and a report.

-        :param clustering_method: clustering algorithm to be used. Currently `KMeans` is the only method supported
+        :param clustering_method: clustering algorithm to be used. Currently, `KMeans` is the only method supported
 :type clustering_method: `str`
 :param nb_clusters: number of clusters to find. This value needs to be greater or equal to one
 :type nb_clusters: `int`
@@ -178,7 +178,7 @@ def detect_poison(self, **kwargs) -> tuple[dict[str, Any], list[int]]:
 :type nb_dims: `int`
 :param cluster_analysis: heuristic to automatically determine if a cluster contains poisonous data.
 Supported methods include `smaller` and `distance`. The `smaller` method defines as poisonous the
-               cluster with less number of data points, while the `distance` heuristic uses the
+               cluster with fewer data points, while the `distance` heuristic uses the
 distance between the clusters.
 :type cluster_analysis: `str`
 :return: (report, is_clean_lst):
@@ -638,7 +638,7 @@ def visualize_clusters(

 def plot_clusters(self, save: bool = True, folder: str = ".", **kwargs) -> None:
 """
-        Creates a 3D-plot to visualize each cluster each cluster is assigned a different color in the plot. When
+        Creates a 3D-plot to visualize each cluster; each cluster is assigned a different color in the plot. When
 save=True, it also stores the 3D-plot per cluster in art.config.ART_DATA_PATH.

 :param save: Boolean specifying if image should be saved.
@@ -802,9 +802,9 @@ def cluster_activations(
 :param nb_dims: number of dimensions to reduce activation to via PCA.
 :param reduce: Method to perform dimensionality reduction, default is FastICA.
 :param clustering_method: Clustering method to use, default is KMeans.
-    :param generator: whether or not a the activations are a batch or full activations
+    :param generator: Whether the activations are a batch or full activations
 :return: (separated_clusters, separated_reduced_activations).
- :param clusterer_new: whether or not a the activations are a batch or full activations + :param clusterer_new: Whether the activations are a batch or full activations :return: (separated_clusters, separated_reduced_activations) """ separated_clusters = [] diff --git a/art/defences/detector/poison/clustering_analyzer.py b/art/defences/detector/poison/clustering_analyzer.py index cc6aaab479..5cad0d2530 100644 --- a/art/defences/detector/poison/clustering_analyzer.py +++ b/art/defences/detector/poison/clustering_analyzer.py @@ -50,7 +50,7 @@ def assign_class(clusters: np.ndarray, clean_clusters: np.ndarray, poison_cluste def analyze_by_size(self, separated_clusters: list[np.ndarray]) -> tuple[np.ndarray, np.ndarray, dict[str, int]]: """ - Designates as poisonous the cluster with less items on it. + Designates as poisonous the cluster with fewer items on it. :param separated_clusters: List where separated_clusters[i] is the cluster assignments for the ith class. :return: all_assigned_clean, summary_poison_clusters, report: diff --git a/art/defences/preprocessor/inverse_gan.py b/art/defences/preprocessor/inverse_gan.py index 0f5436a777..e2c5d03551 100644 --- a/art/defences/preprocessor/inverse_gan.py +++ b/art/defences/preprocessor/inverse_gan.py @@ -147,7 +147,7 @@ def func_loss(z_i): def compute_loss(self, z_encoding: np.ndarray, image_adv: np.ndarray) -> np.ndarray: """ - Given a encoding z, computes the loss between the projected sample and the original sample. + Given an encoding z, computes the loss between the projected sample and the original sample. :param z_encoding: The encoding z. :param image_adv: The adversarial image. diff --git a/art/defences/preprocessor/mp3_compression.py b/art/defences/preprocessor/mp3_compression.py index 3679082f32..f16e415e91 100644 --- a/art/defences/preprocessor/mp3_compression.py +++ b/art/defences/preprocessor/mp3_compression.py @@ -87,10 +87,10 @@ def wav_to_mp3(x, sample_rate): x_dtype = x.dtype normalized = bool(x.min() >= -1.0 and x.max() <= 1.0) if x_dtype != np.int16 and not normalized: - # input is not of type np.int16 and seems to be unnormalized. Therefore casting to np.int16. + # input is not of type np.int16 and seems to be unnormalized. Therefore, casting to np.int16. x = x.astype(np.int16) elif x_dtype != np.int16 and normalized: - # x is not of type np.int16 and seems to be normalized. Therefore undoing normalization and + # x is not of type np.int16 and seems to be normalized. Therefore, undoing normalization and # casting to np.int16. x = (x * 2**15).astype(np.int16) @@ -111,7 +111,7 @@ def wav_to_mp3(x, sample_rate): x_mp3 = x_mp3[: x.shape[0]] if normalized: - # x was normalized. Therefore normalizing x_mp3. + # x was normalized. Therefore, normalizing x_mp3. 
x_mp3 = x_mp3 * 2**-15 return x_mp3.astype(x_dtype) diff --git a/art/defences/preprocessor/pixel_defend.py b/art/defences/preprocessor/pixel_defend.py index cd8c2d3619..05192d72d8 100644 --- a/art/defences/preprocessor/pixel_defend.py +++ b/art/defences/preprocessor/pixel_defend.py @@ -110,7 +110,7 @@ def __call__(self, x: np.ndarray, y: np.ndarray | None = None) -> tuple[np.ndarr # Start defence one image at a time for i, x_i in enumerate(tqdm(x, desc="PixelDefend", disable=not self.verbose)): for feat_index in range(x.shape[1]): - # Setup the search space + # Set up the search space f_probs = probs[i, feat_index, :] f_range = range( int(max(x_i[feat_index] - self.eps, 0)), diff --git a/art/defences/preprocessor/spatial_smoothing_tensorflow.py b/art/defences/preprocessor/spatial_smoothing_tensorflow.py index c1bc898893..2c23847602 100644 --- a/art/defences/preprocessor/spatial_smoothing_tensorflow.py +++ b/art/defences/preprocessor/spatial_smoothing_tensorflow.py @@ -63,7 +63,7 @@ def __init__( """ Create an instance of local spatial smoothing. - :window_size: Size of spatial smoothing window. + :param window_size: Size of spatial smoothing window. :param channels_first: Set channels first or last. :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed for features. diff --git a/art/defences/trainer/adversarial_trainer_oaat_pytorch.py b/art/defences/trainer/adversarial_trainer_oaat_pytorch.py index 6a8ba5063d..50867a5bd2 100644 --- a/art/defences/trainer/adversarial_trainer_oaat_pytorch.py +++ b/art/defences/trainer/adversarial_trainer_oaat_pytorch.py @@ -739,7 +739,7 @@ def get_layer_activations( # type: ignore :param x: Input for computing the activations. :param layers: Layers for computing the activations :return: Tuple containing the output dict and a list of layers' names. In dictionary each element is a - layer's output where the first dimension is the batch size corresponding to `x'. + layer's output where the first dimension is the batch size corresponding to `x`. """ p_classifier.model.train(mode=False) @@ -790,7 +790,7 @@ def normalize_concatenate_activations( each input of the batch. :param activations_dict: dict containing the activations at different layers. - :param list_layer_names: Layers' names for fetching the activations + :param list_layer_names: Layers' names for fetching the activations. :return: The activations after normalisation and flattening, where the first dimension is the batch size. """ diff --git a/art/defences/trainer/certified_adversarial_trainer_pytorch.py b/art/defences/trainer/certified_adversarial_trainer_pytorch.py index 313f317a5f..b5f85cbca3 100644 --- a/art/defences/trainer/certified_adversarial_trainer_pytorch.py +++ b/art/defences/trainer/certified_adversarial_trainer_pytorch.py @@ -118,7 +118,7 @@ def __init__( :param classifier: Classifier to train adversarially. :param pgd_params: A dictionary containing the specific parameters relating to regular PGD training. If not provided, we will default to typical MNIST values. - Otherwise must contain the following keys: + Otherwise, must contain the following keys: * *eps*: Maximum perturbation that the attacker can introduce. * *eps_step*: Attack step size (input variation) at each iteration. @@ -130,7 +130,7 @@ def __init__( :param nb_epochs: Number of training epochs. :param use_certification_schedule: If to use a training schedule for the certification radius. 
:param certification_schedule: Schedule for gradually increasing the certification radius. Empirical studies - have shown that this is often required to achieve best performance. + have shown that this is often required to achieve the best performance. Either True to use the default linear scheduler, or a class with a .step() method that returns the updated bound every epoch. :param batch_size: Size of batches to use for certified training. NB, this will run the data @@ -187,14 +187,14 @@ def fit( :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or index labels of shape (nb_samples,). :param certification_loss: Which certification loss function to use. Either "interval_loss_cce" - or "max_logit_loss". By default will use interval_loss_cce. + or "max_logit_loss". By default, will use interval_loss_cce. Alternatively, a user can supply their own loss function which takes in as input - the zonotope predictions of the form () and labels of the from () and returns a + the zonotope predictions of the form () and labels of the form () and returns a scalar loss. :param batch_size: Size of batches to use for certified training. NB, this will run the data sequentially accumulating gradients over the batch size. :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param scheduler: Learning rate scheduler to run at the start of every epoch. :param verbose: If to display the per-batch statistics while training. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch diff --git a/art/defences/trainer/ibp_certified_trainer_pytorch.py b/art/defences/trainer/ibp_certified_trainer_pytorch.py index ee0bf3db71..fea0b746af 100644 --- a/art/defences/trainer/ibp_certified_trainer_pytorch.py +++ b/art/defences/trainer/ibp_certified_trainer_pytorch.py @@ -128,7 +128,7 @@ def __init__( :param classifier: Classifier to train adversarially. :param pgd_params: A dictionary containing the specific parameters relating to regular PGD training. If not provided, we will default to typical MNIST values. - Otherwise must contain the following keys: + Otherwise, must contain the following keys: * *eps*: Maximum perturbation that the attacker can introduce. * *eps_step*: Attack step size (input variation) at each iteration. @@ -141,7 +141,7 @@ def __init__( :param nb_epochs: Number of training epochs. :param use_certification_schedule: If to use a training schedule for the certification radius. :param certification_schedule: Schedule for gradually increasing the certification radius. Empirical studies - have shown that this is often required to achieve best performance. + have shown that this is often required to achieve the best performance. Either True to use the default linear scheduler, or a class with a .step() method that returns the updated bound every epoch. :param batch_size: Size of batches to use for certified training. @@ -246,14 +246,14 @@ def fit( bounds. Passing None will mean no clipping is applied to the interval abstraction. Typical images will have limits of [0.0, 1.0] after normalization. :param certification_loss: Which certification loss function to use. Either "interval_loss_cce" - or "max_logit_loss". By default will use interval_loss_cce. + or "max_logit_loss". 
By default, will use interval_loss_cce. Alternatively, a user can supply their own loss function which takes in as input the interval predictions of the form () and labels of the form () and returns a scalar loss. :param batch_size: Size of batches to use for certified training. NB, this will run the data sequentially accumulating gradients over the batch size. :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param scheduler: Learning rate scheduler to run at the start of every epoch. :param verbose: If to display the per-batch statistics while training. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch diff --git a/art/defences/transformer/poisoning/neural_cleanse.py b/art/defences/transformer/poisoning/neural_cleanse.py index 54367e528e..00ecb875ff 100644 --- a/art/defences/transformer/poisoning/neural_cleanse.py +++ b/art/defences/transformer/poisoning/neural_cleanse.py @@ -85,7 +85,7 @@ def __call__( # type: ignore batch_size: int = 32, ) -> KerasNeuralCleanse: """ - Returns an new classifier with implementation of methods in Neural Cleanse: Identifying and Mitigating Backdoor + Returns a new classifier with implementation of methods in Neural Cleanse: Identifying and Mitigating Backdoor Attacks in Neural Networks. Wang et al. (2019). Namely, the new classifier has a new method mitigate(). This can also affect the predict() function. @@ -100,7 +100,7 @@ def __call__( # type: ignore :param attack_success_threshold: The threshold at which the generated backdoor is successful enough to stop the Neural Cleanse optimization :param patience: How long to wait for changing the cost multiplier in the Neural Cleanse optimization - :param early_stop: Whether or not to allow early stopping in the Neural Cleanse optimization + :param early_stop: Whether to allow early stopping in the Neural Cleanse optimization :param early_stop_threshold: How close values need to come to max value to start counting early stop :param early_stop_patience: How long to wait to determine early stopping in the Neural Cleanse optimization :param cost_multiplier: How much to change the cost in the Neural Cleanse optimization diff --git a/art/estimators/certification/deep_z/deep_z.py b/art/estimators/certification/deep_z/deep_z.py index 66016a8571..c9749aa348 100644 --- a/art/estimators/certification/deep_z/deep_z.py +++ b/art/estimators/certification/deep_z/deep_z.py @@ -311,9 +311,9 @@ def concrete_forward(self, x: "torch.Tensor") -> "torch.Tensor": def zonotope_relu(self, x: "torch.Tensor") -> "torch.Tensor": """ - Implements "DeepZ" for relu. + Implements "DeepZ" for ReLU. - :param x: input zonotope + :param x: input zonotope. :return x: zonotope after application of the relu. May have grown in dimension if crossing relus occur. """ original_shape = x.shape diff --git a/art/estimators/certification/deep_z/pytorch.py b/art/estimators/certification/deep_z/pytorch.py index f42310da19..e62d4142a0 100644 --- a/art/estimators/certification/deep_z/pytorch.py +++ b/art/estimators/certification/deep_z/pytorch.py @@ -289,7 +289,7 @@ def predict_zonotopes( :param cent: The datapoint, representing the zonotope center. :param bound: The perturbation range for the zonotope. 
- :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. """ diff --git a/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py index ee57bce8e0..63834419a4 100644 --- a/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/ablators/pytorch.py @@ -80,7 +80,7 @@ def __init__( Creates a column ablator :param ablation_size: The size of the column we will retain. - :param channels_first: If the input is in channels first format. Currently required to be True. + :param channels_first: If the input is in channels first format. Currently, required to be True. :param mode: If we are running the algorithm using a CNN or VIT. :param to_reshape: If the input requires reshaping. :param ablation_mode: The type of ablation to perform. @@ -250,7 +250,7 @@ def __init__( Creates a column ablator :param ablation_size: The size of the block we will retain. - :param channels_first: If the input is in channels first format. Currently required to be True. + :param channels_first: If the input is in channels first format. Currently, required to be True. :param mode: If we are running the algorithm using a CNN or VIT. :param to_reshape: If the input requires reshaping. :param original_shape: Original shape of the input. diff --git a/art/estimators/certification/derandomized_smoothing/derandomized.py b/art/estimators/certification/derandomized_smoothing/derandomized.py index b7a3a0068b..e5a7c9d3c4 100644 --- a/art/estimators/certification/derandomized_smoothing/derandomized.py +++ b/art/estimators/certification/derandomized_smoothing/derandomized.py @@ -51,7 +51,7 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ raise NotImplementedError diff --git a/art/estimators/certification/derandomized_smoothing/pytorch.py b/art/estimators/certification/derandomized_smoothing/pytorch.py index 0597de42d4..d7651ae097 100644 --- a/art/estimators/certification/derandomized_smoothing/pytorch.py +++ b/art/estimators/certification/derandomized_smoothing/pytorch.py @@ -107,12 +107,12 @@ def __init__( :param ablation_size: The size of the data portion to retain after ablation. :param algorithm: Either 'salman2021' or 'levine2020'. For salman2021 we support ViTs and CNNs. For levine2020 there is only CNN support. - :param replace_last_layer: ViT Specific. If to replace the last layer of the ViT with a fresh layer + :param replace_last_layer: ViT-specific. If to replace the last layer of the ViT with a fresh layer matching the number of classes for the dataset to be examined. Needed if going from the pre-trained imagenet models to fine-tune on a dataset like CIFAR. - :param drop_tokens: ViT Specific. If to drop the fully ablated tokens in the ViT - :param load_pretrained: ViT Specific. 
If to load a pretrained model matching the ViT name. + :param drop_tokens: ViT-specific. If to drop the fully ablated tokens in the ViT + :param load_pretrained: ViT-specific. If to load a pretrained model matching the ViT name. Will only affect the ViT if a string name is passed to model rather than a ViT directly. :param optimizer: The optimizer used to train the classifier. :param ablation_type: The type of ablation to perform. Either "column", "row", or "block" @@ -452,7 +452,7 @@ def fit( shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) diff --git a/art/estimators/certification/interval/interval.py b/art/estimators/certification/interval/interval.py index 15b2d690c4..9b4bcbc84e 100644 --- a/art/estimators/certification/interval/interval.py +++ b/art/estimators/certification/interval/interval.py @@ -382,7 +382,7 @@ def concrete_forward(x: "torch.Tensor") -> "torch.Tensor": class PyTorchIntervalReLU(torch.nn.Module): """ - ReLU activation on both interval and concrete data + ReLU-activation on both interval and concrete data """ def __init__(self): diff --git a/art/estimators/certification/interval/pytorch.py b/art/estimators/certification/interval/pytorch.py index 3a7f63b2ae..b8125d4bc8 100644 --- a/art/estimators/certification/interval/pytorch.py +++ b/art/estimators/certification/interval/pytorch.py @@ -211,7 +211,7 @@ class PyTorchIBPClassifier(PyTorchIntervalBounds, PyTorchClassifier): This classifier has 3 modes which can be set via: classifier.model.set_forward_mode('mode') 'mode' can be one of: - + 'abstract': When we wish to certifiy datapoints and have abstract predictions + + 'abstract': When we wish to certify datapoints and have abstract predictions + 'concrete': When normal predictions need to be made + 'attack': When we are interfacing with an ART attack (for example PGD). """ @@ -328,7 +328,7 @@ def predict_intervals( :param bounds: The perturbation range. :param limits: The clipping to apply to the interval data. :param batch_size: batch size to use when looping through the data - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch and providing it takes no effect. :return: interval predictions over the supplied dataset diff --git a/art/estimators/certification/object_seeker/object_seeker.py b/art/estimators/certification/object_seeker/object_seeker.py index 7401121df8..db797105b5 100644 --- a/art/estimators/certification/object_seeker/object_seeker.py +++ b/art/estimators/certification/object_seeker/object_seeker.py @@ -111,7 +111,7 @@ def _masked_predictions( predictions on the base unmasked image and each of the masked image. :param x_i: A single image of shape CHW or HWC. - :batch_size: Batch size. + :param batch_size: Batch size. 
:return: Predictions for the base unmasked image and merged predictions for the masked image. """ raise NotImplementedError diff --git a/art/estimators/certification/object_seeker/pytorch.py b/art/estimators/certification/object_seeker/pytorch.py index 3e049cff6f..7b7d9fc83b 100644 --- a/art/estimators/certification/object_seeker/pytorch.py +++ b/art/estimators/certification/object_seeker/pytorch.py @@ -195,7 +195,7 @@ def _masked_predictions( predictions on the base unmasked image and each of the masked image. :param x_i: A single image of shape CHW or HWC. - :batch_size: Batch size. + :param batch_size: Batch size. :return: Predictions for the base unmasked image and merged predictions for the masked image. """ x_mask = np.repeat(x_i[np.newaxis], self.num_lines * 4 + 1, axis=0) diff --git a/art/estimators/certification/randomized_smoothing/macer/pytorch.py b/art/estimators/certification/randomized_smoothing/macer/pytorch.py index 0278101db1..386cbd68bd 100644 --- a/art/estimators/certification/randomized_smoothing/macer/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/macer/pytorch.py @@ -146,7 +146,7 @@ def fit( shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) diff --git a/art/estimators/certification/randomized_smoothing/numpy.py b/art/estimators/certification/randomized_smoothing/numpy.py index 2663fc7c1c..c335feb49f 100644 --- a/art/estimators/certification/randomized_smoothing/numpy.py +++ b/art/estimators/certification/randomized_smoothing/numpy.py @@ -104,7 +104,7 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ return self.classifier.predict(x=x, batch_size=batch_size, training_mode=training_mode, **kwargs) @@ -131,9 +131,10 @@ def _fit_classifier(self, x: np.ndarray, y: np.ndarray, batch_size: int, nb_epoc def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs) -> np.ndarray: """ Compute the gradient of the given classifier's loss function w.r.t. `x` of the original classifier. + :param x: Sample input with shape as expected by the model. :param y: Correct labels, one-hot encoded. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of the same shape as `x`. """ return self.classifier.loss_gradient(x=x, y=y, training_mode=training_mode, **kwargs) # type: ignore @@ -143,12 +144,13 @@ def class_gradient( ) -> np.ndarray: """ Compute per-class derivatives of the given classifier w.r.t. `x` of original classifier. 
+ :param x: Sample input with shape as expected by the model. :param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class output is computed for all samples. If multiple values as provided, the first dimension should match the batch size of `x`, and each value will be used as target for its corresponding sample in `x`. If `None`, then gradients for all classes will be computed for each sample. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of input features w.r.t. each class in the form `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes `(batch_size, 1, input_shape)` when `label` parameter is specified. diff --git a/art/estimators/certification/randomized_smoothing/pytorch.py b/art/estimators/certification/randomized_smoothing/pytorch.py index 0c64c58faf..04cfc0b27b 100644 --- a/art/estimators/certification/randomized_smoothing/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/pytorch.py @@ -148,7 +148,7 @@ def fit( shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) @@ -248,7 +248,7 @@ def loss_gradient( # type: ignore :param x: Sample input with shape as expected by the model. :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape (nb_samples,). - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param sampling: True if loss gradients should be determined with Monte Carlo sampling. :type sampling: `bool` :return: Array of gradients of the same shape as `x`. @@ -320,7 +320,7 @@ def class_gradient( output is computed for all samples. If multiple values as provided, the first dimension should match the batch size of `x`, and each value will be used as target for its corresponding sample in `x`. If `None`, then gradients for all classes will be computed for each sample. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of input features w.r.t. each class in the form `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes `(batch_size, 1, input_shape)` when `label` parameter is specified. 
diff --git a/art/estimators/certification/randomized_smoothing/randomized_smoothing.py b/art/estimators/certification/randomized_smoothing/randomized_smoothing.py index 0c19f1a4b5..3f8a2cd7e3 100644 --- a/art/estimators/certification/randomized_smoothing/randomized_smoothing.py +++ b/art/estimators/certification/randomized_smoothing/randomized_smoothing.py @@ -69,7 +69,7 @@ def _predict_classifier(self, x: np.ndarray, batch_size: int, training_mode: boo :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ raise NotImplementedError diff --git a/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py b/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py index 8e9a2c9b6a..c2aa8678ed 100644 --- a/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/smooth_adv/pytorch.py @@ -163,7 +163,7 @@ def fit( shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) diff --git a/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py b/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py index 4e29658eb7..455506ee11 100644 --- a/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py +++ b/art/estimators/certification/randomized_smoothing/smooth_mix/pytorch.py @@ -180,7 +180,7 @@ def fit( shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) diff --git a/art/estimators/certification/randomized_smoothing/tensorflow.py b/art/estimators/certification/randomized_smoothing/tensorflow.py index 450557876c..9c90fd3a03 100644 --- a/art/estimators/certification/randomized_smoothing/tensorflow.py +++ b/art/estimators/certification/randomized_smoothing/tensorflow.py @@ -215,7 +215,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals :param x: Sample input with shape as expected by the model. :param y: Correct labels, one-vs-rest encoding. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. 
:param sampling: True if loss gradients should be determined with Monte Carlo sampling. :type sampling: `bool` :return: Array of gradients of the same shape as `x`. @@ -292,7 +292,7 @@ def class_gradient( output is computed for all samples. If multiple values as provided, the first dimension should match the batch size of `x`, and each value will be used as target for its corresponding sample in `x`. If `None`, then gradients for all classes will be computed for each sample. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of input features w.r.t. each class in the form `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes `(batch_size, 1, input_shape)` when `label` parameter is specified. @@ -313,7 +313,7 @@ def compute_loss( 'none': no reduction will be applied 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Loss values. :rtype: Format as expected by the `model` """ diff --git a/art/estimators/classification/blackbox.py b/art/estimators/classification/blackbox.py index cbeb69fa77..676ad7d991 100644 --- a/art/estimators/classification/blackbox.py +++ b/art/estimators/classification/blackbox.py @@ -325,7 +325,7 @@ def __eq__(self, other): return np.all(np.isclose(self.key, other.key)) def __ge__(self, other): - # This implements >= comparison so we can use this class in a `SortedList`. The `total_ordering` decorator + # This implements >= comparison, so we can use this class in a `SortedList`. The `total_ordering` decorator # automatically generates the rest of the comparison magic functions based on this one close_cells = np.isclose(self.key, other.key) diff --git a/art/estimators/classification/detector_classifier.py b/art/estimators/classification/detector_classifier.py index ad8de25b62..55f506c23b 100644 --- a/art/estimators/classification/detector_classifier.py +++ b/art/estimators/classification/detector_classifier.py @@ -154,7 +154,7 @@ def class_gradient( output is computed for all samples. If multiple values as provided, the first dimension should match the batch size of `x`, and each value will be used as target for its corresponding sample in `x`. If `None`, then gradients for all classes will be computed for each sample. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of input features w.r.t. each class in the form `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes `(batch_size, 1, input_shape)` when `label` parameter is specified. @@ -272,7 +272,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals :param x: Sample input with shape as expected by the model. :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape (nb_samples,). 
- :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of the same shape as `x`. :raises `NotImplementedException`: This method is not supported for detector-classifiers. """ diff --git a/art/estimators/classification/ensemble.py b/art/estimators/classification/ensemble.py index d40ec42e7f..f3a1b53d1b 100644 --- a/art/estimators/classification/ensemble.py +++ b/art/estimators/classification/ensemble.py @@ -256,9 +256,9 @@ def class_gradient( Compute per-class derivatives w.r.t. `x`. :param x: Sample input with shape as expected by the model. - :param label: Index of a specific per-class derivative. If `None`, then gradients for all - classes will be computed. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param label: Index of a specific per-class derivative. If `None`, then gradients for all classes will be + computed. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param raw: Return the individual classifier raw outputs (not aggregated). :return: Array of gradients of input features w.r.t. each class in the form `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes @@ -286,7 +286,7 @@ def loss_gradient( :param x: Sample input with shape as expected by the model. :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape (nb_samples,). - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param raw: Return the individual classifier raw outputs (not aggregated). :return: Array of gradients of the same shape as `x`. If `raw=True`, shape becomes `[nb_classifiers, x.shape]`. """ diff --git a/art/estimators/classification/hugging_face.py b/art/estimators/classification/hugging_face.py index 0ada16d6ea..ab0b46c32d 100644 --- a/art/estimators/classification/hugging_face.py +++ b/art/estimators/classification/hugging_face.py @@ -67,9 +67,8 @@ def __init__( """ Initialization of HuggingFaceClassifierPyTorch specifically for the PyTorch-based backend. - :param model: Huggingface model model which returns outputs of type - ImageClassifierOutput from the transformers library. - Must have the logits attribute set as output. + :param model: Huggingface model which returns outputs of type ImageClassifierOutput from the `transformers` + library. Must have the logits attribute set as output. :param loss: The loss function for which to compute gradients for training. The target label must be raw categorical, i.e. not converted to one-hot encoding. :param input_shape: The shape of one input instance. @@ -97,7 +96,7 @@ def __init__( :param device_type: Type of device on which the classifier is run, either `gpu` or `cpu`. :param processor: Optional argument. Function which takes in a batch of data and performs the preprocessing relevant to a given foundation model. - Must be differentiable for grandient based defences and attacks. + Must be differentiable for gradient based defences and attacks. 
""" import torch @@ -125,7 +124,7 @@ def __init__( def prefix_function(function: Callable, postfunction: Callable) -> Callable[[Any, Any], torch.Tensor]: """ Huggingface returns logit under outputs.logits. To make this compatible with ART we wrap the forward pass - function of a HF model here, which automatically extracts the logits. + function of an HF model here, which automatically extracts the logits. :param function: The first function to run, in our case the forward pass of the model. :param postfunction: Second function to run, in this case simply extracts the logits. diff --git a/art/estimators/classification/keras.py b/art/estimators/classification/keras.py index 6575523361..4f93473cfa 100644 --- a/art/estimators/classification/keras.py +++ b/art/estimators/classification/keras.py @@ -99,7 +99,7 @@ def __init__( :param input_layer: The index of the layer to consider as input for models with multiple input layers. The layer with this index will be considered for computing gradients. For models with only one input layer this values is not required. - :param output_layer: Which layer to consider as the output when the models has multiple output layers. The layer + :param output_layer: Which layer to consider as the output when the models have multiple output layers. The layer with this index will be considered for computing gradients. For models with only one output layer this values is not required. """ @@ -428,7 +428,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals :param x: Sample input with shape as expected by the model. :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape (nb_samples,). - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of the same shape as `x`. """ # Check shape of preprocessed `x` because of custom function for `_loss_gradients` @@ -467,7 +467,7 @@ def class_gradient( output is computed for all samples. If multiple values are provided, the first dimension should match the batch size of `x`, and each value will be used as target for its corresponding sample in `x`. If `None`, then gradients for all classes will be computed for each sample. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of input features w.r.t. each class in the form `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes `(batch_size, 1, input_shape)` when `label` parameter is specified. @@ -534,7 +534,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. 
""" # Apply preprocessing diff --git a/art/estimators/classification/mxnet.py b/art/estimators/classification/mxnet.py index b6ed747a9d..c70e7f0d3e 100644 --- a/art/estimators/classification/mxnet.py +++ b/art/estimators/classification/mxnet.py @@ -266,7 +266,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ import mxnet as mx @@ -312,7 +312,7 @@ def class_gradient( output is computed for all samples. If multiple values as provided, the first dimension should match the batch size of `x`, and each value will be used as target for its corresponding sample in `x`. If `None`, then gradients for all classes will be computed for each sample. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of input features w.r.t. each class in the form `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes `(batch_size, 1, input_shape)` when `label` parameter is specified. @@ -384,7 +384,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals :param x: Sample input with shape as expected by the model. :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape `(nb_samples,)`. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of the same shape as `x`. """ import mxnet as mx diff --git a/art/estimators/classification/pytorch.py b/art/estimators/classification/pytorch.py index a14558f221..e18714d379 100644 --- a/art/estimators/classification/pytorch.py +++ b/art/estimators/classification/pytorch.py @@ -302,7 +302,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ import torch @@ -383,7 +383,7 @@ def fit( shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) @@ -576,7 +576,7 @@ def class_gradient( output is computed for all samples. 
If multiple values as provided, the first dimension should match the batch size of `x`, and each value will be used as target for its corresponding sample in `x`. If `None`, then gradients for all classes will be computed for each sample. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. Note on RNN-like models: Backpropagation through RNN modules in eval mode raises RuntimeError due to cudnn issues and require training mode, i.e. RuntimeError: cudnn RNN backward can only be called in training mode. Therefore, if the model is an RNN type we @@ -785,7 +785,7 @@ def loss_gradient( :param x: Sample input with shape as expected by the model. :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape `(nb_samples,)`. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. Note on RNN-like models: Backpropagation through RNN modules in eval mode raises RuntimeError due to cudnn issues and require training mode, i.e. RuntimeError: cudnn RNN backward can only be called in training mode. Therefore, if the model is an RNN type we @@ -881,10 +881,10 @@ def custom_loss_gradient( """ Compute the gradient of the loss function w.r.t. `x`. - :loss_fn: Loss function w.r.t to which gradient needs to be calculated. + :param loss_fn: Loss function w.r.t to which gradient needs to be calculated. :param x: Sample input with shape as expected by the model(base image). :param y: Sample input with shape as expected by the model(target image). - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode.` + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode.` :param layer_name: Name of the layer from which activation needs to be extracted/activation layer. :return: Array of gradients of the same shape as `x`. """ diff --git a/art/estimators/classification/tensorflow.py b/art/estimators/classification/tensorflow.py index 88629213b3..98c2304b30 100644 --- a/art/estimators/classification/tensorflow.py +++ b/art/estimators/classification/tensorflow.py @@ -233,7 +233,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(num_inputs, nb_classes)`. """ if self.learning is not None: @@ -388,7 +388,7 @@ def class_gradient( output is computed for all samples. If multiple values as provided, the first dimension should match the batch size of `x`, and each value will be used as target for its corresponding sample in `x`. If `None`, then gradients for all classes will be computed for each sample. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of input features w.r.t. 
each class in the form `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes `(batch_size, 1, input_shape)` when `label` parameter is specified. @@ -449,7 +449,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals :param x: Sample input with shape as expected by the model. :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape `(nb_samples,)`. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of the same shape as `x`. """ if self.learning is not None: @@ -922,7 +922,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ # Apply preprocessing @@ -952,7 +952,7 @@ def _predict_framework(self, x: "tf.Tensor", training_mode: bool = False) -> "tf Perform prediction for a batch of inputs. :param x: Input samples. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ # Apply preprocessing @@ -1108,7 +1108,7 @@ def class_gradient( output is computed for all samples. If multiple values as provided, the first dimension should match the batch size of `x`, and each value will be used as target for its corresponding sample in `x`. If `None`, then gradients for all classes will be computed for each sample. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of input features w.r.t. each class in the form `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes `(batch_size, 1, input_shape)` when `label` parameter is specified. @@ -1202,7 +1202,7 @@ def compute_loss( 'none': no reduction will be applied 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of losses of the same shape as `x`. """ import tensorflow as tf @@ -1267,7 +1267,7 @@ def loss_gradient( :param x: Sample input with shape as expected by the model. :param y: Correct labels, one-vs-rest encoding. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of the same shape as `x`. 
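The `training_mode` flag corrected throughout these docstrings is simply forwarded to the gradient and prediction calls. A small self-contained sketch follows; the toy model and random data are assumptions for illustration, not values taken from this patch.

    import numpy as np
    import torch
    from art.estimators.classification import PyTorchClassifier

    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10))
    classifier = PyTorchClassifier(
        model=model,
        loss=torch.nn.CrossEntropyLoss(),
        input_shape=(1, 28, 28),
        nb_classes=10,
        clip_values=(0.0, 1.0),
    )
    x = np.random.rand(4, 1, 28, 28).astype(np.float32)
    y = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, size=4)]
    # Gradients of the loss w.r.t. x, same shape as x; training_mode=False keeps the model in eval mode.
    loss_grads = classifier.loss_gradient(x=x, y=y, training_mode=False)
    # Per-class gradients of shape (batch_size, nb_classes, 1, 28, 28) when label=None.
    class_grads = classifier.class_gradient(x=x, label=None)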
""" import tensorflow as tf diff --git a/art/estimators/estimator.py b/art/estimators/estimator.py index 0d2e84618b..83ad06613b 100644 --- a/art/estimators/estimator.py +++ b/art/estimators/estimator.py @@ -40,7 +40,7 @@ class BaseEstimator(ABC): """ The abstract base class `BaseEstimator` defines the basic requirements of an estimator in ART. The BaseEstimator is - is the highest abstraction of a machine learning model in ART. + the highest abstraction of a machine learning model in ART. """ estimator_params = [ diff --git a/art/estimators/gan/tensorflow.py b/art/estimators/gan/tensorflow.py index 7cf617ee0a..8204583f88 100644 --- a/art/estimators/gan/tensorflow.py +++ b/art/estimators/gan/tensorflow.py @@ -66,7 +66,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray: """ Generates a sample. - :param x: A input seed. + :param x: An input seed. :param batch_size: The batch size for predictions. :return: The generated sample. """ @@ -85,10 +85,10 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in """ Creates a generative model - :param x: the secret backdoor trigger that will produce the target - :param y: the target to produce when using the trigger - :param batch_size: batch_size of images used to train generator - :param nb_epochs: total number of iterations for performing the attack + :param x: The secret backdoor trigger that will produce the target. + :param y: The target to produce when using the trigger. + :param batch_size: batch_size of images used to train generator. + :param nb_epochs: total number of iterations for performing the attack. """ import tensorflow as tf diff --git a/art/estimators/generation/tensorflow.py b/art/estimators/generation/tensorflow.py index 431e3e1531..9095b94d69 100644 --- a/art/estimators/generation/tensorflow.py +++ b/art/estimators/generation/tensorflow.py @@ -216,8 +216,8 @@ def __init__( """ Initialization specific to TensorFlow generator implementations. - :encoding_length: length of the input seed - :model: TensorFlow model, neural network or other. + :param encoding_length: length of the input seed + :param model: TensorFlow model, neural network or other. :param channels_first: Set channels first or last. :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and maximum values allowed for features. If floats are provided, these will be used as the range @@ -263,7 +263,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa :param x: Encodings. :param batch_size: Batch size. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of prediction projections of shape `(num_inputs, nb_classes)`. """ # Run prediction with batch processing diff --git a/art/estimators/object_detection/detr.py b/art/estimators/object_detection/detr.py index 774f48f0c9..9cae222a55 100644 --- a/art/estimators/object_detection/detr.py +++ b/art/estimators/object_detection/detr.py @@ -120,13 +120,13 @@ def forward(self, outputs, targets): # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. - # The 1 is a constant that doesn't change the matching, it can be ommitted. + # The 1 is a constant that doesn't change the matching, it can be omitted. 
cost_class = -out_prob[:, tgt_ids] # Compute the L1 cost between boxes cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) - # Compute the giou cost betwen boxes + # Compute the giou cost between boxes cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)) # Final cost matrix @@ -323,7 +323,7 @@ def forward(self, outputs, targets): with torch.no_grad(): indices = self.matcher(outputs_without_aux, targets) - # Compute the average number of target boxes accross all nodes, for normalization purposes + # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) num_boxes = torch.clamp(num_boxes, min=1).item() diff --git a/art/estimators/object_tracking/pytorch_goturn.py b/art/estimators/object_tracking/pytorch_goturn.py index 0f42332132..f4a2129d72 100644 --- a/art/estimators/object_tracking/pytorch_goturn.py +++ b/art/estimators/object_tracking/pytorch_goturn.py @@ -507,7 +507,7 @@ def crop_pad_image(bbox_tight: "torch.Tensor", image: "torch.Tensor") -> tuple[ "torch.Tensor", ]: """ - Around the bounding box, we define a extra context factor of 2, which we will crop from the original image. + Around the bounding box, we define an extra context factor of 2, which we will crop from the original image. :param bbox_tight: Coordinates of bounding box [x1, y1, x2, y2]. :param image: Frame to be cropped and padded. diff --git a/art/estimators/poison_mitigation/neural_cleanse/keras.py b/art/estimators/poison_mitigation/neural_cleanse/keras.py index ca752b4f54..9e23d955c3 100644 --- a/art/estimators/poison_mitigation/neural_cleanse/keras.py +++ b/art/estimators/poison_mitigation/neural_cleanse/keras.py @@ -106,7 +106,7 @@ def __init__( :param input_layer: The index of the layer to consider as input for models with multiple input layers. The layer with this index will be considered for computing gradients. For models with only one input layer this values is not required. - :param output_layer: Which layer to consider as the output when the models has multiple output layers. The layer + :param output_layer: Which layer to consider as the output when the models have multiple output layers. The layer with this index will be considered for computing gradients. For models with only one output layer this values is not required. :param steps: The maximum number of steps to run the Neural Cleanse optimization @@ -116,7 +116,7 @@ def __init__( :param attack_success_threshold: The threshold at which the generated backdoor is successful enough to stop the Neural Cleanse optimization :param patience: How long to wait for changing the cost multiplier in the Neural Cleanse optimization - :param early_stop: Whether or not to allow early stopping in the Neural Cleanse optimization + :param early_stop: Whether to allow early stopping in the Neural Cleanse optimization :param early_stop_threshold: How close values need to come to max value to start counting early stop :param early_stop_patience: How long to wait to determine early stopping in the Neural Cleanse optimization :param cost_multiplier: How much to change the cost in the Neural Cleanse optimization @@ -362,7 +362,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa :param x: Input data to predict. :param batch_size: Batch size. 
- :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ return NeuralCleanseMixin.predict(self, x, batch_size=batch_size, training_mode=training_mode, **kwargs) @@ -385,7 +385,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals :param x: Sample input with shape as expected by the model. :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape (nb_samples,). - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of the same shape as `x`. """ return self.loss_gradient(x=x, y=y, training_mode=training_mode, **kwargs) @@ -405,7 +405,7 @@ def class_gradient( output is computed for all samples. If multiple values as provided, the first dimension should match the batch size of `x`, and each value will be used as target for its corresponding sample in `x`. If `None`, then gradients for all classes will be computed for each sample. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of input features w.r.t. each class in the form `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes `(batch_size, 1, input_shape)` when `label` parameter is specified. diff --git a/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py b/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py index 73f5c31c00..a73f0757c8 100644 --- a/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py +++ b/art/estimators/poison_mitigation/neural_cleanse/neural_cleanse.py @@ -67,7 +67,7 @@ def __init__( :param attack_success_threshold: The threshold at which the generated backdoor is successful enough to stop the Neural Cleanse optimization :param patience: How long to wait for changing the cost multiplier in the Neural Cleanse optimization - :param early_stop: Whether or not to allow early stopping in the Neural Cleanse optimization + :param early_stop: Whether to allow early stopping in the Neural Cleanse optimization :param early_stop_threshold: How close values need to come to max value to start counting early stop :param early_stop_patience: How long to wait to determine early stopping in the Neural Cleanse optimization :param cost_multiplier: How much to change the cost in the Neural Cleanse optimization @@ -97,7 +97,7 @@ def _predict_classifier( :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. 
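A rough usage sketch for the Neural Cleanse methods documented above. The `KerasNeuralCleanse` class name, its constructor arguments, and the `keras_model`, `x_val`, `y_val` objects are assumptions made for illustration rather than details confirmed by this patch.

    from art.estimators.poison_mitigation.neural_cleanse.keras import KerasNeuralCleanse

    # keras_model, x_val and y_val are assumed to exist (a trained tf.keras model plus validation data);
    # the keyword arguments below mirror parameters listed in the docstring above.
    defence = KerasNeuralCleanse(model=keras_model, steps=1000, patience=5, early_stop=True)
    # List of (class_index, mask, pattern) tuples for labels suspected of being poisoned.
    suspects = defence.outlier_detection(x_val, y_val)
    # Reverse-engineered backdoored examples: (clean data, backdoored data, labels).
    x_clean, x_backdoor, labels = defence.backdoor_examples(x_val, y_val)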
""" raise NotImplementedError @@ -229,6 +229,7 @@ def check_backdoor_effective(self, backdoor_data: np.ndarray, backdoor_labels: n def backdoor_examples(self, x_val: np.ndarray, y_val: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Generate reverse-engineered backdoored examples using validation data + :param x_val: validation data :param y_val: validation labels :return: a tuple containing (clean data, backdoored data, labels) @@ -266,14 +267,16 @@ def generate_backdoor( ) -> tuple[np.ndarray, np.ndarray]: """ Generates a possible backdoor for the model. Returns the pattern and the mask + :return: A tuple of the pattern and mask for the model. """ raise NotImplementedError def outlier_detection(self, x_val: np.ndarray, y_val: np.ndarray) -> list[tuple[int, np.ndarray, np.ndarray]]: """ - Returns a tuple of suspected of suspected poison labels and their mask and pattern - :return: A list of tuples containing the the class index, mask, and pattern for suspected labels + Returns a tuple of suspected poison labels and their mask and pattern + + :return: A list of tuples containing the class index, mask, and pattern for suspected labels """ l1_norms = [] masks = [] diff --git a/art/estimators/pytorch.py b/art/estimators/pytorch.py index cba467fa68..75a3791583 100644 --- a/art/estimators/pytorch.py +++ b/art/estimators/pytorch.py @@ -138,7 +138,7 @@ def _check_params(self) -> None: def _apply_preprocessing(self, x, y, fit: bool = False, no_grad=True) -> tuple[Any, Any]: """ - Apply all preprocessing defences of the estimator on the raw inputs `x` and `y`. This function is should + Apply all preprocessing defences of the estimator on the raw inputs `x` and `y`. This function should only be called from function `_apply_preprocessing`. The method overrides art.estimators.estimator::BaseEstimator._apply_preprocessing(). @@ -218,7 +218,7 @@ def chain_processes(x, y): def _apply_preprocessing_gradient(self, x, gradients, fit=False): """ Apply the backward pass to the gradients through all preprocessing defences that have been applied to `x` - and `y` in the forward pass. This function is should only be called from function + and `y` in the forward pass. This function should only be called from function `_apply_preprocessing_gradient`. The method overrides art.estimators.estimator::LossGradientsMixin._apply_preprocessing_gradient(). diff --git a/art/estimators/regression/blackbox.py b/art/estimators/regression/blackbox.py index f701760ea9..231ea323e9 100644 --- a/art/estimators/regression/blackbox.py +++ b/art/estimators/regression/blackbox.py @@ -233,7 +233,7 @@ def __eq__(self, other): return np.all(np.isclose(self.key, other.key)) def __ge__(self, other): - # This implements >= comparison so we can use this class in a `SortedList`. The `total_ordering` decorator + # This implements >= comparison, so we can use this class in a `SortedList`. The `total_ordering` decorator # automatically generates the rest of the comparison magic functions based on this one close_cells = np.isclose(self.key, other.key) diff --git a/art/estimators/regression/keras.py b/art/estimators/regression/keras.py index f83f85de61..783d11c756 100644 --- a/art/estimators/regression/keras.py +++ b/art/estimators/regression/keras.py @@ -88,7 +88,7 @@ def __init__( :param input_layer: The index of the layer to consider as input for models with multiple input layers. The layer with this index will be considered for computing gradients. For models with only one input layer this values is not required. 
- :param output_layer: Which layer to consider as the output when the models has multiple output layers. The layer + :param output_layer: Which layer to consider as the output when the models have multiple output layers. The layer with this index will be considered for computing gradients. For models with only one output layer this values is not required. """ @@ -376,7 +376,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, training_mode: bool = Fals :param x: Sample input with shape as expected by the model. :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape (nb_samples,). - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of gradients of the same shape as `x`. """ # Check shape of preprocessed `x` because of custom function for `_loss_gradients` @@ -402,7 +402,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ # Apply preprocessing diff --git a/art/estimators/regression/pytorch.py b/art/estimators/regression/pytorch.py index ee96bdea8d..903bb26e1c 100644 --- a/art/estimators/regression/pytorch.py +++ b/art/estimators/regression/pytorch.py @@ -238,7 +238,7 @@ def predict(self, x: np.ndarray, batch_size: int = 128, training_mode: bool = Fa :param x: Input samples. :param batch_size: Size of batches. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :return: Array of predictions of shape `(nb_inputs, nb_classes)`. """ import torch @@ -318,7 +318,7 @@ def fit( shape (nb_samples,). :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param drop_last: Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: ``False``) @@ -607,7 +607,7 @@ def loss_gradient( :param x: Sample input with shape as expected by the model. :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape `(nb_samples,)`. - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. Note on RNN-like models: Backpropagation through RNN modules in eval mode raises RuntimeError due to cudnn issues and require training mode, i.e. RuntimeError: cudnn RNN backward can only be called in training mode. 
Therefore, if the model is an RNN type we @@ -708,10 +708,10 @@ def custom_loss_gradient( """ Compute the gradient of the loss function w.r.t. `x`. - :loss_fn: Loss function w.r.t to which gradient needs to be calculated. + :param loss_fn: Loss function w.r.t to which gradient needs to be calculated. :param x: Sample input with shape as expected by the model(base image). :param y: Sample input with shape as expected by the model(target image). - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode.` + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode.` :param layer_name: Name of the layer from which activation needs to be extracted/activation layer. :return: Array of gradients of the same shape as `x`. """ diff --git a/art/estimators/speech_recognition/pytorch_deep_speech.py b/art/estimators/speech_recognition/pytorch_deep_speech.py index ba1c17767a..43640029c5 100644 --- a/art/estimators/speech_recognition/pytorch_deep_speech.py +++ b/art/estimators/speech_recognition/pytorch_deep_speech.py @@ -85,7 +85,7 @@ def __init__( Initialization of an instance PyTorchDeepSpeech. :param model: DeepSpeech model. - :param pretrained_model: The choice of pretrained model if a pretrained model is required. Currently this + :param pretrained_model: The choice of pretrained model if a pretrained model is required. Currently, this estimator supports 3 different pretrained models consisting of `an4`, `librispeech` and `tedlium`. :param filename: Name of the file. @@ -107,7 +107,7 @@ def __init__( outputs. :param beta: Language model word bonus (all words). This parameter is only used when users want transcription outputs. - :param cutoff_top_n: Cutoff_top_n characters with highest probs in vocabulary will be used in beam search. This + :param cutoff_top_n: Cutoff_top_n characters with the highest probs in vocabulary will be used in beam search. This parameter is only used when users want transcription outputs. :param cutoff_prob: Cutoff probability in pruning. This parameter is only used when users want transcription outputs. @@ -284,7 +284,7 @@ def __init__( # Create the language model config first lm_config = LMConfig() - # Then setup the config + # Then set up the config if decoder_type == "greedy": lm_config.decoder_type = DecoderType.greedy elif decoder_type == "beam": @@ -434,7 +434,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: :param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`. - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :return: Loss gradients of the same shape as `x`. """ @@ -511,7 +511,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in :param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`. - :param y: Target values of shape (nb_samples). 
Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. @@ -601,7 +601,7 @@ def compute_loss_and_decoded_output( Compute loss function and decoded output. :param masked_adv_input: The perturbed inputs. - :param original_output: Target values of shape (nb_samples). Each sample in `original_output` is a string and + :param original_output: Target values of shape (nb_samples). Each sample in `original_output` is a string, and it may possess different lengths. A possible example of `original_output` could be: `original_output = np.array(['SIXTY ONE', 'HELLO'])`. :param real_lengths: Real lengths of original sequences. @@ -664,7 +664,7 @@ def _preprocess_transform_model_input( `_apply_preprocessing` function. :param x: Samples of shape (nb_samples, seq_length). - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :param real_lengths: Real lengths of original sequences. :return: A tuple of inputs and targets in the model space with the original index @@ -710,7 +710,7 @@ def _transform_model_input( :param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.ndarray([[0.1, 0.2, 0.1, 0.4], [0.3, 0.1]])`. - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :param compute_gradient: Indicate whether to compute gradients for the input `x`. :param tensor_input: Indicate whether input is tensor. diff --git a/art/estimators/speech_recognition/pytorch_espresso.py b/art/estimators/speech_recognition/pytorch_espresso.py index b2db40838c..b1c6f77ba9 100644 --- a/art/estimators/speech_recognition/pytorch_espresso.py +++ b/art/estimators/speech_recognition/pytorch_espresso.py @@ -273,7 +273,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: :param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`. - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :return: Loss gradients of the same shape as `x`. """ @@ -332,7 +332,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in :param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`. 
- :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :param batch_size: Size of batches. :param nb_epochs: Number of epochs to use for training. @@ -353,7 +353,7 @@ def _transform_model_input( :param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.ndarray([[0.1, 0.2, 0.1, 0.4], [0.3, 0.1]])`. - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :param compute_gradient: Indicate whether to compute gradients for the input `x`. :return: A tuple of a dictionary of batch and a list representing the original order of the batch @@ -471,7 +471,7 @@ def _preprocess_transform_model_input( `_apply_preprocessing` function. :param x: Samples of shape (nb_samples, seq_length). - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :param real_lengths: Real lengths of original sequences. :return: A tuple of inputs and targets in the model space with the original index @@ -508,7 +508,7 @@ def compute_loss_and_decoded_output( Compute loss function and decoded output. :param masked_adv_input: The perturbed inputs. - :param original_output: Target values of shape (nb_samples). Each sample in `original_output` is a string and + :param original_output: Target values of shape (nb_samples). Each sample in `original_output` is a string, and it may possess different lengths. A possible example of `original_output` could be: `original_output = np.array(['SIXTY ONE', 'HELLO'])`. :return: The loss and the decoded output. diff --git a/art/estimators/speech_recognition/speech_recognizer.py b/art/estimators/speech_recognition/speech_recognizer.py index 17a710d098..58d9bff8b0 100644 --- a/art/estimators/speech_recognition/speech_recognizer.py +++ b/art/estimators/speech_recognition/speech_recognizer.py @@ -50,7 +50,7 @@ def compute_loss_and_decoded_output( Compute loss function and decoded output. :param masked_adv_input: The perturbed inputs. - :param original_output: Target values of shape (nb_samples). Each sample in `original_output` is a string and + :param original_output: Target values of shape (nb_samples). Each sample in `original_output` is a string, and it may possess different lengths. A possible example of `original_output` could be: `original_output = np.array(['SIXTY ONE', 'HELLO'])`. :return: The loss and the decoded output. 
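The ragged-input convention described in these speech-recognition docstrings looks like the following in practice. This is a sketch only: it assumes the optional deepspeech-pytorch dependency is installed and uses random audio in place of real recordings.

    import numpy as np
    from art.estimators.speech_recognition.pytorch_deep_speech import PyTorchDeepSpeech

    # "librispeech" is one of the pretrained models named in the docstring (an4, librispeech, tedlium).
    asr = PyTorchDeepSpeech(pretrained_model="librispeech")
    # Sequences in a batch may have different lengths, hence dtype=object.
    x = np.array([np.random.rand(8000), np.random.rand(6000)], dtype=object)
    y = np.array(["SIXTY ONE", "HELLO"])  # one transcription string per sample
    grads = asr.loss_gradient(x=x, y=y)   # loss gradients with the same ragged shape as x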
diff --git a/art/estimators/speech_recognition/tensorflow_lingvo.py b/art/estimators/speech_recognition/tensorflow_lingvo.py index 6d9bf31be9..fa66ca4a3f 100644 --- a/art/estimators/speech_recognition/tensorflow_lingvo.py +++ b/art/estimators/speech_recognition/tensorflow_lingvo.py @@ -237,7 +237,7 @@ def _load_model(self): ) # monkey-patch the lingvo.asr.decoder.AsrDecoderBase._ComputeMetrics method with patched method according - # to Qin et al + # to Qin et al. from lingvo.tasks.asr import decoder from asr import decoder_patched @@ -466,7 +466,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, batch_mode: bool = False, :param x: Samples of shape `(nb_samples)`. Note that, it is allowable that sequences in the batch could have different lengths. A possible example of `x` could be: `x = np.ndarray([[0.1, 0.2, 0.1, 0.4], [0.3, 0.1]])`. - :param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different + :param y: Target values of shape (nb_samples). Each sample in `y` is a string, and it may possess different lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`. :param batch_mode: If `True` calculate gradient per batch or otherwise per sequence. :return: Loss gradients of the same shape as `x`. diff --git a/art/estimators/tensorflow.py b/art/estimators/tensorflow.py index 664237d2e4..d175d23fd8 100644 --- a/art/estimators/tensorflow.py +++ b/art/estimators/tensorflow.py @@ -154,7 +154,7 @@ def _check_params(self) -> None: def _apply_preprocessing(self, x, y, fit: bool = False) -> tuple[Any, Any]: """ - Apply all preprocessing defences of the estimator on the raw inputs `x` and `y`. This function is should + Apply all preprocessing defences of the estimator on the raw inputs `x` and `y`. This function should only be called from function `_apply_preprocessing`. The method overrides art.estimators.estimator::BaseEstimator._apply_preprocessing(). @@ -225,7 +225,7 @@ def _apply_preprocessing(self, x, y, fit: bool = False) -> tuple[Any, Any]: def _apply_preprocessing_gradient(self, x, gradients, fit=False): """ Apply the backward pass to the gradients through all preprocessing defences that have been applied to `x` - and `y` in the forward pass. This function is should only be called from function + and `y` in the forward pass. This function should only be called from function `_apply_preprocessing_gradient`. The method overrides art.estimators.estimator::LossGradientsMixin._apply_preprocessing_gradient(). diff --git a/art/metrics/gradient_check.py b/art/metrics/gradient_check.py index 3fef3d416a..36fababafc 100644 --- a/art/metrics/gradient_check.py +++ b/art/metrics/gradient_check.py @@ -42,7 +42,7 @@ def loss_gradient_check( :param x: Input with shape as expected by the classifier's model. :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape (nb_samples,). - :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode. + :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode. :param verbose: Show progress bars. :return: Array of booleans with the shape (len(x), 3). If true means the gradient of the loss w.r.t. the particular `x` was bad (zero, nan, inf). 
diff --git a/art/metrics/metrics.py b/art/metrics/metrics.py index f6d46ed24b..974858a45b 100644 --- a/art/metrics/metrics.py +++ b/art/metrics/metrics.py @@ -109,7 +109,7 @@ def adversarial_accuracy( :param attack_params: A dictionary with attack-specific parameters. If the attack has a norm attribute, then it will be used as the norm for calculating the robustness; otherwise the standard Euclidean distance is used (norm=2). - :param attack_crafter: EvasionAttack instance with `generate' method to apply on `x` to create adversarial examples. + :param attack_crafter: EvasionAttack instance with `generate` method to apply on `x` to create adversarial examples. :return: The adversarial accuracy of the classifier computed on `x`. """ diff --git a/art/metrics/verification_decisions_trees.py b/art/metrics/verification_decisions_trees.py index 0c7d7f5c01..31eab2a071 100644 --- a/art/metrics/verification_decisions_trees.py +++ b/art/metrics/verification_decisions_trees.py @@ -325,7 +325,7 @@ def _get_k_partite_clique( new_leaf_value = accessible_leaf.value cliques_old.append({"box": accessible_leaf.box, "value": new_leaf_value}) - # Loop over all all trees + # Loop over all trees for i_tree in range( start_tree + 1, min(len(accessible_leaves), start_tree + self.max_clique), diff --git a/art/preprocessing/image/image_square_pad/pytorch.py b/art/preprocessing/image/image_square_pad/pytorch.py index dea1c33120..b2fa3380ad 100644 --- a/art/preprocessing/image/image_square_pad/pytorch.py +++ b/art/preprocessing/image/image_square_pad/pytorch.py @@ -63,7 +63,7 @@ def __init__( :param width: The width of the resized image. :param channels_first: Set channels first or last. :param label_type: String defining the label type. Currently supported: `classification`, `object_detection` - :param pad_mode: String defining the padding method. Currently supported: `constant`, `reflect`, 'replicate`, + :param pad_mode: String defining the padding method. Currently supported: `constant`, `reflect`, `replicate`, `circular` :param pad_kwargs: A dictionary of additional keyword arguments used by the `torch.nn.functional.pad` function. :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed diff --git a/art/preprocessing/image/image_square_pad/tensorflow.py b/art/preprocessing/image/image_square_pad/tensorflow.py index b65e5de3b2..54cceade49 100644 --- a/art/preprocessing/image/image_square_pad/tensorflow.py +++ b/art/preprocessing/image/image_square_pad/tensorflow.py @@ -62,7 +62,7 @@ def __init__( :param width: The width of the resized image. :param channels_first: Set channels first or last. :param label_type: String defining the label type. Currently supported: `classification`, `object_detection` - :param pad_mode: String defining the padding method. Currently supported: `CONSTANT`, `REFLECT`, 'SYMMETRIC` + :param pad_mode: String defining the padding method. Currently supported: `CONSTANT`, `REFLECT`, `SYMMETRIC` :param pad_kwargs: A dictionary of additional keyword arguments used by the `tf.pad` function. :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed for features. diff --git a/art/summary_writer.py b/art/summary_writer.py index bf0e186308..f960abdb1b 100644 --- a/art/summary_writer.py +++ b/art/summary_writer.py @@ -61,7 +61,7 @@ def update( """ Update the summary writer. - :param batch_id: Id of the current mini-batch. + :param batch_id: ID of the current mini-batch. :param global_step: Global iteration step. 
:param grad: Loss gradients. :param patch: Adversarial patch. @@ -144,7 +144,7 @@ def update( """ Update the summary writer. - :param batch_id: Id of the current mini-batch. + :param batch_id: ID of the current mini-batch. :param global_step: Global iteration step. :param grad: Loss gradients. :param patch: Adversarial patch. diff --git a/art/utils.py b/art/utils.py index fa84c37a97..102720505d 100644 --- a/art/utils.py +++ b/art/utils.py @@ -331,7 +331,7 @@ def deprecated_keyword_arg(identifier: str, end_version: str, *, reason: str = " Deprecate a keyword argument and raise a `DeprecationWarning`. The `@deprecated_keyword_arg` decorator is used to deprecate keyword arguments. The deprecated keyword argument must - default to `Deprecated`. Several use cases are supported. For example one can use it to to rename a keyword + default to `Deprecated`. Several use cases are supported. For example one can use it to rename a keyword identifier. The following code examples provide different use cases of how to use the decorator. .. code-block:: python @@ -394,7 +394,7 @@ def projection_l1_1(values: np.ndarray, eps: int | float | np.ndarray) -> np.nda a[j+1] +...+ a[n-1] - a[j]*(n-j-1) >= eps. The ith coordinate of projection is equal to 0 if i=0,...,j. - :param values: A batch of m points, each an ndarray + :param values: A batch of m points, each a ndarray :param eps: The radii of the respective L1-balls :return: projections """ @@ -481,7 +481,7 @@ def projection_l1_2(values: np.ndarray, eps: int | float | np.ndarray) -> np.nda If t = (a1 + ... + an - 1)/n , then a' is the desired projection. Otherwise, the problem is reduced to finding the projection of (a1 - t, ... , a{n-1} - t ). - :param values: A batch of m points, each an ndarray + :param values: A batch of m points, each a ndarray :param eps: The radii of the respective L1-balls :return: projections """ @@ -911,7 +911,7 @@ def get_label_conf(y_vec: np.ndarray) -> tuple[np.ndarray, np.ndarray]: def get_labels_np_array(preds: np.ndarray) -> np.ndarray: """ - Returns the label of the most probable class given a array of class confidences. + Returns the label of the most probable class given an array of class confidences. :param preds: Array of class confidences, nb of instances as first dimension. :return: Labels. @@ -1774,7 +1774,7 @@ def performance_diff( :param perf_function: The performance metric to be used. One of ['accuracy', 'f1'] or a callable function `(true_labels, model_labels[, kwargs]) -> float`. :param kwargs: Arguments to add to performance function. - :return: The difference in performance performance(model1) - performance(model2). + :return: The difference in performance: performance(model1) - performance(model2). :raises `ValueError`: If an unsupported performance function is requested. """ from sklearn.metrics import accuracy_score, f1_score @@ -1817,9 +1817,9 @@ def is_probability(vector: np.ndarray) -> bool: def is_probability_array(array: np.ndarray) -> bool: """ - Check if a multi-dimensional array is an array of probabilities. + Check if a multidimensional array is an array of probabilities. - :param vector: A numpy array. + :param array: A numpy array. :return: True if it is an array of probabilities. 
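The utility helpers touched above can be exercised directly; a short sketch follows, with random scores standing in for real model outputs as an assumption for illustration.

    import numpy as np
    from art.utils import get_labels_np_array, is_probability_array

    scores = np.random.rand(4, 10)
    probs = scores / scores.sum(axis=1, keepdims=True)  # rows now sum to one
    valid = is_probability_array(probs)                 # True when entries lie in [0, 1] and rows sum to one
    labels = get_labels_np_array(probs)                 # labels of the most probable class per sample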
""" if len(array.shape) == 1: From 2832e74ab79b607f19659ea9101914d938a26082 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Mon, 26 Aug 2024 15:03:31 +0200 Subject: [PATCH 21/27] Fix typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 6 +++--- .github/workflows/ci-tensorflow-v1.yml | 3 ++- art/estimators/classification/keras.py | 6 +++--- art/estimators/poison_mitigation/neural_cleanse/keras.py | 6 +++--- art/estimators/regression/keras.py | 6 +++--- art/estimators/speech_recognition/pytorch_deep_speech.py | 4 ++-- 6 files changed, 16 insertions(+), 15 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index c68b910529..9db084dd8f 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -28,12 +28,12 @@ jobs: fail-fast: false matrix: include: - - name: TensorFlow+Lingvo 2.1.0v1 (Keras 2.3.1 Python 3.6) + - name: TensorFlow+Lingvo 2.4.4v1 (Keras 2.4.3 Python 3.9) framework: tensorflow2v1 python: 3.9 - tensorflow: 2.1.0 + tensorflow: 2.4.4 tf_version: v2 - keras: 2.3.1 + keras: 2.4.3 lingvo: 0.6.4 name: Run ${{ matrix.name }} Tests diff --git a/.github/workflows/ci-tensorflow-v1.yml b/.github/workflows/ci-tensorflow-v1.yml index 0334e5938e..941dacbf31 100644 --- a/.github/workflows/ci-tensorflow-v1.yml +++ b/.github/workflows/ci-tensorflow-v1.yml @@ -69,7 +69,8 @@ jobs: pip install lief==0.12.3 pip install statsmodels==0.13.5 pip install numba==0.56.4 - pip install pytest + pip install pytest==7.4.4 + pip install pytest-cov pip list - name: Run Tests run: ./run_tests.sh ${{ matrix.framework }} diff --git a/art/estimators/classification/keras.py b/art/estimators/classification/keras.py index 4f93473cfa..c3eaf40b53 100644 --- a/art/estimators/classification/keras.py +++ b/art/estimators/classification/keras.py @@ -99,9 +99,9 @@ def __init__( :param input_layer: The index of the layer to consider as input for models with multiple input layers. The layer with this index will be considered for computing gradients. For models with only one input layer this values is not required. - :param output_layer: Which layer to consider as the output when the models have multiple output layers. The layer - with this index will be considered for computing gradients. For models with only one output - layer this values is not required. + :param output_layer: Which layer to consider as the output when the models have multiple output layers. The + layer with this index will be considered for computing gradients. For models with only one + output layer this values is not required. """ super().__init__( model=model, diff --git a/art/estimators/poison_mitigation/neural_cleanse/keras.py b/art/estimators/poison_mitigation/neural_cleanse/keras.py index 9e23d955c3..117accba1b 100644 --- a/art/estimators/poison_mitigation/neural_cleanse/keras.py +++ b/art/estimators/poison_mitigation/neural_cleanse/keras.py @@ -106,9 +106,9 @@ def __init__( :param input_layer: The index of the layer to consider as input for models with multiple input layers. The layer with this index will be considered for computing gradients. For models with only one input layer this values is not required. - :param output_layer: Which layer to consider as the output when the models have multiple output layers. The layer - with this index will be considered for computing gradients. For models with only one output - layer this values is not required. + :param output_layer: Which layer to consider as the output when the models have multiple output layers. 
The + layer with this index will be considered for computing gradients. For models with only one + output layer this values is not required. :param steps: The maximum number of steps to run the Neural Cleanse optimization :param init_cost: The initial value for the cost tensor in the Neural Cleanse optimization :param norm: The norm to use for the Neural Cleanse optimization, can be 1, 2, or np.inf diff --git a/art/estimators/regression/keras.py b/art/estimators/regression/keras.py index 783d11c756..33244132c9 100644 --- a/art/estimators/regression/keras.py +++ b/art/estimators/regression/keras.py @@ -88,9 +88,9 @@ def __init__( :param input_layer: The index of the layer to consider as input for models with multiple input layers. The layer with this index will be considered for computing gradients. For models with only one input layer this values is not required. - :param output_layer: Which layer to consider as the output when the models have multiple output layers. The layer - with this index will be considered for computing gradients. For models with only one output - layer this values is not required. + :param output_layer: Which layer to consider as the output when the models have multiple output layers. The + layer with this index will be considered for computing gradients. For models with only one + output layer this values is not required. """ super().__init__( model=model, diff --git a/art/estimators/speech_recognition/pytorch_deep_speech.py b/art/estimators/speech_recognition/pytorch_deep_speech.py index 43640029c5..5ebeebecf7 100644 --- a/art/estimators/speech_recognition/pytorch_deep_speech.py +++ b/art/estimators/speech_recognition/pytorch_deep_speech.py @@ -107,8 +107,8 @@ def __init__( outputs. :param beta: Language model word bonus (all words). This parameter is only used when users want transcription outputs. - :param cutoff_top_n: Cutoff_top_n characters with the highest probs in vocabulary will be used in beam search. This - parameter is only used when users want transcription outputs. + :param cutoff_top_n: Cutoff_top_n characters with the highest probs in vocabulary will be used in beam search. + This parameter is only used when users want transcription outputs. :param cutoff_prob: Cutoff probability in pruning. This parameter is only used when users want transcription outputs. :param beam_width: The width of beam to be used. 
This parameter is only used when users want transcription From e8172bde675ceb879f2c9a4e8c6657a11465c149 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Mon, 26 Aug 2024 15:56:53 +0200 Subject: [PATCH 22/27] Fix typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index 9db084dd8f..1bbed5587a 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -28,10 +28,10 @@ jobs: fail-fast: false matrix: include: - - name: TensorFlow+Lingvo 2.4.4v1 (Keras 2.4.3 Python 3.9) + - name: TensorFlow+Lingvo 2.5.3v1 (Keras 2.4.3 Python 3.9) framework: tensorflow2v1 python: 3.9 - tensorflow: 2.4.4 + tensorflow: 2.5.3 tf_version: v2 keras: 2.4.3 lingvo: 0.6.4 From faf393fb3fef0e2467ddf1ed4cea0231bd8a544e Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Mon, 26 Aug 2024 16:05:04 +0200 Subject: [PATCH 23/27] Fix typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index 1bbed5587a..476aee6288 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -34,7 +34,7 @@ jobs: tensorflow: 2.5.3 tf_version: v2 keras: 2.4.3 - lingvo: 0.6.4 + lingvo: 0.13.1 name: Run ${{ matrix.name }} Tests steps: From 5454aca4ca5574616700e3e58fe7bb7561e536ec Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Mon, 26 Aug 2024 16:23:13 +0200 Subject: [PATCH 24/27] Fix typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 44 +++++++++------------------------ requirements_test.txt | 6 +---- 2 files changed, 12 insertions(+), 38 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index 476aee6288..509a329d9b 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -28,13 +28,14 @@ jobs: fail-fast: false matrix: include: - - name: TensorFlow+Lingvo 2.5.3v1 (Keras 2.4.3 Python 3.9) - framework: tensorflow2v1 - python: 3.9 - tensorflow: 2.5.3 - tf_version: v2 - keras: 2.4.3 + - name: TensorFlow+Lingvo 0.13.1 (Python 3.9) + framework: tensorflow + python: '3.10' lingvo: 0.13.1 + tensorflow: 2.14.0 + tf_version: v2 + keras: 2.14.0 + tf_addons: 0.21.0 name: Run ${{ matrix.name }} Tests steps: @@ -45,39 +46,16 @@ jobs: with: python-version: ${{ matrix.python }} - name: Pre-install Lingvo ASR - # scipy beyond 1.6.0 is not available on Python 3.6, therefore we adapt the installation of requirements_test.txt run: | sudo apt-get update sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel - pip install -q -r <(sed '/^scipy/d;/^matplotlib/d;/^pandas/d;/^statsmodels/d;/^numba/d;/^jax/d;/^h5py/d;/^Pillow/d;/^pytest/d;/^pytest-mock/d;/^torch/d;/^torchaudio/d;/^torchvision/d;/^xgboost/d;/^requests/d;/^tensorflow/d;/^keras/d;/^kornia/d;/^librosa/d;/^tqdm/d;/^timm/d;/^catboost/d;/^scikit-learn/d;/^GPy/d;/^lief/d;/^ultralytics/d;/^ipython/d;/^pylint/d;/^mypy/d;/^pycodestyle/d;/^black/d;/^ruff/d;/^types-PyYAML/d;/^types-setuptools/d' requirements_test.txt) - pip install scipy==1.5.4 - pip install matplotlib==3.3.4 - pip install pandas==1.1.5 - pip install statsmodels==0.12.2 - pip install numba==0.53.1 + pip install -q -r requirements_test.txt + pip install -q -r <(sed '/^tensorflow/d;/^keras/d;/^tensorflow-addons/d;/^lingvo/d' requirements_test.txt) pip install 
tensorflow==${{ matrix.tensorflow }} pip install keras==${{ matrix.keras }} - pip install lingvo==${{ matrix.lingvo }} - pip install tensorflow-addons==0.9.1 - pip install model-pruning-google-research==0.0.3 - pip install h5py==2.10.0 - pip install pytest~=7.0.1 - pip install pytest-flake8~=1.1.0 - pip install pytest-mock - pip install pytest-cov~=3.0.0 - pip install torch==1.10.2+cpu --find-links https://download.pytorch.org/whl/cpu/torch_stable.html - pip install torchaudio==0.10.2+cpu --find-links https://download.pytorch.org/whl/cpu/torch_stable.html - pip install torchvision==0.11.3+cpu --find-links https://download.pytorch.org/whl/cpu/torch_stable.html - pip install xgboost==1.5.2 - pip install requests==2.27.1 - pip install kornia==0.6.8 - pip install librosa==0.9.2 - pip install tqdm==4.64.1 - pip install catboost==1.1.1 - pip install scikit-learn==0.24.2 - pip install GPy==1.10.0 - pip install lief==0.12.3 + pip install tensorflow-addons==${{ matrix.tf_addons }} + pip install lingvo==0.13.1 pip list - name: Run ${{ matrix.name }} Tests run: pytest --cov-report=xml --cov=art --cov-append -q -vv tests/estimators/speech_recognition/test_tensorflow_lingvo.py --framework=${{ matrix.framework }} --durations=0 diff --git a/requirements_test.txt b/requirements_test.txt index 45179c8b56..fd9c8ba124 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -48,11 +48,7 @@ tensorboardX==2.6.2.2 lief==0.14.1 jax[cpu]==0.4.26 -# Lingvo ASR dependencies -# supported versions: (lingvo==0.6.4 with tensorflow-gpu==2.1.0) -# note: due to conflicts with other TF1/2 version supported by ART, the dependencies are not installed by default -# tensorflow-gpu==2.1.0 -# lingvo==0.6.4 +lingvo==0.13.1 # tests and style checking pytest~=8.3.2 From 4bba80639818877930ab61b28f4c8d6136390211 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Mon, 26 Aug 2024 16:33:38 +0200 Subject: [PATCH 25/27] Fix typing Signed-off-by: Beat Buesser --- .github/workflows/ci-lingvo.yml | 3 ++- requirements_test.txt | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index 509a329d9b..7426443c52 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -51,11 +51,12 @@ jobs: sudo apt-get -y -q install ffmpeg libavcodec-extra python -m pip install --upgrade pip setuptools wheel pip install -q -r requirements_test.txt - pip install -q -r <(sed '/^tensorflow/d;/^keras/d;/^tensorflow-addons/d;/^lingvo/d' requirements_test.txt) + pip install -q -r <(sed '/^tensorflow/d;/^keras/d;/^tensorflow-addons/d;/^lingvo/d;/^Pillow/d' requirements_test.txt) pip install tensorflow==${{ matrix.tensorflow }} pip install keras==${{ matrix.keras }} pip install tensorflow-addons==${{ matrix.tf_addons }} pip install lingvo==0.13.1 + pip install Pillow==10.0.0 pip list - name: Run ${{ matrix.name }} Tests run: pytest --cov-report=xml --cov=art --cov-append -q -vv tests/estimators/speech_recognition/test_tensorflow_lingvo.py --framework=${{ matrix.framework }} --durations=0 diff --git a/requirements_test.txt b/requirements_test.txt index fd9c8ba124..1d44c674d5 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -48,7 +48,7 @@ tensorboardX==2.6.2.2 lief==0.14.1 jax[cpu]==0.4.26 -lingvo==0.13.1 +# lingvo==0.13.1 # tests and style checking pytest~=8.3.2 From 1145d2f753987d60b315ff4ed69e357232c58b28 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Mon, 26 Aug 2024 21:13:50 +0200 Subject: [PATCH 26/27] Fix typing Signed-off-by: 
Beat Buesser --- .github/workflows/ci-lingvo.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-lingvo.yml b/.github/workflows/ci-lingvo.yml index 7426443c52..f7a0a3aff8 100644 --- a/.github/workflows/ci-lingvo.yml +++ b/.github/workflows/ci-lingvo.yml @@ -28,7 +28,7 @@ jobs: fail-fast: false matrix: include: - - name: TensorFlow+Lingvo 0.13.1 (Python 3.9) + - name: TensorFlow+Lingvo 0.13.1 (Python 3.10) framework: tensorflow python: '3.10' lingvo: 0.13.1 From d75ec051052183c6944fbdc4bcec3227939baa93 Mon Sep 17 00:00:00 2001 From: Beat Buesser Date: Mon, 26 Aug 2024 21:14:13 +0200 Subject: [PATCH 27/27] Fix typos Signed-off-by: Beat Buesser --- tests/attacks/evasion/test_laser_attack.py | 10 +++++----- tests/attacks/evasion/test_lowprofool.py | 2 +- tests/attacks/evasion/test_pe_malware_attack.py | 8 ++++---- tests/attacks/poison/test_audio_perturbations.py | 4 ++-- tests/attacks/test_adversarial_patch.py | 12 ++++++------ tests/attacks/test_backdoor_attack.py | 2 +- .../attacks/test_targeted_universal_perturbation.py | 2 +- tests/defences/preprocessor/test_mp3_compression.py | 4 ++-- .../preprocessor/test_mp3_compression_pytorch.py | 2 +- .../defences/preprocessor/test_video_compression.py | 2 +- .../preprocessor/test_video_compression_pytorch.py | 2 +- tests/defences/test_adversarial_trainer.py | 2 +- tests/estimators/certification/test_deepz.py | 2 +- .../certification/test_derandomized_smoothing.py | 4 ++-- tests/estimators/certification/test_interval.py | 2 +- tests/metrics/test_metrics.py | 2 +- tests/utils.py | 10 +++++----- 17 files changed, 36 insertions(+), 36 deletions(-) diff --git a/tests/attacks/evasion/test_laser_attack.py b/tests/attacks/evasion/test_laser_attack.py index 1b5532b540..0b4bdabcc4 100644 --- a/tests/attacks/evasion/test_laser_attack.py +++ b/tests/attacks/evasion/test_laser_attack.py @@ -30,7 +30,7 @@ @pytest.fixture(name="close") def fixture_close() -> Callable: """ - Comparison function + Comparison function. :returns: function that checks if two float arrays are close. """ @@ -51,7 +51,7 @@ def close(x: np.ndarray, y: np.ndarray): @pytest.fixture(name="not_close") def fixture_not_close(close): """ - Comparison function + Comparison function. :returns: function that checks if values of two float arrays are not close. """ @@ -75,7 +75,7 @@ def not_close(x: np.ndarray, y: np.ndarray) -> bool: @pytest.fixture(name="less_or_equal") def fixture_less_or_equal(): """ - Comparison function + Comparison function. :returns: function that checks if first array is less or equal than the second. """ @@ -126,7 +126,7 @@ def fixture_max_laser_beam() -> LaserBeam: @pytest.fixture(name="laser_generator_fixture") def fixture_laser_generator_fixture(min_laser_beam, max_laser_beam) -> Callable: """ - Return a function that returns geneartor of the LaserBeam objects. + Return a function that returns generator of the LaserBeam objects. :param min_laser_beam: LaserBeam object with minimal acceptable properties. :param max_laser_beam: LaserBeam object with maximal acceptable properties. @@ -138,7 +138,7 @@ def fixture_laser_generator_fixture(min_laser_beam, max_laser_beam) -> Callable: @pytest.fixture(name="laser_generator") def fixture_laser_generator(min_laser_beam, max_laser_beam) -> LaserBeamGenerator: """ - Geneartor of the LaserBeam objects. + Generator of the LaserBeam objects. :param min_laser_beam: LaserBeam object with minimal acceptable properties. :param max_laser_beam: LaserBeam object with maximal acceptable properties. 
diff --git a/tests/attacks/evasion/test_lowprofool.py b/tests/attacks/evasion/test_lowprofool.py index 2bc9c1777d..3fbc1b22cb 100644 --- a/tests/attacks/evasion/test_lowprofool.py +++ b/tests/attacks/evasion/test_lowprofool.py @@ -410,7 +410,7 @@ def test_clipping(iris_dataset): top_custom = 3 clf_slr_custom = ScikitlearnLogisticRegression(model=lr_clf, clip_values=(bottom_custom, top_custom)) - # Setting up LowProFool classes with different hyper-parameters + # Setting up LowProFool classes with different hyperparameters lpf_min_max_default = LowProFool(classifier=clf_slr_min_max, n_steps=45, eta=0.02, lambd=1.5) lpf_min_max_high_eta = LowProFool(classifier=clf_slr_min_max, n_steps=45, eta=100000, lambd=1.5) lpf_custom_default = LowProFool(classifier=clf_slr_custom, n_steps=45, eta=0.02, lambd=1.5) diff --git a/tests/attacks/evasion/test_pe_malware_attack.py b/tests/attacks/evasion/test_pe_malware_attack.py index 57f4c7f1a5..aa47f244cc 100644 --- a/tests/attacks/evasion/test_pe_malware_attack.py +++ b/tests/attacks/evasion/test_pe_malware_attack.py @@ -58,7 +58,7 @@ def fix_make_dummy_model(): def get_prediction_model(param_dic): """ - Model going from embeddings to predictions so we can easily optimise the embedding malware embedding. + Model going from embeddings to predictions, so we can easily optimise the embedding malware embedding. Needs to have the same structure as the target model. Populated here with "standard" parameters. """ @@ -168,7 +168,7 @@ def test_append_attack(art_warning, fix_get_synthetic_data, fix_make_dummy_model # We should only have 2 files as the following cannot be converted to valid adv samples: # 2nd datapoint (file too large to support any modifications) - # 4th datapoint (file to large to support append attacks) + # 4th datapoint (file too large to support append attacks) # 5th datapoint (benign file) assert len(adv_x) == 2 @@ -360,7 +360,7 @@ def test_dos_header_attack(art_warning, fix_get_synthetic_data, fix_make_dummy_m ) # should have 3 files. 
Samples which are excluded are: - # 2nd datapoint (file to large to support any modifications) + # 2nd datapoint (file too large to support any modifications) # 5th datapoint (benign file) assert len(adv_x) == 3 @@ -511,7 +511,7 @@ def test_do_not_check_for_valid(art_warning, fix_get_synthetic_data, fix_make_du # We expect 2 files to have been made adversarial the following cannot be converted to valid adv samples: # 2nd datapoint (file too large to support any modifications) - # 4th datapoint (file to large to support append attacks) + # 4th datapoint (file too large to support append attacks) # 5th datapoint (benign file) for i, size in enumerate(size_of_files): if i in [0, 2]: diff --git a/tests/attacks/poison/test_audio_perturbations.py b/tests/attacks/poison/test_audio_perturbations.py index 9bce9478d4..ef31669b8f 100644 --- a/tests/attacks/poison/test_audio_perturbations.py +++ b/tests/attacks/poison/test_audio_perturbations.py @@ -39,7 +39,7 @@ def test_insert_tone_trigger(art_warning): assert np.max(audio) != 0 assert np.max(np.abs(audio)) <= 1.0 - # test single example with differet duration, frequency, and scale + # test single example with different duration, frequency, and scale trigger = CacheToneTrigger(sampling_rate=16000, frequency=16000, duration=0.2, scale=0.5) audio = trigger.insert(x=np.zeros(3200)) assert audio.shape == (3200,) @@ -88,7 +88,7 @@ def test_insert_audio_trigger(art_warning): assert np.max(audio) != 0 assert np.max(np.abs(audio)) <= 1.0 - # test single example with differet duration and scale + # test single example with different duration and scale trigger = CacheAudioTrigger( sampling_rate=16000, backdoor_path=file_path, diff --git a/tests/attacks/test_adversarial_patch.py b/tests/attacks/test_adversarial_patch.py index ca850ff2f1..52a7d867b4 100644 --- a/tests/attacks/test_adversarial_patch.py +++ b/tests/attacks/test_adversarial_patch.py @@ -97,7 +97,7 @@ def test_2_tensorflow_numpy(self): x_out = attack_ap.insert_transformed_patch( self.x_train_mnist[0], np.ones((14, 14, 1)), np.asarray([[2, 13], [2, 18], [12, 22], [8, 13]]) ) - x_out_expexted = np.array( + x_out_expected = np.array( [ 0.0, 0.0, @@ -130,7 +130,7 @@ def test_2_tensorflow_numpy(self): ], dtype=np.float32, ) - np.testing.assert_almost_equal(x_out[15, :, 0], x_out_expexted, decimal=3) + np.testing.assert_almost_equal(x_out[15, :, 0], x_out_expected, decimal=3) if sess is not None: sess.close() @@ -166,7 +166,7 @@ def test_3_tensorflow_v2_framework(self): x_out = attack_ap.insert_transformed_patch( self.x_train_mnist[0], np.ones((14, 14, 1)), np.asarray([[2, 13], [2, 18], [12, 22], [8, 13]]) ) - x_out_expexted = np.array( + x_out_expected = np.array( [ 0.0, 0.0, @@ -199,7 +199,7 @@ def test_3_tensorflow_v2_framework(self): ], dtype=np.float32, ) - np.testing.assert_almost_equal(x_out[15, :, 0], x_out_expexted, decimal=3) + np.testing.assert_almost_equal(x_out[15, :, 0], x_out_expected, decimal=3) mask = np.ones((1, 28, 28)).astype(bool) attack_ap.apply_patch(x=self.x_train_mnist, scale=0.1, mask=mask) @@ -240,7 +240,7 @@ def test_6_keras(self): x_out = attack_ap.insert_transformed_patch( self.x_train_mnist[0], np.ones((14, 14, 1)), np.asarray([[2, 13], [2, 18], [12, 22], [8, 13]]) ) - x_out_expexted = np.array( + x_out_expected = np.array( [ 0.0, 0.0, @@ -273,7 +273,7 @@ def test_6_keras(self): ], dtype=np.float32, ) - np.testing.assert_almost_equal(x_out[15, :, 0], x_out_expexted, decimal=3) + np.testing.assert_almost_equal(x_out[15, :, 0], x_out_expected, decimal=3) def 
test_4_pytorch(self): """ diff --git a/tests/attacks/test_backdoor_attack.py b/tests/attacks/test_backdoor_attack.py index 9192e8ae7d..dd0f53e573 100644 --- a/tests/attacks/test_backdoor_attack.py +++ b/tests/attacks/test_backdoor_attack.py @@ -140,7 +140,7 @@ def test_backdoor_pixel(self): def test_backdoor_image(self): """ - Test the backdoor attack with a image-based perturbation can be trained on classifier + Test the backdoor attack with an image-based perturbation can be trained on classifier """ krc = get_image_classifier_kr() (is_poison_train, x_poisoned_raw, y_poisoned_raw) = self.poison_dataset( diff --git a/tests/attacks/test_targeted_universal_perturbation.py b/tests/attacks/test_targeted_universal_perturbation.py index 39d5e09ada..5c9da96d66 100644 --- a/tests/attacks/test_targeted_universal_perturbation.py +++ b/tests/attacks/test_targeted_universal_perturbation.py @@ -42,7 +42,7 @@ class TestTargetedUniversalPerturbation(TestBase): This module tests the Targeted Universal Perturbation. - | Paper link: https://arxiv.org/abs/1911.06502) + | Paper link: https://arxiv.org/abs/1911.06502 """ @classmethod diff --git a/tests/defences/preprocessor/test_mp3_compression.py b/tests/defences/preprocessor/test_mp3_compression.py index 5f3cda94f9..8d5a44d2fe 100644 --- a/tests/defences/preprocessor/test_mp3_compression.py +++ b/tests/defences/preprocessor/test_mp3_compression.py @@ -84,7 +84,7 @@ def test_non_temporal_data_error(art_warning, image_batch_small): @pytest.mark.parametrize("channels_first", [True, False]) @pytest.mark.skip_framework("keras", "pytorch", "scikitlearn", "mxnet") -def test_mp3_compresssion(art_warning, audio_batch, channels_first): +def test_mp3_compression(art_warning, audio_batch, channels_first): try: test_input, test_output, sample_rate = audio_batch mp3compression = Mp3Compression(sample_rate=sample_rate, channels_first=channels_first) @@ -96,7 +96,7 @@ def test_mp3_compresssion(art_warning, audio_batch, channels_first): @pytest.mark.parametrize("channels_first", [True, False]) @pytest.mark.skip_framework("keras", "pytorch", "scikitlearn", "mxnet") -def test_mp3_compresssion_object(art_warning, audio_batch, channels_first): +def test_mp3_compression_object(art_warning, audio_batch, channels_first): try: test_input, test_output, sample_rate = audio_batch test_input_object = np.array([x for x in test_input], dtype=object) diff --git a/tests/defences/preprocessor/test_mp3_compression_pytorch.py b/tests/defences/preprocessor/test_mp3_compression_pytorch.py index 1b708594ad..b447dfd4ef 100644 --- a/tests/defences/preprocessor/test_mp3_compression_pytorch.py +++ b/tests/defences/preprocessor/test_mp3_compression_pytorch.py @@ -86,7 +86,7 @@ def test_non_temporal_data_error(art_warning, image_batch_small): @pytest.mark.parametrize("channels_first", [True, False]) @pytest.mark.skip_framework("tensorflow", "keras", "scikitlearn", "mxnet", "kerastf") -def test_mp3_compresssion(art_warning, audio_batch, channels_first): +def test_mp3_compression(art_warning, audio_batch, channels_first): try: test_input, test_output, sample_rate = audio_batch mp3compression = Mp3CompressionPyTorch(sample_rate=sample_rate, channels_first=channels_first) diff --git a/tests/defences/preprocessor/test_video_compression.py b/tests/defences/preprocessor/test_video_compression.py index f2188999e3..0f61552f5d 100644 --- a/tests/defences/preprocessor/test_video_compression.py +++ b/tests/defences/preprocessor/test_video_compression.py @@ -44,7 +44,7 @@ def video_batch(channels_first): 
@pytest.mark.parametrize("channels_first", [True, False]) @pytest.mark.skip_framework("keras", "pytorch", "scikitlearn", "mxnet") -def test_video_compresssion(art_warning, video_batch, channels_first): +def test_video_compression(art_warning, video_batch, channels_first): try: test_input, test_output = video_batch video_compression = VideoCompression(video_format="mp4", constant_rate_factor=0, channels_first=channels_first) diff --git a/tests/defences/preprocessor/test_video_compression_pytorch.py b/tests/defences/preprocessor/test_video_compression_pytorch.py index c8deab0a8d..3be48fd4b8 100644 --- a/tests/defences/preprocessor/test_video_compression_pytorch.py +++ b/tests/defences/preprocessor/test_video_compression_pytorch.py @@ -44,7 +44,7 @@ def video_batch(channels_first): @pytest.mark.parametrize("channels_first", [True, False]) @pytest.mark.skip_framework("tensorflow", "keras", "scikitlearn", "mxnet", "kerastf") -def test_video_compresssion(art_warning, video_batch, channels_first): +def test_video_compression(art_warning, video_batch, channels_first): try: test_input, test_output = video_batch video_compression = VideoCompressionPyTorch( diff --git a/tests/defences/test_adversarial_trainer.py b/tests/defences/test_adversarial_trainer.py index e4ba41ea16..e4820ea88d 100644 --- a/tests/defences/test_adversarial_trainer.py +++ b/tests/defences/test_adversarial_trainer.py @@ -67,7 +67,7 @@ def test_classifier_match(self): self.assertEqual(len(adv_trainer.attacks), 1) self.assertEqual(adv_trainer.attacks[0].estimator, adv_trainer.get_classifier()) - def test_excpetions(self): + def test_exceptions(self): with self.assertRaises(ValueError): _ = AdversarialTrainer(self.classifier, "attack") diff --git a/tests/estimators/certification/test_deepz.py b/tests/estimators/certification/test_deepz.py index 06ca002652..d9310e4346 100644 --- a/tests/estimators/certification/test_deepz.py +++ b/tests/estimators/certification/test_deepz.py @@ -32,7 +32,7 @@ @pytest.fixture() def fix_get_mnist_data(): """ - Get the first 100 samples of the mnist test set with channels first format + Get the first 100 samples of the mnist test set with channels first format. :return: First 100 sample/label pairs of the MNIST test dataset. 
""" nb_test = 100 diff --git a/tests/estimators/certification/test_derandomized_smoothing.py b/tests/estimators/certification/test_derandomized_smoothing.py index cee00eda4e..40908416fa 100644 --- a/tests/estimators/certification/test_derandomized_smoothing.py +++ b/tests/estimators/certification/test_derandomized_smoothing.py @@ -156,7 +156,7 @@ def build_model(input_shape): img_inputs = tf.keras.Input(shape=(input_shape[0], input_shape[1], input_shape[2] * 2)) x = tf.keras.layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(2, 2), activation="relu")(img_inputs) x = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)(x) - # tensorflow uses channels last and we are loading weights from an originally trained pytorch model + # tensorflow uses channels last, and we are loading weights from an originally trained pytorch model x = tf.transpose(x, (0, 3, 1, 2)) x = tf.keras.layers.Flatten()(x) x = tf.keras.layers.Dense(100, activation="relu")(x) @@ -295,7 +295,7 @@ def build_model(input_shape): img_inputs = tf.keras.Input(shape=(input_shape[0], input_shape[1], input_shape[2] * 2)) x = tf.keras.layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(2, 2), activation="relu")(img_inputs) x = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)(x) - # tensorflow uses channels last and we are loading weights from an originally trained pytorch model + # tensorflow uses channels last, and we are loading weights from an originally trained pytorch model x = tf.transpose(x, (0, 3, 1, 2)) x = tf.keras.layers.Flatten()(x) x = tf.keras.layers.Dense(100, activation="relu")(x) diff --git a/tests/estimators/certification/test_interval.py b/tests/estimators/certification/test_interval.py index 9214c6b7dc..2cd66809a5 100644 --- a/tests/estimators/certification/test_interval.py +++ b/tests/estimators/certification/test_interval.py @@ -66,7 +66,7 @@ def forward(self, x): @pytest.fixture() def fix_get_mnist_data(): """ - Get the first 100 samples of the mnist test set with channels first format + Get the first 100 samples of the mnist test set with channels first format. :return: First 100 sample/label pairs of the MNIST test dataset. """ nb_test = 100 diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py index 3e5959b882..d169d70df4 100644 --- a/tests/metrics/test_metrics.py +++ b/tests/metrics/test_metrics.py @@ -113,7 +113,7 @@ def test_loss_sensitivity(self): # (x_train, y_train), (_, _), _, _ = load_mnist() # x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN] # - # # Get classifier + # # Get classifier. # classifier = self._cnn_mnist_k([28, 28, 1]) # classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2) # diff --git a/tests/utils.py b/tests/utils.py index e5c521f487..c665ff843f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -236,7 +236,7 @@ def get_image_classifier_tf_v1(from_logits=False, load_init=True, sess=None): """ Standard TensorFlow classifier for unit testing. - The following hyper-parameters were used to obtain the weights and biases: + The following hyperparameters were used to obtain the weights and biases: learning_rate: 0.01 batch size: 10 number of epochs: 2 @@ -413,7 +413,7 @@ def discriminator_loss_fct(real_output, generated_output): zeros (since these are the fake images). 3. Calculate the total_loss as the sum of real_loss and generated_loss. 
""" - # [1,1,...,1] with real output since it is true and we want our generated examples to look like it + # [1,1,...,1] with real output since it is true, and we want our generated examples to look like it real_loss = tf.compat.v1.losses.sigmoid_cross_entropy( multi_class_labels=tf.ones_like(real_output), logits=real_output ) @@ -442,7 +442,7 @@ def get_image_classifier_tf_v2(from_logits=False): """ Standard TensorFlow v2 classifier for unit testing. - The following hyper-parameters were used to obtain the weights and biases: + The following hyperparameters were used to obtain the weights and biases: learning_rate: 0.01 batch size: 10 number of epochs: 2 @@ -1576,7 +1576,7 @@ def get_tabular_classifier_tf_v1(load_init=True, sess=None): """ Standard TensorFlow classifier for unit testing. - The following hyper-parameters were used to obtain the weights and biases: + The following hyperparameters were used to obtain the weights and biases: * learning_rate: 0.01 * batch size: 5 @@ -1663,7 +1663,7 @@ def get_tabular_classifier_tf_v2(): """ Standard TensorFlow v2 classifier for unit testing. - The following hyper-parameters were used to obtain the weights and biases: + The following hyperparameters were used to obtain the weights and biases: * learning_rate: 0.01 * batch size: 5