
Commit

style corrections
Signed-off-by: Muhammad Zaid Hameed <[email protected]>
Muhammad Zaid Hameed authored and committed Jul 21, 2023
1 parent 1d65b5d commit f85597f
Showing 2 changed files with 23 additions and 19 deletions.
10 changes: 5 additions & 5 deletions art/defences/trainer/adversarial_trainer_awp_pytorch.py
@@ -375,7 +375,7 @@ def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray) -> Tuple[float, float, float]:
         return train_loss, train_acc, train_n

     def _weight_perturbation(
-        self, x_batch: np.ndarray, x_batch_pert: np.ndarray, y_batch: np.ndarray
+        self, x_batch: "torch.Tensor", x_batch_pert: "torch.Tensor", y_batch: "torch.Tensor"
     ) -> Dict[str, "torch.Tensor"]:
         """
         Calculate weight perturbation for a batch of data.
@@ -416,15 +416,15 @@ def _weight_perturbation(
                 "Incorrect mode provided for base adversarial training. 'mode' must be among 'PGD' and 'TRADES'."
             )

-        self._proxy_classifier._optimizer.zero_grad()  # pylint: disable=W0212
+        self._proxy_classifier._optimizer.zero_grad()  # type: ignore # pylint: disable=W0212
         loss.backward()
-        self._proxy_classifier._optimizer.step()  # pylint: disable=W0212
+        self._proxy_classifier._optimizer.step()  # type: ignore # pylint: disable=W0212

         params_dict_proxy, _ = self._calculate_model_params(self._proxy_classifier)

         for name in list_keys:
             perturbation = params_dict_proxy[name]["param"] - params_dict[name]["param"]
-            perturbation = perturbation.reshape(params_dict[name]["size"])
+            perturbation = torch.reshape(perturbation, params_dict[name]["size"])  # type: ignore
             scale = params_dict[name]["norm"] / (perturbation.norm() + EPS)
             w_perturb[name] = scale * perturbation

@@ -444,7 +444,7 @@ def _calculate_model_params(

         import torch

-        params_dict = OrderedDict()
+        params_dict = OrderedDict()  # type: ignore
         list_params = []
         for name, param in p_classifier._model.state_dict().items():  # pylint: disable=W0212
             if len(param.size()) <= 1:
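The reshaped perturbation in the hunk above feeds into a per-layer rescaling: each layer's (proxy - model) difference is scaled so its norm matches that layer's original weight norm. The following is a minimal, self-contained sketch of that scaling step, not part of this commit; the function name, the EPS value, and the use of plain nn.Module state dicts are illustrative assumptions rather than the trainer's internals.

import torch
import torch.nn as nn

EPS = 1e-12  # small constant to avoid division by zero; the trainer's actual EPS value is assumed here


def layerwise_weight_perturbation(model: nn.Module, proxy: nn.Module) -> dict:
    """Per-layer (proxy - model) difference, rescaled so each layer's perturbation
    norm matches that layer's weight norm, mirroring the reshape/scale lines above."""
    w_perturb = {}
    params = model.state_dict()
    params_proxy = proxy.state_dict()
    for name, param in params.items():
        if len(param.size()) <= 1:  # skip biases and BatchNorm vectors, as the trainer does
            continue
        perturbation = params_proxy[name] - param
        perturbation = torch.reshape(perturbation, param.size())
        scale = param.norm() / (perturbation.norm() + EPS)
        w_perturb[name] = scale * perturbation
    return w_perturb


if __name__ == "__main__":
    net, proxy_net = nn.Linear(4, 3), nn.Linear(4, 3)
    print({name: tuple(p.shape) for name, p in layerwise_weight_perturbation(net, proxy_net).items()})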
32 changes: 18 additions & 14 deletions examples/adversarial_training_awp.py
@@ -18,15 +18,16 @@
 from art.attacks.evasion import ProjectedGradientDescent

 """
-For this example we choose the PreActResNet18 model as used in the paper 
+For this example we choose the PreActResNet18 model as used in the paper
 (https://proceedings.neurips.cc/paper/2020/file/1ef91c212e30e14bf125e9374262401f-Paper.pdf)
 The code for the model architecture has been adopted from
 https://github.com/csdongxian/AWP/blob/main/AT_AWP/preactresnet.py
 """


 class PreActBlock(nn.Module):
-    '''Pre-activation version of the BasicBlock.'''
+    """Pre-activation version of the BasicBlock."""
+
     expansion = 1

     def __init__(self, in_planes, planes, stride=1):
@@ -36,22 +37,23 @@ def __init__(self, in_planes, planes, stride=1):
         self.bn2 = nn.BatchNorm2d(planes)
         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

-        if stride != 1 or in_planes != self.expansion*planes:
+        if stride != 1 or in_planes != self.expansion * planes:
             self.shortcut = nn.Sequential(
-                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
+                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
             )

     def forward(self, x):
         out = F.relu(self.bn1(x))
-        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
+        shortcut = self.shortcut(out) if hasattr(self, "shortcut") else x
         out = self.conv1(out)
         out = self.conv2(F.relu(self.bn2(out)))
         out += shortcut
         return out


 class PreActBottleneck(nn.Module):
-    '''Pre-activation version of the original Bottleneck module.'''
+    """Pre-activation version of the original Bottleneck module."""
+
     expansion = 4

     def __init__(self, in_planes, planes, stride=1):
@@ -61,16 +63,16 @@ def __init__(self, in_planes, planes, stride=1):
         self.bn2 = nn.BatchNorm2d(planes)
         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
         self.bn3 = nn.BatchNorm2d(planes)
-        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
+        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)

-        if stride != 1 or in_planes != self.expansion*planes:
+        if stride != 1 or in_planes != self.expansion * planes:
             self.shortcut = nn.Sequential(
-                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
+                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
             )

     def forward(self, x):
         out = F.relu(self.bn1(x))
-        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
+        shortcut = self.shortcut(out) if hasattr(self, "shortcut") else x
         out = self.conv1(out)
         out = self.conv2(F.relu(self.bn2(out)))
         out = self.conv3(F.relu(self.bn3(out)))
@@ -89,10 +91,10 @@ def __init__(self, block, num_blocks, num_classes=10):
         self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
         self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
         self.bn = nn.BatchNorm2d(512 * block.expansion)
-        self.linear = nn.Linear(512*block.expansion, num_classes)
+        self.linear = nn.Linear(512 * block.expansion, num_classes)

     def _make_layer(self, block, planes, num_blocks, stride):
-        strides = [stride] + [1]*(num_blocks-1)
+        strides = [stride] + [1] * (num_blocks - 1)
         layers = []
         for stride in strides:
             layers.append(block(self.in_planes, planes, stride))
@@ -113,7 +115,7 @@ def forward(self, x):


 def PreActResNet18(num_classes=10):
-    return PreActResNet(PreActBlock, [2,2,2,2], num_classes=num_classes)
+    return PreActResNet(PreActBlock, [2, 2, 2, 2], num_classes=num_classes)


 class CIFAR10_dataset(Dataset):
@@ -202,7 +204,9 @@ def __len__(self):
 )

 # Step 4: Create the trainer object - AdversarialTrainerAWPPyTorch
-trainer = AdversarialTrainerAWPPyTorch(classifier, proxy_classifier, attack, mode="PGD", gamma=gamma, beta=6.0, warmup=warmup)
+trainer = AdversarialTrainerAWPPyTorch(
+    classifier, proxy_classifier, attack, mode="PGD", gamma=gamma, beta=6.0, warmup=warmup
+)


 # Build a Keras image augmentation object and wrap it in ART
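To show what the reformatted Step 4 call above plugs into, here is a compressed, self-contained sketch of the surrounding pipeline, not part of this commit. It swaps PreActResNet18 for a tiny stand-in CNN and random CIFAR-10-shaped data so it runs quickly; the hyperparameter values (eps, gamma, warmup, epochs) are illustrative assumptions rather than the example's settings, and the fit keywords follow ART's usual nb_epochs/batch_size convention.

import numpy as np
import torch
import torch.nn as nn

from art.attacks.evasion import ProjectedGradientDescent
from art.defences.trainer import AdversarialTrainerAWPPyTorch
from art.estimators.classification import PyTorchClassifier


def make_classifier() -> PyTorchClassifier:
    # Small stand-in for PreActResNet18 so the sketch stays fast.
    model = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1), nn.ReLU(), nn.Flatten(), nn.Linear(8 * 32 * 32, 10)
    )
    return PyTorchClassifier(
        model=model,
        loss=nn.CrossEntropyLoss(),
        optimizer=torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9),
        input_shape=(3, 32, 32),
        nb_classes=10,
        clip_values=(0.0, 1.0),
    )


classifier = make_classifier()
proxy_classifier = make_classifier()  # AWP uses a second classifier to probe weight perturbations
attack = ProjectedGradientDescent(classifier, eps=8 / 255, eps_step=2 / 255, max_iter=10)

trainer = AdversarialTrainerAWPPyTorch(
    classifier, proxy_classifier, attack, mode="PGD", gamma=0.01, beta=6.0, warmup=0
)

# Random batch standing in for CIFAR-10; the real example loads the dataset instead.
x_train = np.random.rand(64, 3, 32, 32).astype(np.float32)
y_train = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, size=64)]

trainer.fit(x_train, y_train, nb_epochs=1, batch_size=32)
print(classifier.predict(x_train[:4]).shape)  # (4, 10)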
