Commit 866dc25c authored by Carl De Sousa Trias

Update README.md, Attacks/__init__.py, Attacks/fine_tuning.py, Attacks/gaussian.py, Attacks/knowledge_distillation.py, Attacks/quantization.py, Attacks/pruning.py, Attacks/watermark_overwriting.py
parent f2292d43
from .gaussian import *
from .fine_tuning import *
from .pruning import *
from .quantization import *
from .knowledge_distillation import *
from .watermark_overwriting import *
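# Usage note (an addition, not in the original file): thanks to the wildcard
# re-exports above, the attack entry points can be imported directly from the
# package, e.g. `from Attacks import finetuning, adding_noise_global, quantization`.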
# fine tuning
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import *


def finetuning(net, epochs, trainloader):
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
    for epoch in tqdm(range(epochs)):
        net.train()
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # split data into the image and its label
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            # initialise the optimiser
            optimizer.zero_grad()
            # forward
            outputs = net(inputs)
            # backward
            loss = criterion(outputs, labels)
            loss.backward()
            # update the optimizer
            optimizer.step()
            # loss
            running_loss += loss.item()
    return net


'''
M_ID=4
param={"E":1}
'''
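# Minimal usage sketch (an addition, not in the original file): fine-tune a VGG16
# with a CIFAR-10 head for one epoch, mirroring how the sibling attack scripts
# build their models. Assumes utils provides CIFAR10_dataset, dataloader, device,
# nn and tv, as they are used elsewhere in this package.
if __name__ == "__main__":
    trainset, testset, _ = CIFAR10_dataset()
    trainloader, _ = dataloader(trainset, testset, 128)
    net = tv.models.vgg16()
    net.classifier = nn.Linear(25088, 10)
    net.to(device)
    net = finetuning(net, epochs=1, trainloader=trainloader)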
from utils import *


def adding_noise(net, power, module_name):
    '''add Gaussian noise to the parameters of the network whose name matches module_name'''
    for name, parameters in net.named_parameters():
        if module_name in name:
            print("noise added")
            calcul = nn.utils.parameters_to_vector(parameters)
            sigma = torch.std(calcul, unbiased=False).item()
            noise = torch.normal(mean=0, std=power * sigma, size=parameters.size())
            parameters.data += noise.to(device)
    return net


def adding_noise_global(net, power):
    '''add Gaussian noise to the weights of every Conv2d and Linear layer of the network'''
    for name, module in net.named_modules():
        if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
            parameters = module.weight.data
            calcul = nn.utils.parameters_to_vector(parameters)
            sigma = torch.std(calcul, unbiased=False).item()
            noise = torch.normal(mean=0, std=power * sigma, size=parameters.size())
            parameters.data += noise.to(device)
    return net


'''
M_ID=0
param={'name':["features.17.w"],"S":5}
'''
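# Minimal usage sketch (an addition, not in the original file): perturb every
# Conv2d/Linear weight with zero-mean Gaussian noise whose std is 5x the layer's
# own weight std, matching the S=5 setting above. The layer name passed to the
# targeted variant is only illustrative. Assumes utils provides tv, nn and device.
if __name__ == "__main__":
    net = tv.models.vgg16()
    net.classifier = nn.Linear(25088, 10)
    net.to(device)
    net = adding_noise_global(net, power=5)
    # targeted variant, on a single (illustrative) layer:
    # net = adding_noise(net, power=5, module_name="features.17")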
import torch.nn.functional as F
from utils import *


def distill_unlabeled(y, teacher_scores, T):
    return nn.KLDivLoss()(F.log_softmax(y / T, dim=1), F.softmax(teacher_scores / T, dim=1)) * T * T


def test_knowledge_dist(net, water_loss, file_weights, file_watermark, dataset='CIFAR10'):
    epochs_list, test_list, water_test_list = [], [], []
    trainset, testset, _ = CIFAR10_dataset()
    trainloader, testloader = dataloader(trainset, testset, 100)
    student_net = tv.models.vgg16()
    student_net.classifier = nn.Linear(25088, 10)
    student_net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(student_net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
    watermarking_dict = np.load(file_watermark, allow_pickle='TRUE').item()
    net.eval()
    for param in net.parameters():
        param.requires_grad = False
    student_net.train()
    for epoch in range(10):
        # keep the student in train mode; the teacher stays in eval mode
        student_net.train()
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # split data into the image and its label
            inputs, labels = data
            if dataset == 'MNIST':
                inputs.squeeze_(1)
                inputs = torch.stack([inputs, inputs, inputs], 1)
            inputs = inputs.to(device)
            labels = labels.to(device)
            teacher_output = net(inputs)
            teacher_output = teacher_output.detach()
            _, labels_teacher = torch.max(F.log_softmax(teacher_output, dim=1), dim=1)
            # initialise the optimiser
            optimizer.zero_grad()
            # forward
            outputs = student_net(inputs)
            # backward
            loss = criterion(outputs, labels_teacher)
            loss.backward()
            # update the optimizer
            optimizer.step()
            # loss
            running_loss += loss.item()
        print(running_loss)
    return epochs_list, test_list, water_test_list


def knowledge_distillation(net, epochs, trainloader, student_net):
    student_net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(student_net.parameters(), lr=0.001, momentum=0.9, weight_decay=5e-4)
    net.eval()
    for param in net.parameters():
        param.requires_grad = False
    student_net.train()
    for epoch in range(epochs):
        print('doing epoch', str(epoch + 1), ".....")
        # keep the student in train mode; the teacher stays in eval mode
        student_net.train()
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # split data into the image and its label
            inputs, labels = data
            inputs = inputs.to(device)
            teacher_output = net(inputs)
            teacher_output = teacher_output.detach()
            _, labels_teacher = torch.max(F.log_softmax(teacher_output, dim=1), dim=1)
            # initialise the optimiser
            optimizer.zero_grad()
            # forward
            outputs = student_net(inputs)
            # backward
            loss = criterion(outputs, labels_teacher)
            loss.backward()
            # update the optimizer
            optimizer.step()
            # loss
            running_loss += loss.item()
        loss = (running_loss * 128 / len(trainloader.dataset))
        print(' loss : %.5f ' % (loss))


'''
M_ID = 5
trainset, testset, inference_transform = CIFAR10_dataset()
trainloader, testloader = dataloader(trainset, testset, 128)
student = tv.models.vgg16()
student.classifier = nn.Linear(25088, 10)
param = {"E":5,"trainloader":trainloader,"student":student}
'''
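# Minimal usage sketch (an addition, not in the original file): distill a teacher
# into a fresh VGG16 student for 5 epochs, following the M_ID=5 parameters above.
# In practice the teacher would be the trained, watermarked model; here it is only
# instantiated. Assumes utils provides CIFAR10_dataset, dataloader, device, nn and tv.
if __name__ == "__main__":
    trainset, testset, _ = CIFAR10_dataset()
    trainloader, _ = dataloader(trainset, testset, 128)
    teacher = tv.models.vgg16()
    teacher.classifier = nn.Linear(25088, 10)
    teacher.to(device)  # load the trained/watermarked weights here
    student = tv.models.vgg16()
    student.classifier = nn.Linear(25088, 10)
    knowledge_distillation(teacher, epochs=5, trainloader=trainloader, student_net=student)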
# pruning
import matplotlib.pyplot as plt
import torch.nn.utils.prune as prune
from utils import *


def prune_model_l1_unstructured(new_model, proportion):
    for name, module in new_model.named_modules():
        if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
            print("pruned")
            prune.l1_unstructured(module, name='weight', amount=proportion)
            prune.remove(module, 'weight')
    return new_model


def random_mask(new_model, proportion):
    # TODO: maybe add a dim argument so the pruning can remove entire kernels
    for name, module in new_model.named_modules():
        if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
            prune.random_unstructured(module, name='weight', amount=proportion)
    return dict(new_model.named_buffers())


def prune_model_random_unstructured(new_model, proportion):
    dict_mask = random_mask(new_model, proportion)
    for name, module in new_model.named_modules():
        if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
            print("pruned")
            weight_name = name + '.weight_mask'
            module.weight = nn.Parameter(module.weight * dict_mask[weight_name])
    return new_model


def train_pruning(net, optimizer, criterion, trainloader, number_epochs, value=None, mask=None):
    # train
    net.train()
    for epoch in range(number_epochs):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # split data into the image and its label
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            if inputs.size()[1] == 1:
                inputs.squeeze_(1)
                inputs = torch.stack([inputs, inputs, inputs], 1)
            # initialise the optimiser
            optimizer.zero_grad()
            # forward
            outputs = net(inputs)
            # backward
            loss = criterion(outputs, labels)
            loss.backward()
            if value is not None:
                net = prune_model_l1_unstructured(net, value)
            elif mask is not None:
                net = prune_model_random_unstructured(net, mask)
            # update the optimizer
            optimizer.step()
            # loss
            running_loss += loss.item()


'''
M_ID=1
param={"P":.99}
M_ID=2
param={"R":.99}
'''
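# Minimal usage sketch (an addition, not in the original file): prune 99% of the
# Conv2d/Linear weights, matching the .99 settings above. Assumes utils provides
# tv, nn and device, as in the sibling attack scripts.
if __name__ == "__main__":
    net = tv.models.vgg16()
    net.classifier = nn.Linear(25088, 10)
    net.to(device)
    net = prune_model_l1_unstructured(net, proportion=0.99)
    # random variant: net = prune_model_random_unstructured(net, proportion=0.99)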
import matplotlib.pyplot as plt
from utils import *


# quantization
def quantize_tensor(x, num_bits):
    qmin = 0.
    qmax = 2. ** num_bits - 1.
    min_val, max_val = torch.min(x), torch.max(x)
    scale = (max_val - min_val) / (qmax - qmin)
    # standard asymmetric zero-point: the value that maps min_val onto qmin
    initial_zero_point = (qmin - min_val / scale).round()
    print(min_val, max_val, scale, initial_zero_point)
    zero_point = 0
    if initial_zero_point < qmin:
        zero_point = qmin
    elif initial_zero_point > qmax:
        zero_point = qmax
    else:
        zero_point = initial_zero_point
    zero_point = int(zero_point)
    q_x = zero_point + x / scale
    q_x.clamp_(qmin, qmax).round_()
    q_x = q_x.byte()
    return {'tensor': q_x, 'scale': scale, 'zero_point': zero_point}


def dequantize_tensor(q_x):
    return q_x['scale'] * (q_x['tensor'].float() - q_x['zero_point'])


def fake_quantization(x, num_bits):
    qmax = 2. ** num_bits - 1.
    min_val, max_val = torch.min(x), torch.max(x)
    scale = qmax / (max_val - min_val)
    x_q = (x - min_val) * scale
    x_q.clamp_(0, qmax).round_()  # clamp = min(max(x, min_value), max_value)
    x_q.byte()  # note: result is not stored; x_q already holds integer-valued floats after round_()
    x_f_q = x_q.float() / scale + min_val
    return x_f_q


def quantization(net, num_bits):
    with torch.no_grad():
        for name, module in net.named_modules():
            if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
                print("quantized")
                tensor = module.weight
                tensor_q = fake_quantization(tensor, num_bits)
                module.weight = nn.Parameter(tensor_q)
    return net
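# Minimal usage sketch (an addition, not in the original file): simulate 8-bit
# quantization of every Conv2d/Linear weight tensor; the weights stay in float
# but only take 2**num_bits distinct levels per layer. Assumes utils provides
# tv, nn and device, as in the sibling attack scripts.
if __name__ == "__main__":
    net = tv.models.vgg16()
    net.classifier = nn.Linear(25088, 10)
    net.to(device)
    net = quantization(net, num_bits=8)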
# watermark overwriting
from utils import *


def overwriting(net, NNWmethod, nbr_watermark, watermarking_dict):
    for i in range(nbr_watermark):
        Embeds(watermarking_dict["types"], NNWmethod, net, watermarking_dict)
    return net


def Embeds(types, tools, model, watermarking_dict):
    if types == "1":
        tools.init(model, watermarking_dict)
        trainset, testset, inference_transform = CIFAR10_dataset()
        # hyperparameters of training
        criterion = nn.CrossEntropyLoss()
        num_epochs = 5
        batch_size = 128
        trainloader, testloader = dataloader(trainset, testset, batch_size)
        learning_rate, momentum, weight_decay = 0.01, .9, 5e-4
        optimizer = optim.SGD([
            {'params': model.parameters()}
        ], lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
        model.train()
        epoch = 0
        print("Launching injection.....")
        while epoch < num_epochs:
            print('doing epoch', str(epoch + 1), ".....")
            loss, loss_nn, loss_w = tools.Embedder_one_step(model, trainloader, optimizer, criterion, watermarking_dict)
            loss = (loss * batch_size / len(trainloader.dataset))
            loss_nn = (loss_nn * batch_size / len(trainloader.dataset))
            loss_w = (loss_w * batch_size / len(trainloader.dataset))
            print(' loss : %.5f - loss_wm: %.5f, loss_nn: %.5f ' % (loss, loss_w, loss_nn))
            epoch += 1
    elif types == "0":
        print("Launching injection.....")
        model = tools.Embedder(model, watermarking_dict)
    return model


'''
M_ID=6
param={"W":2,"watermarking_dict":watermarking_dict,"NNWmethods":tools}
'''
# MPAI-NNT v1.0 implementation
## Case 1
This code refers to the implementation of the MPAI-NNT.
......