Commit bca5eca1 authored by Carldst

initial push
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision as tv

import UCHIDA
from utils import *
from Attacks import *

# `device`, `dataloader` and `CIFAR10_dataset` are expected to come from utils;
# define a fallback device in case utils does not export one.
if 'device' not in globals():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
##### Level change: an AIM must be something that exposes ports
class AIM():
    model = None
    parameters = None
    testingDataset = None
    trainingDataset = None
    output_0 = None

    def trainAIM(self, parameters, trainingDataset):
        model = tv.models.vgg16()
        model.classifier = nn.Linear(25088, 10)
        model.load_state_dict(parameters["model_state_dict"])
        # halve the batch size until the DataLoader can be built
        batch_size = 128
        trainloader = None
        while batch_size > 0:
            try:
                trainloader = torch.utils.data.DataLoader(trainingDataset, batch_size=batch_size,
                                                          shuffle=False, num_workers=2)
                break
            except Exception:
                batch_size = int(batch_size / 2)
        criterion = nn.CrossEntropyLoss()
        learning_rate, momentum, weight_decay = 0.01, .9, 5e-4
        optimizer = optim.SGD([
            {'params': model.parameters()}
        ], lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
        model.train()
        model.to(device)
        for epoch in range(10):
            for i, data in enumerate(trainloader, 0):
                # split data into the image and its label
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)
                # replicate single-channel images to the 3 channels VGG expects
                if inputs.size()[1] == 1:
                    inputs.squeeze_(1)
                    inputs = torch.stack([inputs, inputs, inputs], 1)
                # reset the gradients
                optimizer.zero_grad()
                # forward pass
                outputs = model(inputs)
                # backward pass
                loss = criterion(outputs, labels)
                loss.backward()
                # update the weights
                optimizer.step()
        return model
    def funcAIM(self, model, parameters, testingDataset):
        '''
        Return the error rate of the model on the testing dataset.
        :param model: the network to evaluate
        :param parameters: checkpoint holding the network's state dict
        :param testingDataset: testing data
        :return: error rate in percent
        '''
        model.load_state_dict(parameters["model_state_dict"])
        # halve the batch size until the DataLoader can be built
        batch_size = 128
        while batch_size > 0:
            try:
                testloader = torch.utils.data.DataLoader(testingDataset, batch_size=batch_size,
                                                         shuffle=False, num_workers=2)
                return self.testingAIM(model, testloader)
            except Exception:
                batch_size = int(batch_size / 2)
    def testingAIM(self, AIM, testloader):
        correct = 0
        total = 0
        AIM.to(device)
        AIM.eval()
        # torch.no_grad() disables gradient tracking during evaluation
        with torch.no_grad():
            for data in testloader:
                inputs, labels = data
                # replicate single-channel images to the 3 channels VGG expects
                if inputs.size()[1] == 1:
                    inputs.squeeze_(1)
                    inputs = torch.stack([inputs, inputs, inputs], 1)
                inputs = inputs.to(device)
                labels = labels.to(device)
                outputs = AIM(inputs)
                # some models return (logits, extra); keep only the logits
                if isinstance(outputs, (tuple, list)):
                    outputs = outputs[0]
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        # return the error rate (in percent), not the accuracy
        return 100 - (100 * float(correct) / total)
    def run(self):
        self.output_0 = self.funcAIM(self.model, self.parameters, self.testingDataset)

class WatermarkEmbedder():
    AIM = None
    output_0 = None
    def funcWatermarkEmbedder(self, model):
        ### differentiate this function based on whether the trainloader is None or not
        model.to(device)
        #### to be adapted by
        tools = UCHIDA.Uchi_tools()
        # embed a 64-bit watermark into the weights of the given convolution layer
        weight_name = 'features.19.weight'
        T = 64
        watermark = torch.tensor(np.random.choice([0, 1], size=(T,), p=[1. / 3, 2. / 3]), device=device)
        watermarking_dict = {'lambd': 0.1, 'weight_name': weight_name, 'watermark': watermark, "types": 1}
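        # Background (not part of the original code): in Uchida et al.'s scheme,
        # a binary watermark b is embedded by adding a regularizer
        #     lambd * BCE(sigmoid(X @ mean(W)), b)
        # to the task loss, where W is the flattened target weight tensor and X
        # is a secret projection matrix; the exact form used here lives in UCHIDA.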
        if watermarking_dict["types"] == 1:
            # embed the watermark while training on CIFAR-10
            trainset, testset, inference_transform = CIFAR10_dataset()
            # hyperparameters of the training
            num_epochs = 25
            batch_size = 128
            tools.init(model, watermarking_dict)
            trainloader, testloader = dataloader(trainset, testset, batch_size)
            criterion = nn.CrossEntropyLoss()
            learning_rate, momentum, weight_decay = 0.01, .9, 5e-4
            optimizer = optim.SGD([
                {'params': model.parameters()}
            ], lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
            model.train()
            epoch = 0
            print("Launching injection.....")
            while epoch < num_epochs:
                print('doing epoch', str(epoch + 1), ".....")
                loss, loss_nn, loss_w = tools.Embedder_one_step(model, trainloader, optimizer, criterion,
                                                                watermarking_dict)
                # normalise the accumulated losses by the dataset size
                loss = (loss * batch_size / len(trainloader.dataset))
                loss_nn = (loss_nn * batch_size / len(trainloader.dataset))
                loss_w = (loss_w * batch_size / len(trainloader.dataset))
                print(' loss : %.5f - loss_wm: %.5f, loss_nn: %.5f ' % (loss, loss_w, loss_nn))
                epoch += 1
        else:
            # embed the watermark directly into the weights, without retraining
            tools.init(model, watermarking_dict)
            model = tools.Embedder(model, watermarking_dict)
        ## save the checkpoint
        np.save('watermarking_dict.npy', watermarking_dict)
        torch.save({
            'model_state_dict': model.state_dict(),
        }, 'weights')
        ## reload the checkpoint on CPU
        checkpoints = torch.load('weights', map_location=torch.device('cpu'))
        return checkpoints
    def run(self):
        self.output_0 = self.funcWatermarkEmbedder(self.AIM)

class Comparator():
    unwatermarked = None
    watermarked = None
    testingDataset = None
    output_0 = None
    def funcComparator(self, unwatermarked, watermarked, testingDataset):
        undertestAIM = AIM()
        # funcAIM expects a model instance plus a checkpoint: rebuild the same
        # VGG16 architecture used in AIM.trainAIM
        model = tv.models.vgg16()
        model.classifier = nn.Linear(25088, 10)
        res_wm = undertestAIM.funcAIM(model, watermarked, testingDataset)
        res_unwm = undertestAIM.funcAIM(model, unwatermarked, testingDataset)
        # relative change in error rate caused by the watermark
        return np.abs(res_unwm - res_wm) / res_wm, res_wm
    def run(self):
        self.output_0 = self.funcComparator(self.unwatermarked, self.watermarked, self.testingDataset)

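# A minimal end-to-end sketch (not part of the original commit) showing how
# the three blocks chain together. It assumes the CIFAR10_dataset() helper
# from utils and checkpoints shaped as {'model_state_dict': ...}.
if __name__ == '__main__':
    import copy

    trainset, testset, inference_transform = CIFAR10_dataset()

    # 1. train the clean model and snapshot its checkpoint
    model = tv.models.vgg16()
    model.classifier = nn.Linear(25088, 10)
    aim = AIM()
    model = aim.trainAIM({'model_state_dict': model.state_dict()}, trainset)
    # deepcopy so the later in-place watermarking does not mutate this snapshot
    unwatermarked = {'model_state_dict': copy.deepcopy(model.state_dict())}

    # 2. embed the watermark (returns a CPU checkpoint of the marked model)
    embedder = WatermarkEmbedder()
    embedder.AIM = model
    embedder.run()
    watermarked = embedder.output_0

    # 3. relative change in test error caused by the watermark
    comparator = Comparator()
    comparator.unwatermarked = unwatermarked
    comparator.watermarked = watermarked
    comparator.testingDataset = testset
    comparator.run()
    print('relative error change, watermarked error:', comparator.output_0)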