Commit 5d6523f6 authored by Carl De Sousa Trias

initial push

parent 46c44bb3
# fine tuning
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import *
def finetuning(net, epochs, trainloader):
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
    for epoch in tqdm(range(epochs)):
        net.train()
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # split data into the image and its label
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            # reset the gradients
            optimizer.zero_grad()
            # forward
            outputs = net(inputs)
            # backward
            loss = criterion(outputs, labels)
            loss.backward()
            # update the weights
            optimizer.step()
            # accumulate the loss
            running_loss += loss.item()
    return net
'''
M_ID=4
param={"E":1}
'''
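# a minimal usage sketch of the attack above (the CIFAR10_dataset/dataloader helpers come from utils):
'''
trainset, testset, _ = CIFAR10_dataset()
trainloader, _ = dataloader(trainset, testset, 128)
net = finetuning(net, epochs=1, trainloader=trainloader)  # "E": 1
'''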
from utils import *
def adding_noise(net, power, module_name):
    '''add Gaussian noise to the parameters of the targeted module'''
    for name, parameters in net.named_parameters():
        if module_name in name:
            print("noise added")
            flat = parameters.view(-1)
            sigma = torch.std(flat, unbiased=False).item()
            noise = torch.normal(mean=0, std=power * sigma, size=parameters.size())
            parameters.data += noise.to(device)
    return net
def adding_noise_global(net, power):
    '''add Gaussian noise to the weights of every Conv2d/Linear layer'''
    for name, module in net.named_modules():
        if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
            parameters = module.weight
            flat = parameters.data.view(-1)
            sigma = torch.std(flat, unbiased=False).item()
            noise = torch.normal(mean=0, std=power * sigma, size=parameters.size())
            parameters.data += noise.to(device)
    return net
'''
M_ID=0
param={'name':["features.17.w"],"S":5}
'''
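# a minimal usage sketch matching the parameters above ("S" is the noise power):
'''
net = adding_noise(net, power=5, module_name="features.17.w")  # M_ID=0, targeted layer
net = adding_noise_global(net, power=5)                        # global variant
'''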
import torch.nn.functional as F
from utils import *
def distill_unlabeled(y, teacher_scores, T):
    '''KL-divergence loss between student and teacher logits, both softened by the temperature T'''
    return nn.KLDivLoss()(F.log_softmax(y / T, dim=1), F.softmax(teacher_scores / T, dim=1)) * T * T
def test_knowledge_dist(net, water_loss, file_weights, file_watermark, dataset='CIFAR10'):
    epochs_list, test_list, water_test_list = [], [], []
    trainset, testset, _ = CIFAR10_dataset()
    trainloader, testloader = dataloader(trainset, testset, 100)
    student_net = tv.models.vgg16()
    student_net.classifier = nn.Linear(25088, 10)
    student_net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(student_net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
    watermarking_dict = np.load(file_watermark, allow_pickle='TRUE').item()
    # freeze the teacher: it stays in eval mode for the whole distillation
    net.eval()
    for param in net.parameters():
        param.requires_grad = False
    student_net.train()
    for epoch in range(10):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # split data into the image and its label
            inputs, labels = data
            if dataset == 'MNIST':
                inputs.squeeze_(1)
                inputs = torch.stack([inputs, inputs, inputs], 1)
            inputs = inputs.to(device)
            labels = labels.to(device)
            # the teacher's predictions become the student's labels
            teacher_output = net(inputs)
            teacher_output = teacher_output.detach()
            _, labels_teacher = torch.max(F.log_softmax(teacher_output, dim=1), dim=1)
            # reset the gradients
            optimizer.zero_grad()
            # forward
            outputs = student_net(inputs)
            # backward
            loss = criterion(outputs, labels_teacher)
            loss.backward()
            # update the weights
            optimizer.step()
            # accumulate the loss
            running_loss += loss.item()
        print(running_loss)
    return epochs_list, test_list, water_test_list
def knowledge_distillation(net, epochs, trainloader, student_net):
    student_net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(student_net.parameters(), lr=0.001, momentum=0.9, weight_decay=5e-4)
    # freeze the teacher: it stays in eval mode for the whole distillation
    net.eval()
    for param in net.parameters():
        param.requires_grad = False
    student_net.train()
    for epoch in range(epochs):
        print('doing epoch', str(epoch + 1), ".....")
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # split data into the image and its label
            inputs, labels = data
            inputs = inputs.to(device)
            # the teacher's predictions become the student's labels
            teacher_output = net(inputs)
            teacher_output = teacher_output.detach()
            _, labels_teacher = torch.max(F.log_softmax(teacher_output, dim=1), dim=1)
            # reset the gradients
            optimizer.zero_grad()
            # forward
            outputs = student_net(inputs)
            # backward
            loss = criterion(outputs, labels_teacher)
            loss.backward()
            # update the weights
            optimizer.step()
            # accumulate the loss
            running_loss += loss.item()
        # normalise by the dataset size (assumes a batch size of 128)
        loss = (running_loss * 128 / len(trainloader.dataset))
        print(' loss : %.5f ' % (loss))
'''
M_ID = 5
trainset, testset, inference_transform = CIFAR10_dataset()
trainloader, testloader = dataloader(trainset, testset, 128)
student = tv.models.vgg16()
student.classifier = nn.Linear(25088, 10)
param = {"E":5,"trainloader":trainloader,"student":student}
'''
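# note: knowledge_distillation trains on the teacher's hard labels; the soft-label
# loss distill_unlabeled defined above is unused. A hedged sketch of the soft
# alternative inside the batch loop (T is a temperature hyperparameter, e.g. T=4):
'''
loss = distill_unlabeled(student_net(inputs), net(inputs).detach(), T=4)
'''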
# pruning
import matplotlib.pyplot as plt
import torch.nn.utils.prune as prune
from utils import *
def prune_model_l1_unstructured(new_model, proportion):
    '''zero out the smallest-magnitude weights (L1) of every Conv2d/Linear layer'''
    for name, module in new_model.named_modules():
        if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
            print("pruned")
            prune.l1_unstructured(module, name='weight', amount=proportion)
            prune.remove(module, 'weight')
    return new_model
def random_mask(new_model, proportion):
    # maybe add a dimension for the pruning to remove entire kernels
    for name, module in new_model.named_modules():
        if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
            prune.random_unstructured(module, name='weight', amount=proportion)
    return dict(new_model.named_buffers())
def prune_model_random_unstructured(new_model, proportion):
    '''zero out a random subset of the weights of every Conv2d/Linear layer'''
    dict_mask = random_mask(new_model, proportion)
    for name, module in new_model.named_modules():
        if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
            print("pruned")
            weight_name = name + '.weight_mask'
            module.weight = nn.Parameter(module.weight * dict_mask[weight_name])
    return new_model
def train_pruning(net, optimizer, criterion, trainloader, number_epochs, value=None, mask=None):
    # train while re-applying the pruning at every step
    net.train()
    for epoch in range(number_epochs):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # split data into the image and its label
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            if inputs.size()[1] == 1:
                inputs.squeeze_(1)
                inputs = torch.stack([inputs, inputs, inputs], 1)
            # reset the gradients
            optimizer.zero_grad()
            # forward
            outputs = net(inputs)
            # backward
            loss = criterion(outputs, labels)
            loss.backward()
            if value is not None:
                net = prune_model_l1_unstructured(net, value)
            elif mask is not None:
                net = prune_model_random_unstructured(net, mask)
            # update the weights
            optimizer.step()
            # accumulate the loss
            running_loss += loss.item()
'''
M_ID=1
param={"P":.99}
M_ID=2
param={"R":.99}
'''
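# a minimal usage sketch matching the parameter blocks above:
'''
net = prune_model_l1_unstructured(net, proportion=0.99)      # M_ID=1, {"P": .99}
net = prune_model_random_unstructured(net, proportion=0.99)  # M_ID=2, {"R": .99}
'''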
import matplotlib.pyplot as plt
from utils import *
# quantization
def quantize_tensor(x, num_bits):
    '''affine (asymmetric) quantization of x onto unsigned num_bits integers'''
    qmin = 0.
    qmax = 2. ** num_bits - 1.
    min_val, max_val = torch.min(x), torch.max(x)
    scale = (max_val - min_val) / (qmax - qmin)
    # the zero point is the integer that represents the real value 0
    initial_zero_point = (qmin - min_val / scale).round()
    # clamp the zero point into the representable range
    if initial_zero_point < qmin:
        zero_point = qmin
    elif initial_zero_point > qmax:
        zero_point = qmax
    else:
        zero_point = initial_zero_point
    zero_point = int(zero_point)
    q_x = zero_point + x / scale
    q_x.clamp_(qmin, qmax).round_()
    q_x = q_x.byte()
    return {'tensor': q_x, 'scale': scale, 'zero_point': zero_point}
def dequantize_tensor(q_x):
return q_x['scale'] * (q_x['tensor'].float() - q_x['zero_point'])
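# worked round trip (hypothetical values): for x spanning [-1, 1] with num_bits=8,
# scale = 2/255 and the zero point rounds to 128, so x = 0 quantizes to q_x = 128 and
# dequantize_tensor returns scale * (128 - 128) = 0, recovering x up to rounding error.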
def fake_quantization(x, num_bits):
    '''quantize then immediately dequantize, keeping the tensor in float'''
    qmax = 2. ** num_bits - 1.
    min_val, max_val = torch.min(x), torch.max(x)
    scale = qmax / (max_val - min_val)
    x_q = (x - min_val) * scale
    x_q.clamp_(0, qmax).round_()  # clamp = min(max(x, min_value), max_value)
    x_q = x_q.byte()
    x_f_q = x_q.float() / scale + min_val
    return x_f_q
def quantization(net, num_bits):
    with torch.no_grad():
        for name, module in net.named_modules():
            if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear):
                print("quantized")
                tensor = module.weight
                tensor_q = fake_quantization(tensor, num_bits)
                module.weight = nn.Parameter(tensor_q)
    return net
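# a minimal usage sketch (num_bits=8 is an example value, not a prescribed setting):
'''
q = quantize_tensor(torch.randn(4, 4), num_bits=8)
x_hat = dequantize_tensor(q)         # ~ the original tensor, up to rounding error
net = quantization(net, num_bits=8)  # fake-quantize every Conv2d/Linear weight
'''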
# overwriting
from utils import *
def overwriting(net, NNWmethod, nbr_watermark, watermarking_dict):
    '''embed the watermark nbr_watermark more times on top of the existing one'''
    for i in range(nbr_watermark):
        Embeds(watermarking_dict["types"], NNWmethod, net, watermarking_dict)
    return net
def Embeds(types, tools, model, watermarking_dict):
    if types == 1:
        # trainable watermarking: embed while retraining on CIFAR10
        tools.init(model, watermarking_dict)
        trainset, testset, inference_transform = CIFAR10_dataset()
        # hyperparameters of training
        criterion = nn.CrossEntropyLoss()
        num_epochs = 5
        batch_size = 128
        trainloader, testloader = dataloader(trainset, testset, batch_size)
        learning_rate, momentum, weight_decay = 0.01, .9, 5e-4
        optimizer = optim.SGD([
            {'params': model.parameters()}
        ], lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
        model.train()
        epoch = 0
        print("Launching injection.....")
        while epoch < num_epochs:
            print('doing epoch', str(epoch + 1), ".....")
            loss, loss_nn, loss_w = tools.Embedder_one_step(model, trainloader, optimizer, criterion, watermarking_dict)
            loss = (loss * batch_size / len(trainloader.dataset))
            loss_nn = (loss_nn * batch_size / len(trainloader.dataset))
            loss_w = (loss_w * batch_size / len(trainloader.dataset))
            print(' loss : %.5f - loss_wm: %.5f, loss_nn: %.5f ' % (loss, loss_w, loss_nn))
            epoch += 1
    elif types == 0:
        # static watermarking: embed directly into the weights
        print("Launching injection.....")
        model = tools.Embedder(model, watermarking_dict)
    return model
'''
M_ID=6
param={"W":2,"watermarking_dict":watermarking_dict,"NNWmethods":tools}
'''
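# a minimal usage sketch (assumes `tools` and `watermarking_dict` are built as in UCHIDA.py):
'''
net = overwriting(net, tools, nbr_watermark=2, watermarking_dict=watermarking_dict)  # "W": 2
'''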
# MPAI-NNW v1.2 implementation
## Case 2
This code implements MPAI-NNW under MPAI-AIF, as described in https://mpai.community/wp-content/uploads/2023/10/Reference-Software-Neural-Network-Watermarking-V1.pdf.
All the code is written in Python.
**Implemented APIs**
1. MPAI_AIFS_GetAndParseArchive, to unzip and parse the JSON and AIMs.
2. MPAI_AIFM_AIM_{Start,Pause,Resume,Stop,GetStatus}, to operate on an AIM/AIW.
3. MPAI_AIFM_Port_Input_{Write,Read,Reset}, to operate on the ports of the AIMs.
**Controller/User Agent**
1. The Controller is built on the socket library (it waits for requests from _input.py_).
2. The User Agent can trigger and run commands by sending inputs (a minimal client sketch follows below).
3. _config.py_ shares some variables among the different files.
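A minimal sketch of how the User Agent side could talk to the Controller over a socket; the host, port, and message framing here are illustrative assumptions, not the actual protocol of _controller.py_:
```python
# hypothetical user-agent client: sends one command string to the Controller
import socket

HOST, PORT = "127.0.0.1", 65432  # assumed address; controller.py defines the real one

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    s.sendall(b'run robustness 1 {"P":0.5}')  # any command from the list below
    reply = s.recv(4096)                      # the Controller's acknowledgement
print(reply.decode())
```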
**Folders**
- **all_AIW** stores all the different AIWs that are implemented
  - NNWImp, NNWRob for the controller_NNW
- **resources** stores external elements for some use cases (uncorrelated images for ADI, context/question of the MQA, ...)
- **Attacks** contains all the attacks specified by MPAI-NNW, under the PyTorch framework.
**Specificity to MPAI-NNW**
1. _utils.py_ contains functions related to the dataset/dataloader under the PyTorch framework.
2. _UCHIDA.py_ / _ADI.py_ correspond to the Neural Network Watermarking technologies under evaluation.
3. AIW.zip is composed of the corresponding .json and the AIMs as Python files.
## Installation
The code was designed and tested on Ubuntu 20.04 using Anaconda 23.7.2 and Python 3.9.
An environment with all the necessary libraries can be created using:
```bash
conda create --name <env> --file requirements.txt
```
## Run
**Initialisation**
First, the Controller should be initialized (the flag `-W ignore` can be added to suppress warning messages during execution):
```bash
conda activate <env>
python controller.py
Controller Initialized
```
To send commands to the Controller as a user agent,
open a second terminal and run:
```bash
conda activate <env>
python input.py
input: <your command>
```
**Emulation of MPAI Store**
Expose a local folder of the computer as a website using the command:
```bash
python3 -m http.server
```
Then the following command simulates downloading the AIW from a website:
```bash
conda activate <env>
python input.py
input: wget http://0.0.0.0:8000/[yourpath]/AIW.zip
```
### List of commands for controller_NNW
This command opens a window for selecting the AIW.zip archive:
```bash
(env) python input.py
input: getparse
```
This command sets the Computational Cost flag ON:
```bash
(env) python input.py
input: ComputationalCost True
```
This command runs the Robustness AIW with Modification **1** and Parameters **{"P":0.5}**:
```bash
(env) python input.py
input: run robustness 1 {"P":0.5}
```
This command runs the Imperceptibility AIW with **vgg16** as the watermarked AIM, trained on the **CIFAR10** dataset:
```bash
(env) python input.py
input: run imperceptibility vgg16 cifar10
```
### Some warnings
1. The AIW should be named AIW.zip and contain the .json and the needed AIMs.
2. Commands must be typed exactly; the code does not tolerate misspellings.
## Licence
[Licence](https://mpai.community/standards/mpai-cui/framework-licence/) information is detailed on the MPAI website.
'''
Implementation of the method presented in Yusuke Uchida, Yuki Nagai, Shigeyuki Sakazawa, and Shin’ichi Satoh,
“Embedding watermarks into deep neural networks,”
in Proceedings of the 2017 ACM on International Conference on Multimedia Retrieval,
2017, pp. 269–277.
'''
from utils import *
import time
class Uchi_tools():
    def __init__(self) -> None:
        super(Uchi_tools, self).__init__()
    def Embedder_one_step(self, net, trainloader, optimizer, criterion, watermarking_dict):
        '''
        :param watermarking_dict: dictionary with all watermarking elements
        :return: the different losses (global loss, task loss, watermark loss)
        '''
        running_loss = 0
        running_loss_nn = 0
        running_loss_watermark = 0
        for i, data in enumerate(trainloader, 0):
            # split data into the image and its label
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            if inputs.size()[1] == 1:
                inputs.squeeze_(1)
                inputs = torch.stack([inputs, inputs, inputs], 1)
            # reset the gradients
            optimizer.zero_grad()
            # forward
            outputs = net(inputs)
            # backward
            loss_nn = criterion(outputs, labels)
            # watermark regularizer
            loss_watermark = self.loss(net, watermarking_dict['weight_name'], watermarking_dict['X'], watermarking_dict['watermark'])
            loss = loss_nn + watermarking_dict['lambd'] * loss_watermark  # Uchida
            loss.backward()
            # update the weights
            optimizer.step()
            # accumulate the losses
            running_loss += loss.item()
            running_loss_nn += loss_nn.item()
            running_loss_watermark += loss_watermark.item()
        return running_loss, running_loss_nn, running_loss_watermark
    def Decoder(self, net, watermarking_dict):
        """
        :param watermarking_dict: dictionary with all watermarking elements
        :return: the extracted (rounded) watermark
        """
        # watermarking_dict = np.load(file_watermark, allow_pickle='TRUE').item()  # retrieve the dictionary
        watermark = watermarking_dict['watermark'].to(device)
        X = watermarking_dict['X'].to(device)
        weight_name = watermarking_dict["weight_name"]
        extraction = self.extraction(net, weight_name, X)
        extraction_r = torch.round(extraction)  # <.5 -> 0 and >.5 -> 1
        res = self.hamming(watermark, extraction_r) / len(watermark)  # bit error rate (unused here)
        time.sleep(1)
        return extraction_r
    def init(self, net, watermarking_dict):
        '''
        :param net: network
        :param watermarking_dict: dictionary with all watermarking elements
        :return: watermarking_dict with a new entry: the secret key matrix X
        '''
        M = self.size_of_M(net, watermarking_dict['weight_name'])
        T = len(watermarking_dict['watermark'])
        X = torch.randn((T, M), device=device)
        watermarking_dict['X'] = X
        watermarking_dict["types"] = 1
        return watermarking_dict
    def projection(self, X, w):
        '''
        :param X: secret key matrix
        :param w: flattened weights
        :return: sigmoid of the matrix product of the two inputs
        '''
        sigmoid_func = nn.Sigmoid()
        res = torch.matmul(X, w)
        sigmoid = sigmoid_func(res)
        return sigmoid
    def flattened_weight(self, net, weights_name):
        '''
        :param net: targeted network
        :param weights_name: targeted layer's name
        :return: a vector of dimension CxKxK (weights averaged over the filters, then flattened)
        '''
        for name, parameters in net.named_parameters():
            if weights_name in name:
                f_weights = torch.mean(parameters, dim=0)
                f_weights = f_weights.view(-1, )
        return f_weights
    def extraction(self, net, weights_name, X):
        '''
        :param net: targeted network
        :param weights_name: targeted layer's name
        :param X: secret key matrix
        :return: a binary vector (watermark)
        '''
        W = self.flattened_weight(net, weights_name)
        return self.projection(X, W)
    def hamming(self, s1, s2):
        '''
        :param s1: sequence 1
        :param s2: sequence 2
        :return: the Hamming distance between the two vectors
        '''
        assert len(s1) == len(s2)
        return sum(c1 != c2 for c1, c2 in zip(s1, s2))
    def size_of_M(self, net, weight_name):
        '''
        :param net: targeted network
        :param weight_name: targeted layer's name
        :return: the 2nd dimension of the secret key matrix X
        '''
        for name, parameters in net.named_parameters():
            if weight_name in name:
                return parameters.size()[1] * parameters.size()[2] * parameters.size()[3]
    def loss(self, net, weights_name, X, watermark):
        '''
        :param net: targeted network
        :param weights_name: targeted layer's name
        :param X: secret key matrix
        :param watermark: the watermark
        :return: Uchida's binary cross-entropy loss
        '''
        loss = 0
        W = self.flattened_weight(net, weights_name)
        yj = self.projection(X, W)
        for i in range(len(watermark)):
            loss += watermark[i] * torch.log2(yj[i]) + (1 - watermark[i]) * torch.log2(1 - yj[i])
        return -loss / len(watermark)
# you can copy-paste this section into main to test Uchida's method
'''
tools=Uchi_tools()
weight_name = 'features.19.weight'
T = 64
watermark = torch.tensor(np.random.choice([0, 1], size=(T), p=[1. / 3, 2. / 3]), device=device)
watermarking_dict={'lambd':0.1, 'weight_name':weight_name,'watermark':watermark, "types":1}
'''
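# a minimal embed/decode round trip using the setup above (assumes `net` is a VGG16 on
# device and `trainloader`/`optimizer`/`criterion` are defined as elsewhere in the repo):
'''
watermarking_dict = tools.init(net, watermarking_dict)  # draws the secret key X
tools.Embedder_one_step(net, trainloader, optimizer, criterion, watermarking_dict)
extracted = tools.Decoder(net, watermarking_dict)
ber = tools.hamming(watermarking_dict['watermark'], extracted) / T  # bit error rate
'''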
import UCHIDA
import time
from utils import *
from Attacks import *
# level change: an AIM must be an object that exposes ports
class AIM():
    model = None
    parameters = None
    testingDataset = None
    trainingDataset = None
    output_0 = None
    def trainAIM(self, parameters, trainingDataset):
        model = tv.models.vgg16()
        model.classifier = nn.Linear(25088, 10)
        model.load_state_dict(parameters["model_state_dict"])
        # halve the batch size until the DataLoader can be built
        batch_size = 128
        while batch_size > 0:
            try:
                trainloader = torch.utils.data.DataLoader(trainingDataset, batch_size=batch_size, shuffle=False,
                                                          num_workers=2)
                break
            except:
                batch_size = int(batch_size / 2)
        criterion = nn.CrossEntropyLoss()
        learning_rate, momentum, weight_decay = 0.01, .9, 5e-4
        optimizer = optim.SGD([
            {'params': model.parameters()}
        ], lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
        model.train()
        model.to(device)
        for epoch in range(10):
            for i, data in enumerate(trainloader, 0):
                # split data into the image and its label
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)
                if inputs.size()[1] == 1:
                    inputs.squeeze_(1)
                    inputs = torch.stack([inputs, inputs, inputs], 1)
                # reset the gradients
                optimizer.zero_grad()
                # forward
                outputs = model(inputs)
                # backward
                loss = criterion(outputs, labels)
                loss.backward()
                # update the weights
                optimizer.step()
        return model
    def funcAIM(self, model, parameters, testingDataset):
        '''
        return the performance of the model on the testing dataset
        :param parameters: checkpoint holding the network's state dict
        :param testingDataset: testing data
        :return: the top-1 error (in %)
        '''
        model.load_state_dict(parameters["model_state_dict"])
        # halve the batch size until the DataLoader can be built
        batch_size = 128
        while batch_size > 0:
            try:
                testloader = torch.utils.data.DataLoader(testingDataset, batch_size=batch_size, shuffle=False, num_workers=2)
                return self.testingAIM(model, testloader)
            except:
                batch_size = int(batch_size / 2)
    def testingAIM(self, AIM, testloader):
        correct = 0
        total = 0
        AIM.to(device)
        AIM.eval()
        # torch.no_grad: do not track gradients during evaluation
        with torch.no_grad():
            for data in testloader:
                inputs, labels = data
                if inputs.size()[1] == 1:
                    inputs.squeeze_(1)
                    inputs = torch.stack([inputs, inputs, inputs], 1)
                inputs = inputs.to(device)
                labels = labels.to(device)
                outputs = AIM(inputs)
                if len(outputs) == 2: outputs, _ = outputs
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum()
        return 100 - (100 * float(correct) / total)
    def run(self):
        self.output_0 = self.funcAIM(self.model, self.parameters, self.testingDataset)
class WatermarkEmbedder():
    AIM = None
    ##
    output_0 = None
    def funcWatermarkEmbedder(self, model):
        # TODO: differentiate this function based on whether the trainloader is None or not
        model.to(device)
        # to be adapted for other watermarking methods
        tools = UCHIDA.Uchi_tools()
        weight_name = 'features.19.weight'
        T = 64
        watermark = torch.tensor(np.random.choice([0, 1], size=(T), p=[1. / 3, 2. / 3]), device=device)
        watermarking_dict = {'lambd': 0.1, 'weight_name': weight_name, 'watermark': watermark, "types": 1}
        if watermarking_dict["types"] == 1:
            # trainable watermarking: embed while retraining on CIFAR10
            trainset, testset, inference_transform = CIFAR10_dataset()
            # hyperparameters of training
            num_epochs = 25
            batch_size = 128
            tools.init(model, watermarking_dict)
            trainloader, testloader = dataloader(trainset, testset, batch_size)
            criterion = nn.CrossEntropyLoss()
            learning_rate, momentum, weight_decay = 0.01, .9, 5e-4
            optimizer = optim.SGD([
                {'params': model.parameters()}
            ], lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
            model.train()
            epoch = 0
            print("Launching injection.....")
            while epoch < num_epochs:
                print('doing epoch', str(epoch + 1), ".....")
                loss, loss_nn, loss_w = tools.Embedder_one_step(model, trainloader, optimizer, criterion,
                                                                watermarking_dict)
                loss = (loss * batch_size / len(trainloader.dataset))
                loss_nn = (loss_nn * batch_size / len(trainloader.dataset))
                loss_w = (loss_w * batch_size / len(trainloader.dataset))
                print(' loss : %.5f - loss_wm: %.5f, loss_nn: %.5f ' % (loss, loss_w, loss_nn))
                epoch += 1
        else:
            # static watermarking: embed directly into the weights
            tools.init(model, watermarking_dict)
            model = tools.Embedder(model, watermarking_dict)
        # save the checkpoints
        np.save('watermarking_dict.npy', watermarking_dict)
        torch.save({
            'model_state_dict': model.state_dict(),
        }, 'weights')
        # load the checkpoint back (on CPU)
        checkpoints = torch.load('weights', map_location=torch.device('cpu'))
        return checkpoints
    def run(self):
        self.output_0 = self.funcWatermarkEmbedder(self.AIM)
class Comparator():
    unwatermarked = None
    watermarked = None
    testingDataset = None
    ##
    output_0 = None
    def funcComparator(self, unwatermarked, watermarked, testingDataset):
        undertestAIM = AIM()
        # both inputs are checkpoints; rebuild the architecture before loading them
        model = tv.models.vgg16()
        model.classifier = nn.Linear(25088, 10)
        res_wm = undertestAIM.funcAIM(model, watermarked, testingDataset)
        res_unwm = undertestAIM.funcAIM(model, unwatermarked, testingDataset)
        return np.abs(res_unwm - res_wm) / res_wm, res_wm
    def run(self):
        self.output_0 = self.funcComparator(self.unwatermarked, self.watermarked, self.testingDataset)
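# a minimal wiring sketch mirroring the imperceptibility Topology in the AIW json
# (`model`, `unwm_checkpoint`, and `testset` are assumed to exist in the caller):
'''
embedder = WatermarkEmbedder()
embedder.AIM = model
embedder.run()                             # -> checkpoint of the watermarked model
comparator = Comparator()
comparator.watermarked = embedder.output_0
comparator.unwatermarked = unwm_checkpoint
comparator.testingDataset = testset
comparator.run()                           # -> (relative error gap, watermarked error)
'''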
{
"$schema": "",
"$id": "",
"title": "NNW NNW v1 AIW/AIM",
"Identifier": {
"ImplementerID": "/* String assigned by IIDRA */",
"Specification": {
"Standard": "MPAI-NNW",
"AIW": "NNW-imperceptibility",
"AIM": "NNW-imperceptibility",
"Version": "1"
}
},
"APIProfile": "basic",
"Description": "This AIF is used to call the AIW of NNW imperceptibility evaluation ",
"Types": [
{
"Name": "dataset",
"Type": "dataset"
},
{
"Name": "parameters",
"Type": "tensor[]"
},
{
"Name": "inference",
"Type": "output"
},
{
"Name": "bitstring",
"Type": "uint8[]"
}
],
"Ports": [
{
"Name": "training dataset",
"Direction": "Input",
"RecordType": "dataset"
},
{
"Name": "watermarked parameter",
"Direction": "InputOutput",
"RecordType": "parameters"
},
{
"Name": "watermarked inference",
"Direction": "InputOutput",
"RecordType": "inference"
},
{
"Name": "testing dataset",
"Direction": "Input",
"RecordType": "dataset"
},
{
"Name": "unwatermarked parameter",
"Direction": "InputOutput",
"RecordType": "parameters"
},
{
"Name": "unwatermarked inference",
"Direction": "InputOutput",
"RecordType": "inference"
},
{
"Name": "payload",
"Direction": "Input",
"RecordType": " bitstring "
}
],
"SubAIMs": [
{
"Name": "AIM",
"Identifier": {
"ImplementerID": "/* String assigned by IIDRA */",
"Specification": {
"Standard": "MPAI-NNW",
"AIW": "NNW-imperceptibility",
"AIM": "AIM",
"Version": "1"
}
}
},
{
"Name": "WatermarkEmbedder",
"Identifier": {
"ImplementerID": "/* String assigned by IIDRA */",
"Specification": {
"Standard": "MPAI-NNW",
"AIW": "NNW-imperceptibility",
"AIM": "WatermarkEmbedder",
"Version": "1"
}
}
},
{
"Name": "AIMtrainer",
"Identifier": {
"ImplementerID": "/* String assigned by IIDRA */",
"Specification": {
"Standard": "MPAI-NNW",
"AIW": "NNW-imperceptibility",
"AIM": "AIMtrainer",
"Version": "1"
}
}
},
{
"Name": "Comparator",
"Identifier": {
"ImplementerID": "/* String assigned by IIDRA */",
"Specification": {
"Standard": "MPAI-NNW",
"AIW": "NNW-imperceptibility",
"AIM": "Comparator",
"Version": "1"
}
}
}
],
"Topology": [
{
"Output": {
"AIMName": "",
"PortName": "Training dataset_1"
},
"Input": {
"AIMName": "AIMtrainer",
"PortName": " Training dataset_1"
}
},
{
"Output": {
"AIMName": "AIMtrainer",
"PortName": "unwatermarked parameter"
},
"Input": {
"AIMName": "AIM",
"PortName": "unwatermarked parameter"
}
},
{
"Output": {
"AIMName": "",
"PortName": "Testing dataset_1"
},
"Input": {
"AIMName": "AIM",
"PortName": "Testing dataset_1"
}
},
{
"Output": {
"AIMName": "AIM",
"PortName": "unwatermarked inference"
},
"Input": {
"AIMName": "Measure",
"PortName": "unwatermarked inference"
}
},
{
"Output": {
"AIMName": "",
"PortName": "Training dataset_2"
},
"Input": {
"AIMName": "WatermarkEmbedder",
"PortName": " Training dataset_2"
}
},
{
"Output": {
"AIMName": "",
"PortName": "payload"
},
"Input": {
"AIMName": "WatermarkEmbedder",
"PortName": "payload"
}
},
{
"Output": {
"AIMName": "WatermarkEmbedder",
"PortName": "watermarked parameter"
},
"Input": {
"AIMName": "AIM",
"PortName": "watermarked parameter"
}
},
{
"Output": {
"AIMName": "",
"PortName": "Testing dataset_2"
},
"Input": {
"AIMName": "AIM",
"PortName": "Testing dataset_2"
}
},
{
"Output": {
"AIMName": "AIM",
"PortName": "watermarked inference"
},
"Input": {
"AIMName": "Measure",
"PortName": "watermarked inference"
}
}
]
}