Commit 5d6523f6 authored by Carl De Sousa Trias

initial push

parent 46c44bb3
import os
import json
from zipfile import ZipFile
from multiprocessing import Process

import psutil

import config

error_t = True  # generic flag returned by the API functions below
def MPAI_AIFS_GetAndParseArchive(filename):
    '''filename is a zip file containing at least a ".json"
    :return: the parsed data structure'''
    base, _ = os.path.splitext(filename)
    os.makedirs(base, exist_ok=True)
    # Extract all the members of the zip into a dedicated folder.
    with ZipFile(filename, 'r') as zObject:
        zObject.extractall(path=base)
    for files in os.listdir(base):
        if '.json' in files:
            with open(os.path.join(base, files)) as json_file:
                return json.load(json_file)
    return error_t
def MPAI_AIFU_Controller_Initialize():
    '''initialize the controller and switch it on'''
    return


def MPAI_AIFU_Controller_Destroy():
    '''switch off the controller'''
    return
def MPAI_AIFM_AIM_Start(name):
    '''start the AIM named name (after parsing)
    :return: AIW_ID (int)'''
    # NOTE: run() is invoked here, so the AIM executes synchronously and its
    # output ports are populated in this process; the Process object serves
    # as a handle for the pause/resume/stop/status APIs.
    p1 = Process(target=config.AIMs[name].run())
    p1.start()
    config.dict_process[name.lower()] = p1
    return
def MPAI_AIFM_AIM_Pause(name):
    '''Pause the AIM named name'''
    if name.lower() in config.dict_process:
        temp_p = psutil.Process(config.dict_process[name.lower()].pid)
        temp_p.suspend()
        print(name, "paused")
    else:
        print(name, "isn't running")
    return error_t


def MPAI_AIFM_AIM_Resume(name):
    '''Resume the AIM named name'''
    if name.lower() in config.dict_process:
        temp_p = psutil.Process(config.dict_process[name.lower()].pid)
        temp_p.resume()
        print(name, "resumed")
    else:
        print(name, "isn't running")
    return error_t


def MPAI_AIFM_AIM_Stop(name):
    '''Stop the AIM named name'''
    if name.lower() in config.dict_process:
        config.dict_process[name.lower()].terminate()
        print(name, "stopped")
    else:
        print(name, "isn't running")
    return


def MPAI_AIFM_AIM_GetStatus(name):
    '''current state of the AIM named name
    :return: status (int) [MPAI_AIM_ALIVE, MPAI_AIM_DEAD]'''
    if name.lower() in config.dict_process:
        print("status of %s: %s" % (name, str(config.dict_process[name.lower()].is_alive())))
    else:
        print(name, "was never initiated")
    return error_t
def MPAI_AIFM_Port_Input_Write(AIM_name, port_name, message):
    setattr(config.AIMs[AIM_name], port_name, message)
    return error_t


def MPAI_AIFM_Port_Output_Read(AIM_name, port_name):
    result = getattr(config.AIMs[AIM_name], port_name)
    return result


def MPAI_AIFM_Port_Reset(AIM_name, port_name):
    setattr(config.AIMs[AIM_name], port_name, None)
    return error_t
# MPAI-NNW v1.2 implementation
## Case 3
This code implements MPAI-NNW under MPAI-AIF, as described in [TBC] https://mpai.community/wp-content/uploads/2023/10/Reference-Software-Neural-Network-Watermarking-V1.pdf.
All the code is written in Python.
**Implemented APIs**
1. MPAI_AIFS_GetAndParseArchive: unzips the archive and parses the JSON and AIMs.
2. MPAI_AIFM_AIM_{Start,Pause,Resume,Stop,GetStatus}: operate on an AIM/AIW.
3. MPAI_AIFM_Port_{Input_Write,Output_Read,Reset}: operate on the ports of the AIMs (see the sketch below).
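As an illustration, here is a minimal sketch of how these calls chain together for the WaterChecker AIW; the archive path "AIW.zip" and the input file name are assumptions, and _controller.py_ normally drives this sequence through the `getparse` and `run` commands:
```python
from APIs import (MPAI_AIFS_GetAndParseArchive, MPAI_AIFM_AIM_Start,
                  MPAI_AIFM_Port_Input_Write, MPAI_AIFM_Port_Output_Read)
import config

# Unzip the archive and load its .json description ("AIW.zip" is illustrative).
json_dict = MPAI_AIFS_GetAndParseArchive("AIW.zip")

# Instantiate the SubAIMs, as controller.py does after 'getparse'.
import AIW.AIMs_files as AIMs_file
for aim in json_dict['SubAIMs']:
    config.AIMs[aim["Name"]] = getattr(AIMs_file, aim["Name"])()

# Feed the external input port, run the AIM, then read its output port.
MPAI_AIFM_Port_Input_Write("NNWProof", "Answer", "AudioAnswer.wav")  # assumed .wav file
MPAI_AIFM_AIM_Start("NNWProof")
print(MPAI_AIFM_Port_Output_Read("NNWProof", "Proof"))
```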
**Controller/User Agent**
1. The Controller is built on the socket library (it waits for requests from _input.py_).
2. The User Agent can trigger and run commands by sending inputs (see the snippet below).
3. _config.py_ shares some variables among the different files.
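Commands are plain whitespace-separated strings sent over a local TCP connection. A minimal user-agent sketch (the port number must match the one bound in _controller.py_; the AIM name is hypothetical):
```python
import socket

# Connect to the Controller on the local machine and send one command.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((socket.gethostname(), 12468))
s.sendall(b"status nnwproof")  # "nnwproof" is an assumed AIM name
s.close()
```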
**Folders**
- **all_AIW** stores all the different AIWs that are implemented
  - NNW_NNW-QAM, NNW_NNW-QAM-Checker for the Multimodal Question Answering watermarking use case
- **resources** stores external elements for some use cases (uncorrelated images for ADI, context/question of the MQA, ...)
## Installation
The code was designed and tested on Ubuntu 20.04 using Anaconda 23.7.2 and Python 3.9.
An environment with all the necessary libraries can be created using:
```bash
conda create --name <env> --file requirements.txt
```
## Run
**Initialisation**
First, the Controller should be initialized (the interpreter flag `-W ignore` can be added to suppress warning messages during execution):
```bash
conda activate <env>
python controller.py
Controller Initialized
```
To send commands to the controller as a user agent, open a second terminal and run:
```bash
conda activate <env>
python input.py
input: <your command>
```
**Emulation of MPAI Store**
Serve a folder of the computer as a website using the command:
```bash
python3 -m http.server
```
Then this command simulates downloading the AIW from a website:
```bash
conda activate <env>
python input.py
input: wget http://0.0.0.0:8000/[yourpath]/AIW.zip
```
### **List of commands for the controller**
This command will open a window for selecting the AIW.zip archive:
```bash
(env) python input.py
input: getparse
```
This command will run the AIW (after it has been parsed):
```bash
(env) python input.py
input: run all
```
Windows created with tkinter will ask for the different files.
### Some warnings
1. The AIW should be named AIW.zip and contain the .json and the needed AIMs.
2. Commands must be spelled exactly; misspellings are not handled.
# Licence
[Licence](https://mpai.community/standards/mpai-cui/framework-licence/) information is detailed on the MPAI website.
import collections

import numpy as np
import torch
import evaluate
from evaluate import load
from datasets import load_dataset
from scipy.io.wavfile import write
from tqdm import tqdm
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
from transformers.models.whisper.english_normalizer import BasicTextNormalizer
from transformers.pipelines.pt_utils import KeyDataset

from utils import *  # provides `device`, among others
import wavmark
from wavmark.utils import file_reader
class NNWProof():
    Answer = None  # input port
    ##
    Proof = False  # output port

    def funcNNWProof(self, input):
        '''
        Verify that the inference (an audio file) carries the watermark
        '''
        payload = [0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1]
        model = wavmark.load_model().to(device)
        signal = file_reader.read_as_single_channel(input, aim_sr=16000)
        # Decode the watermark and compare it to the expected payload.
        payload_decoded, _ = wavmark.decode_watermark(model, signal, show_progress=True)
        if payload_decoded is None:
            return False
        # Bit Error Rate in percent; 0 means the watermark matches exactly.
        BER = (payload != payload_decoded).mean() * 100
        return BER == 0

    def run(self):
        self.Proof = self.funcNNWProof(self.Answer)
{
  "$schema": "",
  "$id": "",
  "title": "WaterChecker",
  "Identifier": {
    "ImplementerID": "/* String assigned by IIDRA */",
    "Specification": {
      "Standard": "MPAI-NNW",
      "AIW": "NNW-WaterChecker",
      "AIM": "NNW-WaterChecker",
      "Version": "1"
    }
  },
  "APIProfile": "basic",
  "Description": "This AIF checks the answer produced by an NN",
  "Types": [
    {"Name": "answer_t", "Type": "uint8[]"},
    {"Name": "proof_t", "Type": "boolean"}
  ],
  "Ports": [
    {"Name": "Answer", "Direction": "InputOutput", "RecordType": "answer_t"},
    {"Name": "Proof", "Direction": "OutputInput", "RecordType": "proof_t"}
  ],
  "SubAIMs": [
    {
      "Name": "NNWProof",
      "Identifier": {
        "ImplementerID": "/* String assigned by IIDRA */",
        "Specification": {
          "Standard": "MPAI-NNW",
          "AIW": "NNW-QAUsage",
          "AIM": "NNWProof",
          "Version": "1"
        }
      }
    }
  ],
  "Topology": [
    {
      "Output": {"AIMName": "", "PortName": "Answer"},
      "Input": {"AIMName": "NNWProof", "PortName": "Answer"}
    },
    {
      "Output": {"AIMName": "NNWProof", "PortName": "Proof"},
      "Input": {"AIMName": "", "PortName": "Proof"}
    }
  ]
}
import collections

import numpy as np
import torch
import evaluate
from evaluate import load
from datasets import load_dataset
from scipy.io.wavfile import write
from tqdm import tqdm
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
from transformers.models.whisper.english_normalizer import BasicTextNormalizer
from transformers.pipelines.pt_utils import KeyDataset
from PIL import Image
import soundfile as sf
from lavis.models import load_model_and_preprocess

from utils import *  # provides `device`, among others
import wavmark
from playsound import playsound
class QuestionAnswering():
    QuestionText = None  # input ports
    RawImage = None
    ##
    AnswerText = None    # output port

    def funcQuestionAnswering(self, raw_image_path, question):
        '''
        Apply an NN to answer the question about the image
        '''
        raw_image = Image.open(raw_image_path).convert("RGB")
        pipe = pipeline("visual-question-answering", model="Salesforce/blip-vqa-base")
        output = pipe(raw_image, question, top_k=1)[0]
        return output['answer']

    def run(self):
        self.AnswerText = self.funcQuestionAnswering(self.RawImage, self.QuestionText)
class SpeechRecognition():
    QuestionAudio = None  # input port
    ##
    QuestionText = None   # output port

    def funcSpeechRecognition(self, input):
        '''
        Transcribe the spoken question
        '''
        if self.QuestionText is None:
            playsound(input)
        speech_reco = pipeline(
            "automatic-speech-recognition", model="openai/whisper-base", device=device
        )
        res = speech_reco(input)
        return res["text"]

    def run(self):
        self.QuestionText = self.funcSpeechRecognition(self.QuestionAudio)
class SpeechSynthesis():
    AnswerText = None   # input port
    AnswerAudio = None  # output port

    def funcSpeechSynthesis(self, input):
        '''
        Synthesise the answer as speech and embed the watermark
        '''
        synthesiser = pipeline("text-to-speech", "microsoft/speecht5_tts")
        embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
        speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
        # You can replace this embedding with your own as well.
        speech = synthesiser("The answer to your question is: " + input,
                             forward_params={"speaker_embeddings": speaker_embedding})
        payload = [0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1]
        model = wavmark.load_model().to(device)
        signal, sample_rate = speech["audio"], speech["sampling_rate"]
        watermarked_signal, _ = wavmark.encode_watermark(model, signal, payload, show_progress=True)
        # Save the watermarked audio as a new wav and play it back.
        path_output = "AudioAnswer.wav"
        sf.write(path_output, watermarked_signal, samplerate=16000)
        playsound(path_output)
        return path_output

    def run(self):
        self.AnswerAudio = self.funcSpeechSynthesis(self.AnswerText)
{
  "$schema": "",
  "$id": "",
  "title": "QASRUsage",
  "Identifier": {
    "ImplementerID": "/* String assigned by IIDRA */",
    "Specification": {
      "Standard": "MPAI-NNW",
      "AIW": "NNW-QASRUsage",
      "AIM": "NNW-QASRUsage",
      "Version": "1"
    }
  },
  "APIProfile": "basic",
  "Description": "This AIF is an example of an integrated NNW use case",
  "Types": [
    {"Name": "audio_t", "Type": "uint8[]"},
    {"Name": "question_t", "Type": "uint8[]"},
    {"Name": "image_t", "Type": "uint8[]"},
    {"Name": "answer_t", "Type": "uint8[]"},
    {"Name": "answer_audio_t", "Type": "uint8[]"}
  ],
  "Ports": [
    {"Name": "QuestionAudio", "Direction": "InputOutput", "RecordType": "audio_t"},
    {"Name": "QuestionText", "Direction": "InputOutput", "RecordType": "question_t"},
    {"Name": "RawImage", "Direction": "InputOutput", "RecordType": "image_t"},
    {"Name": "AnswerText", "Direction": "InputOutput", "RecordType": "answer_t"},
    {"Name": "AnswerAudio", "Direction": "InputOutput", "RecordType": "answer_audio_t"}
  ],
  "SubAIMs": [
    {
      "Name": "QuestionAnswering",
      "Identifier": {
        "ImplementerID": "/* String assigned by IIDRA */",
        "Specification": {"Standard": "MPAI-NNW", "AIW": "NNW-QAUsage", "AIM": "QuestionAnswering", "Version": "1"}
      }
    },
    {
      "Name": "SpeechRecognition",
      "Identifier": {
        "ImplementerID": "/* String assigned by IIDRA */",
        "Specification": {"Standard": "MPAI-NNW", "AIW": "NNW-QAUsage", "AIM": "SpeechRecognition", "Version": "1"}
      }
    },
    {
      "Name": "SpeechSynthesis",
      "Identifier": {
        "ImplementerID": "/* String assigned by IIDRA */",
        "Specification": {"Standard": "MPAI-NNW", "AIW": "NNW-QAUsage", "AIM": "SpeechSynthesis", "Version": "1"}
      }
    }
  ],
  "Topology": [
    {
      "Output": {"AIMName": "", "PortName": "QuestionAudio"},
      "Input": {"AIMName": "SpeechRecognition", "PortName": "QuestionAudio"}
    },
    {
      "Output": {"AIMName": "SpeechRecognition", "PortName": "QuestionText"},
      "Input": {"AIMName": "QuestionAnswering", "PortName": "QuestionText"}
    },
    {
      "Output": {"AIMName": "", "PortName": "RawImage"},
      "Input": {"AIMName": "QuestionAnswering", "PortName": "RawImage"}
    },
    {
      "Output": {"AIMName": "QuestionAnswering", "PortName": "AnswerText"},
      "Input": {"AIMName": "SpeechSynthesis", "PortName": "AnswerText"}
    },
    {
      "Output": {"AIMName": "SpeechSynthesis", "PortName": "AnswerAudio"},
      "Input": {"AIMName": "", "PortName": "AnswerAudio"}
    },
    {
      "Output": {"AIMName": "SpeechRecognition", "PortName": "QuestionText"},
      "Input": {"AIMName": "", "PortName": "QuestionText"}
    }
  ]
}
# config.py: variables shared among the different files
message = []        # last command received by the Controller
dict_process = {}   # AIM name (lowercase) -> multiprocessing.Process handle
AIMs = {}           # AIM name -> instantiated AIM object
AIM_dict = {}       # "SubAIMs" section of the parsed .json
Topology = []       # "Topology" section of the parsed .json
# controller.py  (socket pattern after
# https://www.bogotobogo.com/python/python_network_programming_server_client.php)
import socket
import time
from multiprocessing import Process
from APIs import *
import psutil
import os
import ast
import tkinter as tk
from tkinter import filedialog, simpledialog
import wget
import config
from PIL import Image

# create a socket object
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# get local machine name
host = socket.gethostname()
port = 12468  # must match the port used by input.py
# bind to the port
serversocket.bind((host, port))
# queue up to 5 requests
serversocket.listen(5)
print("Controller Initialized")
CompCostFlag = False

while True:
    # establish a connection
    clientsocket, addr = serversocket.accept()
    data = clientsocket.recv(1024)
    if not data:
        break
    message = data.decode().split()
    if "help" in message[0].lower():
        print(" ----------------------------------------")
        print(" this program is the implementation of NNW in the AIF")
        print(" you can run an AIM/AIW by sending 'run XX'")
        print(" you can pause an AIM/AIW by sending 'pause XX'")
        print(" you can resume an AIM/AIW by sending 'resume XX'")
        print(" you can obtain the status of an AIM/AIW by sending 'status XX'")
        print(" you can end the program by typing 'exit'")
        print(" ----------------------------------------")
    elif "wget" in message[0].lower():
        test = wget.download(message[1])
        print(type(test))
    elif "getparse" in message[0].lower():
        root = tk.Tk()
        root.withdraw()
        filename = filedialog.askopenfilename(
            title='Select the zip (json and AIMs)',
            filetypes=(("Zip files", "*.zip"), ("all files", "*.*")))
        json_dict = MPAI_AIFS_GetAndParseArchive(filename)
        time.sleep(.5)
        import AIW.AIMs_files as AIMs_file  # the AIMs file comes from the .zip
        config.AIM_dict = json_dict['SubAIMs']
        config.Topology = json_dict['Topology']
        for i in range(len(json_dict['SubAIMs'])):
            config.AIMs[config.AIM_dict[i]["Name"]] = getattr(AIMs_file, config.AIM_dict[i]["Name"])()
        print(".json parsed")
    elif 'write' in message[0].lower():
        # message[1] AIM_name, message[2] port_name, message[3] what to write
        MPAI_AIFM_Port_Input_Write(message[1], message[2], message[3])
    elif "read" in message[0].lower():
        # message[1] AIM_name, message[2] port_name
        result = MPAI_AIFM_Port_Output_Read(message[1], message[2])
        print(message[2], "of", message[1], ":", result, type(result))
    elif "reset" in message[0].lower():
        # message[1] AIM_name, message[2] port_name
        MPAI_AIFM_Port_Reset(message[1], message[2])
    elif 'run' in message[0].lower():
        for elements in config.Topology:
            if elements["Output"]["AIMName"] == "":  # no producing AIM: external input
                root = tk.Tk()
                root.withdraw()
                path = filedialog.askopenfilename(title='Select ' + str(elements["Input"]["PortName"]))
                MPAI_AIFM_Port_Input_Write(elements["Input"]["AIMName"],
                                           elements["Input"]["PortName"], path)
            else:
                MPAI_AIFM_AIM_Start(elements["Output"]["AIMName"])
                if elements["Input"]["AIMName"] == "":  # no consuming AIM: external output
                    print("Output of", elements["Output"]["AIMName"], "- port", elements["Output"]["PortName"])
                    print()
                    print(MPAI_AIFM_Port_Output_Read(elements["Output"]["AIMName"],
                                                     elements["Output"]["PortName"]))
                    print()
                else:
                    MPAI_AIFM_Port_Input_Write(elements["Input"]["AIMName"], elements["Input"]["PortName"],
                                               MPAI_AIFM_Port_Output_Read(elements["Output"]["AIMName"],
                                                                          elements["Output"]["PortName"]))
    elif 'status' in message[0].lower():
        print(config.dict_process)
        MPAI_AIFM_AIM_GetStatus(message[1])
    elif 'pause' in message[0].lower():
        MPAI_AIFM_AIM_Pause(message[1])
    elif 'resume' in message[0].lower():
        MPAI_AIFM_AIM_Resume(message[1])
    elif 'stop' in message[0].lower():
        MPAI_AIFM_AIM_Stop(message[1])
    elif "exit" in message[0].lower():
        print("ending session...")
        break
    else:
        print("input not implemented")
    clientsocket.close()
print("session ended")
### TODO: https://docs.python.org/3/library/multiprocessing.html
# input.py (user agent)
import socket

# create a socket object
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# get local machine name
host = socket.gethostname()
port = 12468  # must match the port bound by controller.py
# connect to the controller on the port
s.connect((host, port))
message = input('input:')
s.sendall(message.encode())
# This file may be used to create an environment using:
# $ conda create --name <env> --file <this file>
# platform: linux-64
_libgcc_mutex=0.1=main
_openmp_mutex=5.1=1_gnu
blas=1.0=mkl
brotli=1.0.9=h5eee18b_7
brotli-bin=1.0.9=h5eee18b_7
brotlipy=0.7.0=py39h27cfd23_1003
bzip2=1.0.8=h7b6447c_0
ca-certificates=2023.08.22=h06a4308_0
certifi=2023.7.22=py39h06a4308_0
cffi=1.15.1=py39h5eee18b_3
charset-normalizer=2.0.4=pyhd3eb1b0_0
contourpy=1.0.5=py39hdb19cb5_0
cryptography=41.0.2=py39h22a60cf_0
cuda-cudart=11.8.89=0
cuda-cupti=11.8.87=0
cuda-libraries=11.8.0=0
cuda-nvrtc=11.8.89=0
cuda-nvtx=11.8.86=0
cuda-runtime=11.8.0=0
cycler=0.11.0=pyhd3eb1b0_0
cyrus-sasl=2.1.28=h52b45da_1
dbus=1.13.18=hb2f20db_0
expat=2.4.9=h6a678d5_0
ffmpeg=4.3=hf484d3e_0
filelock=3.9.0=py39h06a4308_0
fontconfig=2.14.1=h4c34cd2_2
fonttools=4.25.0=pyhd3eb1b0_0
freetype=2.12.1=h4a9f257_0
giflib=5.2.1=h5eee18b_3
glib=2.69.1=he621ea3_2
gmp=6.2.1=h295c915_3
gmpy2=2.1.2=py39heeb90bb_0
gnutls=3.6.15=he1e5248_0
gst-plugins-base=1.14.1=h6a678d5_1
gstreamer=1.14.1=h5eee18b_1
icu=58.2=he6710b0_3
idna=3.4=py39h06a4308_0
importlib_resources=5.2.0=pyhd3eb1b0_1
intel-openmp=2023.1.0=hdb19cb5_46305
jinja2=3.1.2=py39h06a4308_0
jpeg=9e=h5eee18b_1
kiwisolver=1.4.4=py39h6a678d5_0
krb5=1.20.1=h143b758_1
lame=3.100=h7b6447c_0
lcms2=2.12=h3be6417_0
ld_impl_linux-64=2.38=h1181459_1
lerc=3.0=h295c915_0
libbrotlicommon=1.0.9=h5eee18b_7
libbrotlidec=1.0.9=h5eee18b_7
libbrotlienc=1.0.9=h5eee18b_7
libclang=14.0.6=default_hc6dbbc7_1
libclang13=14.0.6=default_he11475f_1
libcublas=11.11.3.6=0
libcufft=10.9.0.58=0
libcufile=1.7.1.12=0
libcups=2.4.2=h2d74bed_1
libcurand=10.3.3.129=0
libcusolver=11.4.1.48=0
libcusparse=11.7.5.86=0
libdeflate=1.17=h5eee18b_0
libedit=3.1.20221030=h5eee18b_0
libevent=2.1.12=hdbd6064_1
libffi=3.4.4=h6a678d5_0
libgcc-ng=11.2.0=h1234567_1
libgomp=11.2.0=h1234567_1
libiconv=1.16=h7f8727e_2
libidn2=2.3.4=h5eee18b_0
libllvm14=14.0.6=hdb19cb5_3
libnpp=11.8.0.86=0
libnvjpeg=11.9.0.86=0
libpng=1.6.39=h5eee18b_0
libpq=12.15=hdbd6064_1
libstdcxx-ng=11.2.0=h1234567_1
libtasn1=4.19.0=h5eee18b_0
libtiff=4.5.0=h6a678d5_2
libunistring=0.9.10=h27cfd23_0
libuuid=1.41.5=h5eee18b_0
libwebp=1.2.4=h11a3e52_1
libwebp-base=1.2.4=h5eee18b_1
libxcb=1.15=h7f8727e_0
libxkbcommon=1.0.1=h5eee18b_1
libxml2=2.10.4=hcbfbd50_0
libxslt=1.1.37=h2085143_0
lz4-c=1.9.4=h6a678d5_0
markupsafe=2.1.1=py39h7f8727e_0
matplotlib=3.7.1=py39h06a4308_1
matplotlib-base=3.7.1=py39h417a72b_1
mkl=2023.1.0=h213fc3f_46343
mkl-service=2.4.0=py39h5eee18b_1
mkl_fft=1.3.6=py39h417a72b_1
mkl_random=1.2.2=py39h417a72b_1
mpc=1.1.0=h10f8cd9_1
mpfr=4.0.2=hb69a4c5_1
mpmath=1.3.0=py39h06a4308_0
munkres=1.1.4=py_0
mysql=5.7.24=h721c034_2
ncurses=6.4=h6a678d5_0
nettle=3.7.3=hbbd107a_1
networkx=3.1=py39h06a4308_0
nspr=4.35=h6a678d5_0
nss=3.89.1=h6a678d5_0
numpy=1.25.2=py39h5f9d8c6_0
numpy-base=1.25.2=py39hb5e798b_0
openh264=2.1.1=h4ff587b_0
openssl=3.0.10=h7f8727e_2
packaging=23.0=py39h06a4308_0
pcre=8.45=h295c915_0
pillow=9.4.0=py39h6a678d5_0
pip=23.2.1=py39h06a4308_0
ply=3.11=py39h06a4308_0
psutil=5.9.0=py39h5eee18b_0
pycparser=2.21=pyhd3eb1b0_0
pyopenssl=23.2.0=py39h06a4308_0
pyparsing=3.0.9=py39h06a4308_0
pyqt=5.15.7=py39h6a678d5_1
pyqt5-sip=12.11.0=py39h6a678d5_1
pysocks=1.7.1=py39h06a4308_0
python=3.9.17=h955ad1f_0
python-dateutil=2.8.2=pyhd3eb1b0_0
pytorch=2.0.1=py3.9_cuda11.8_cudnn8.7.0_0
pytorch-cuda=11.8=h7e8668a_5
pytorch-mutex=1.0=cuda
qt-main=5.15.2=h7358343_9
qt-webengine=5.15.9=h9ab4d14_7
qtwebkit=5.212=h3fafdc1_5
readline=8.2=h5eee18b_0
requests=2.31.0=py39h06a4308_0
setuptools=68.0.0=py39h06a4308_0
sip=6.6.2=py39h6a678d5_0
six=1.16.0=pyhd3eb1b0_1
sqlite=3.41.2=h5eee18b_0
sympy=1.11.1=py39h06a4308_0
tbb=2021.8.0=hdb19cb5_0
tk=8.6.12=h1ccaba5_0
tmux=3.3a=h5eee18b_1
toml=0.10.2=pyhd3eb1b0_0
torchaudio=2.0.2=py39_cu118
torchtriton=2.0.0=py39
torchvision=0.15.2=py39_cu118
tornado=6.3.2=py39h5eee18b_0
tqdm=4.65.0=py39hb070fc8_0
typing_extensions=4.7.1=py39h06a4308_0
tzdata=2023c=h04d1e81_0
urllib3=1.26.16=py39h06a4308_0
wget=3.2=pypi_0
wheel=0.38.4=py39h06a4308_0
xz=5.4.2=h5eee18b_0
zipp=3.11.0=py39h06a4308_0
zlib=1.2.13=h5eee18b_0
zstd=1.5.5=hc292b87_0
import torch
import os
import wavmark
from wavmark.utils import file_reader
from PyQt5 import QtCore, QtGui, QtWidgets

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def funcNNWProof(input):
    '''
    Verify that the dropped audio file carries the watermark
    '''
    payload = [0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1]
    model = wavmark.load_model().to(device)
    signal = file_reader.read_as_single_channel(input, aim_sr=16000)
    # Decode the watermark and compare it to the expected payload.
    payload_decoded, _ = wavmark.decode_watermark(model, signal, show_progress=True)
    if payload_decoded is None:
        return False
    BER = (payload != payload_decoded).mean() * 100
    return BER == 0


class DragDropMainWindow(QtWidgets.QMainWindow):
    fileDropped = QtCore.pyqtSignal(str)

    def __init__(self, parent=None):
        super(DragDropMainWindow, self).__init__(parent)
        self.setAcceptDrops(True)

    def dragEnterEvent(self, event):
        if event.mimeData().hasUrls():
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        for url in event.mimeData().urls():
            file_path = str(url.toLocalFile())
            self.fileDropped.emit(file_path)


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(300, 470, 221, 61))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(590, 60, 191, 81))
        self.pushButton_2.setObjectName("pushButton_2")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(70, 110, 721, 391))
        self.label.setObjectName("label")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Connect button clicks to functions
        self.pushButton.clicked.connect(self.run_UseCase)
        self.pushButton_2.clicked.connect(self.watermark_proof)
        self.file_path_g = None

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton.setText(_translate("MainWindow", "Running the UseCase"))
        self.pushButton_2.setText(_translate("MainWindow", "Watermarking proof"))
        self.label.setText(_translate("MainWindow", "<html><head/><body><p><img src=\"MPAI_NNW-MQA.png\"/></p></body></html>"))

    def run_UseCase(self):
        # Executed when the "Running the UseCase" button is clicked
        print("Opening new Window")
        os.system("gnome-terminal & disown")

    def watermark_proof(self):
        # Executed when the "Watermarking proof" button is clicked
        if self.file_path_g is None:
            print("Please, first drag an audio file")
        else:
            print("Processing...")
            answer = funcNNWProof(self.file_path_g)
            if answer:
                print("This audio is watermarked")
            else:
                print("This audio is not watermarked")


if __name__ == "__main__":
    import sys

    app = QtWidgets.QApplication(sys.argv)
    MainWindow = DragDropMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)

    def file_dropped(file_path):
        ui.file_path_g = file_path

    MainWindow.fileDropped.connect(file_dropped)
    MainWindow.show()
    sys.exit(app.exec_())