import pennylane as qml
import pennylane.numpy as np
import matplotlib.pyplot as plt
X = np.linspace(0, 2*np.pi, 10)
X.requires_grad = False
Y = np.sin(X)
Examples of simple machine learning models
A regression model
X
Y
X_test = np.linspace(0.2, 2*np.pi + 0.2, 5)
Y_test = np.sin(X_test)
dev = qml.device('default.qubit', wires=1)

@qml.qnode(dev)
def qc(datapoint, params):
    # we encode the data point in an RX gate
    qml.RX(datapoint, wires=0)
    # the model is a general unitary gate depending on 3 parameters
    qml.Rot(params[0], params[1], params[2], wires=0)
    # we return the expectation value of the Z operator
    return qml.expval(qml.PauliZ(wires=0))
def loss_func(predictions):
    total_losses = 0
    for i in range(len(Y)):
        output = Y[i]
        prediction = predictions[i]
        loss = (prediction - output)**2
        total_losses += loss
    return total_losses

def cost_fn(params):
    predictions = [qc(x, params) for x in X]
    cost = loss_func(predictions)
    return cost
opt = qml.GradientDescentOptimizer()

params = np.array([0.01, 0.1, 0.01], requires_grad=True)
epochs = 100

for epoch in range(epochs):
    params, prev_cost = opt.step_and_cost(cost_fn, params)
    if (epoch + 1) % 10 == 0:
        print(f"Step = {epoch+1} Cost = {cost_fn(params)} for params: {params}")
Step = 10 Cost = 9.037222062479499 for params: [-0.14333094 0.32519527 0.01 ]
Step = 20 Cost = 3.39452528591374 for params: [-0.5135331 0.96321395 0.01 ]
Step = 30 Cost = 0.5405856987920626 for params: [-0.79676991 1.40502266 0.01 ]
Step = 40 Cost = 0.18055568063658878 for params: [-0.9377581 1.52754136 0.01 ]
Step = 50 Cost = 0.09683220912558343 for params: [-1.02350548 1.55910856 0.01 ]
Step = 60 Cost = 0.06151965453001599 for params: [-1.08250661 1.56755217 0.01 ]
Step = 70 Cost = 0.04255032144989283 for params: [-1.12615645 1.56987864 0.01 ]
Step = 80 Cost = 0.031128244489378672 for params: [-1.16006661 1.57053313 0.01 ]
Step = 90 Cost = 0.023728811931929196 for params: [-1.18735893 1.57072005 0.01 ]
Step = 100 Cost = 0.0186699696065793 for params: [-1.20992122 1.57077404 0.01 ]
test_predictions = []
for x_test in X_test:
    prediction = qc(x_test, params)
    test_predictions.append(prediction)
fig = plt.figure()
ax1 = fig.add_subplot(111)

ax1.scatter(X, Y, s=30, c='b', marker="s", label='Train outputs')
ax1.scatter(X_test, Y_test, s=60, c='r', marker="o", label='Test outputs')
ax1.scatter(X_test, test_predictions, s=30, c='k', marker="x", label='Test predictions')
plt.xlabel("Inputs")
plt.ylabel("Outputs")
plt.title("QML results")
plt.legend(loc='upper right')
plt.show()
Variational quantum classifiers
A quantum circuit that can be trained on labelled data in order to classify a new sample.
Publications: an example of a variational algorithm based on optimizing the evaluation of the parity function.
Definition of the function: its value equals 1 only when the input it acts on contains an odd number of ones. Compare with the XOR gate.
Since this function accepts only binary strings (bit strings), we can consider an example in which our data are stored in this format.
This is so-called basis encoding.
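To make the parity definition concrete, here is a small illustrative helper (not part of the original circuit code) that computes the parity of a bit string by XOR-ing its bits, exactly like a chain of XOR gates would:

def parity(bits):
    """Return 1 if the bit string contains an odd number of ones, else 0."""
    p = 0
    for b in bits:
        p ^= b  # accumulate an XOR over all bits
    return p

parity([0, 1, 0, 1])  # -> 0 (even number of ones)
parity([0, 1, 1, 1])  # -> 1 (odd number of ones)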
import pennylane as qml
import pennylane.numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
dev = qml.device('default.qubit')
Variational algorithms are usually built from a single (elementary) circuit that can be repeated many times, forming a so-called layer or block.
n_qubits = 4

def layer(layer_weights):
    for wire in range(n_qubits):
        qml.Rot(*layer_weights[wire], wires=wire)

    for wires in ([0, 1], [1, 2], [2, 3], [3, 0]):
        qml.CNOT(wires)
Besides the model itself, we also need a way to encode our data. In this example the data are bit strings, which we want to encode in the state of the qubits.
\[ x = 0101 \to \ket{\psi} = \ket{0101}\]
Of course, we could flip the qubits ourselves using the \(X\) gate. A better way is to use qml.BasisState(x, wires), for which the input vector x is a list with entries from the set \(\{0,1\}\).
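As a quick standalone check (illustrative, not from the original notebook; it assumes a separate 4-qubit device dev_check), qml.BasisState applied to [0, 1, 0, 1] puts all probability on the basis state \(\ket{0101}\):

dev_check = qml.device('default.qubit', wires=4)

@qml.qnode(dev_check)
def encode(x):
    qml.BasisState(x, wires=range(4))
    return qml.probs(wires=range(4))

encode(np.array([0, 1, 0, 1]))  # all probability sits on index 0b0101 = 5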
def state_preparation(x):
    qml.BasisState(x, wires=range(4))
@qml.qnode(dev)
def circ(weights, x):
    state_preparation(x)

    for layer_weights in weights:
        layer(layer_weights)

    return qml.expval(qml.PauliZ(0))
We can also add a classical bias parameter.
def variational_classifier(weights, bias, x):
    return circ(weights, x) + bias
Computing the cost
def loss_fn(labels, predictions):
    return np.mean((labels - qml.math.stack(predictions))**2)
def accuracy(labels, predictions):
    acc = sum(abs(l - p) < 1e-5 for l, p in zip(labels, predictions))
    acc = acc / len(labels)
    return acc

def cost(weights, bias, X, Y):
    predictions = [variational_classifier(weights, bias, x) for x in X]
    return loss_fn(Y, predictions)
= """0 0 0 1 1
train 0 0 1 0 1
0 1 0 0 1
0 1 0 1 0
0 1 1 0 0
0 1 1 1 1
1 0 0 0 1
1 0 0 1 0
1 0 1 1 1
1 1 1 1 0"""
= """0 0 0 0 0
test 0 0 1 1 0
1 0 1 0 0
1 1 1 0 1
1 1 0 0 0
1 1 0 1 1"""
data = np.loadtxt('parity_train.txt', dtype=int)
X = np.array(data[:, :-1])
Y = np.array(data[:, -1])
Y = Y * 2 - 1  # shift labels from {0, 1} to {-1, 1}

for x, y in zip(X, Y):
    print(f"x = {x}, y = {y}")
x = [0 0 0 1], y = 1
x = [0 0 1 0], y = 1
x = [0 1 0 0], y = 1
x = [0 1 0 1], y = -1
x = [0 1 1 0], y = -1
x = [0 1 1 1], y = 1
x = [1 0 0 0], y = 1
x = [1 0 0 1], y = -1
x = [1 0 1 1], y = 1
x = [1 1 1 1], y = -1
np.random.seed(0)
num_qubits = 4
num_layers = 2
weights_init = 0.01 * np.random.randn(num_layers, num_qubits, 3, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
print("Weights:", weights_init)
print("Bias: ", bias_init)
Weights: [[[ 0.01764052 0.00400157 0.00978738]
[ 0.02240893 0.01867558 -0.00977278]
[ 0.00950088 -0.00151357 -0.00103219]
[ 0.00410599 0.00144044 0.01454274]]
[[ 0.00761038 0.00121675 0.00443863]
[ 0.00333674 0.01494079 -0.00205158]
[ 0.00313068 -0.00854096 -0.0255299 ]
[ 0.00653619 0.00864436 -0.00742165]]]
Bias: 0.0
opt = NesterovMomentumOptimizer(0.5)
batch_size = 5

weights = weights_init
bias = bias_init
for it in range(100):
    # Update the weights by one optimizer step, using only a limited batch of data
    batch_index = np.random.randint(0, len(X), (batch_size,))
    X_batch = X[batch_index]
    Y_batch = Y[batch_index]
    weights, bias = opt.step(cost, weights, bias, X=X_batch, Y=Y_batch)

    # Compute accuracy
    predictions = [np.sign(variational_classifier(weights, bias, x)) for x in X]

    current_cost = cost(weights, bias, X, Y)
    acc = accuracy(Y, predictions)

    print(f"Iter: {it+1:4d} | Cost: {current_cost:0.7f} | Accuracy: {acc:0.7f}")
Iter: 1 | Cost: 2.3147651 | Accuracy: 0.5000000
Iter: 2 | Cost: 1.9664866 | Accuracy: 0.5000000
Iter: 3 | Cost: 1.9208589 | Accuracy: 0.5000000
Iter: 4 | Cost: 2.6276126 | Accuracy: 0.5000000
Iter: 5 | Cost: 0.9323119 | Accuracy: 0.6000000
Iter: 6 | Cost: 1.1903549 | Accuracy: 0.5000000
Iter: 7 | Cost: 2.0508989 | Accuracy: 0.4000000
Iter: 8 | Cost: 1.1275531 | Accuracy: 0.6000000
Iter: 9 | Cost: 1.1659803 | Accuracy: 0.6000000
Iter: 10 | Cost: 1.1349618 | Accuracy: 0.6000000
Iter: 11 | Cost: 0.9994063 | Accuracy: 0.6000000
Iter: 12 | Cost: 1.0812559 | Accuracy: 0.6000000
Iter: 13 | Cost: 1.2863155 | Accuracy: 0.6000000
Iter: 14 | Cost: 2.2658259 | Accuracy: 0.4000000
Iter: 15 | Cost: 1.1323724 | Accuracy: 0.6000000
Iter: 16 | Cost: 1.3439737 | Accuracy: 0.8000000
Iter: 17 | Cost: 2.0076168 | Accuracy: 0.6000000
Iter: 18 | Cost: 1.2685760 | Accuracy: 0.5000000
Iter: 19 | Cost: 1.6762475 | Accuracy: 0.5000000
Iter: 20 | Cost: 1.1868237 | Accuracy: 0.6000000
Iter: 21 | Cost: 1.4784687 | Accuracy: 0.6000000
Iter: 22 | Cost: 1.4599473 | Accuracy: 0.6000000
Iter: 23 | Cost: 0.9573269 | Accuracy: 0.6000000
Iter: 24 | Cost: 1.1657424 | Accuracy: 0.5000000
Iter: 25 | Cost: 1.0877087 | Accuracy: 0.4000000
Iter: 26 | Cost: 1.1683687 | Accuracy: 0.6000000
Iter: 27 | Cost: 2.1141689 | Accuracy: 0.6000000
Iter: 28 | Cost: 1.0272966 | Accuracy: 0.5000000
Iter: 29 | Cost: 0.9664085 | Accuracy: 0.5000000
Iter: 30 | Cost: 1.1287654 | Accuracy: 0.6000000
Iter: 31 | Cost: 1.4202360 | Accuracy: 0.4000000
Iter: 32 | Cost: 1.1286000 | Accuracy: 0.5000000
Iter: 33 | Cost: 1.9594333 | Accuracy: 0.4000000
Iter: 34 | Cost: 1.2811832 | Accuracy: 0.4000000
Iter: 35 | Cost: 0.8522775 | Accuracy: 0.7000000
Iter: 36 | Cost: 1.4765281 | Accuracy: 0.6000000
Iter: 37 | Cost: 0.9603287 | Accuracy: 0.6000000
Iter: 38 | Cost: 1.6031314 | Accuracy: 0.6000000
Iter: 39 | Cost: 1.1700888 | Accuracy: 0.4000000
Iter: 40 | Cost: 1.7571779 | Accuracy: 0.4000000
Iter: 41 | Cost: 1.9608116 | Accuracy: 0.6000000
Iter: 42 | Cost: 2.0802752 | Accuracy: 0.6000000
Iter: 43 | Cost: 1.1904884 | Accuracy: 0.3000000
Iter: 44 | Cost: 0.9941585 | Accuracy: 0.6000000
Iter: 45 | Cost: 1.0709609 | Accuracy: 0.5000000
Iter: 46 | Cost: 0.9780625 | Accuracy: 0.6000000
Iter: 47 | Cost: 1.1573709 | Accuracy: 0.6000000
Iter: 48 | Cost: 1.0235239 | Accuracy: 0.6000000
Iter: 49 | Cost: 1.2842469 | Accuracy: 0.5000000
Iter: 50 | Cost: 0.8549226 | Accuracy: 0.6000000
Iter: 51 | Cost: 0.5136787 | Accuracy: 1.0000000
Iter: 52 | Cost: 0.2488031 | Accuracy: 1.0000000
Iter: 53 | Cost: 0.0461277 | Accuracy: 1.0000000
Iter: 54 | Cost: 0.0293518 | Accuracy: 1.0000000
Iter: 55 | Cost: 0.0205454 | Accuracy: 1.0000000
Iter: 56 | Cost: 0.0352514 | Accuracy: 1.0000000
Iter: 57 | Cost: 0.0576767 | Accuracy: 1.0000000
Iter: 58 | Cost: 0.0291305 | Accuracy: 1.0000000
Iter: 59 | Cost: 0.0127137 | Accuracy: 1.0000000
Iter: 60 | Cost: 0.0058108 | Accuracy: 1.0000000
Iter: 61 | Cost: 0.0018002 | Accuracy: 1.0000000
Iter: 62 | Cost: 0.0014089 | Accuracy: 1.0000000
Iter: 63 | Cost: 0.0017489 | Accuracy: 1.0000000
Iter: 64 | Cost: 0.0021282 | Accuracy: 1.0000000
Iter: 65 | Cost: 0.0029876 | Accuracy: 1.0000000
Iter: 66 | Cost: 0.0035331 | Accuracy: 1.0000000
Iter: 67 | Cost: 0.0035540 | Accuracy: 1.0000000
Iter: 68 | Cost: 0.0025639 | Accuracy: 1.0000000
Iter: 69 | Cost: 0.0019459 | Accuracy: 1.0000000
Iter: 70 | Cost: 0.0015856 | Accuracy: 1.0000000
Iter: 71 | Cost: 0.0008439 | Accuracy: 1.0000000
Iter: 72 | Cost: 0.0005960 | Accuracy: 1.0000000
Iter: 73 | Cost: 0.0003122 | Accuracy: 1.0000000
Iter: 74 | Cost: 0.0002446 | Accuracy: 1.0000000
Iter: 75 | Cost: 0.0001745 | Accuracy: 1.0000000
Iter: 76 | Cost: 0.0001215 | Accuracy: 1.0000000
Iter: 77 | Cost: 0.0001141 | Accuracy: 1.0000000
Iter: 78 | Cost: 0.0001538 | Accuracy: 1.0000000
Iter: 79 | Cost: 0.0001871 | Accuracy: 1.0000000
Iter: 80 | Cost: 0.0001330 | Accuracy: 1.0000000
Iter: 81 | Cost: 0.0001380 | Accuracy: 1.0000000
Iter: 82 | Cost: 0.0001336 | Accuracy: 1.0000000
Iter: 83 | Cost: 0.0001483 | Accuracy: 1.0000000
Iter: 84 | Cost: 0.0001234 | Accuracy: 1.0000000
Iter: 85 | Cost: 0.0001359 | Accuracy: 1.0000000
Iter: 86 | Cost: 0.0001268 | Accuracy: 1.0000000
Iter: 87 | Cost: 0.0002270 | Accuracy: 1.0000000
Iter: 88 | Cost: 0.0000865 | Accuracy: 1.0000000
Iter: 89 | Cost: 0.0000774 | Accuracy: 1.0000000
Iter: 90 | Cost: 0.0000759 | Accuracy: 1.0000000
Iter: 91 | Cost: 0.0000607 | Accuracy: 1.0000000
Iter: 92 | Cost: 0.0000523 | Accuracy: 1.0000000
Iter: 93 | Cost: 0.0000536 | Accuracy: 1.0000000
Iter: 94 | Cost: 0.0000444 | Accuracy: 1.0000000
Iter: 95 | Cost: 0.0000384 | Accuracy: 1.0000000
Iter: 96 | Cost: 0.0000497 | Accuracy: 1.0000000
Iter: 97 | Cost: 0.0000263 | Accuracy: 1.0000000
Iter: 98 | Cost: 0.0000229 | Accuracy: 1.0000000
Iter: 99 | Cost: 0.0000339 | Accuracy: 1.0000000
Iter: 100 | Cost: 0.0000174 | Accuracy: 1.0000000
= np.loadtxt("parity_test.txt", dtype=int)
data = np.array(data[:, :-1])
X_test = np.array(data[:, -1])
Y_test = Y_test * 2 - 1 # shift label from {0, 1} to {-1, 1}
Y_test
= [np.sign(variational_classifier(weights, bias, x)) for x in X_test]
predictions_test
for x,y,p in zip(X_test, Y_test, predictions_test):
print(f"x = {x}, y = {y}, pred = {p}")
= accuracy(Y_test, predictions_test)
acc_test print("Accuracy on unseen data:", acc_test)
x = [0 0 0 0], y = -1, pred = -1.0
x = [0 0 1 1], y = -1, pred = -1.0
x = [1 0 1 0], y = -1, pred = -1.0
x = [1 1 1 0], y = 1, pred = 1.0
x = [1 1 0 0], y = -1, pred = -1.0
x = [1 1 0 1], y = 1, pred = 1.0
Accuracy on unseen data: 1.0
Other bit encodings
n_wires = 4
dev = qml.device('default.qubit', wires=n_wires)

@qml.qnode(dev)
def basis_circuit_1(features):
    # qml.BasisEmbedding(features=features, wires=range(len(features)))
    for i in range(len(features)):
        if features[i] == 1:
            qml.X(i)
    qml.Barrier()
    qml.Hadamard(1)
    qml.CNOT([1, 3])
    return qml.state()
X = [1, 0, 1, 1]

import matplotlib.pyplot as plt

qml.drawer.use_style("pennylane_sketch")
fig, ax = qml.draw_mpl(basis_circuit_1)(X)
plt.show()

state = basis_circuit_1(X)
state.real
@qml.qnode(dev)
def basis_circuit_2(features):
    for i in range(len(features)):
        if features[i] == 1:
            qml.X(i)
    return [qml.expval(qml.PauliZ(m)) for m in range(len(features))]
X = [1, 0, 1, 1]

import matplotlib.pyplot as plt

qml.drawer.use_style("pennylane_sketch")
fig, ax = qml.draw_mpl(basis_circuit_2)(X)
plt.show()

expvals = basis_circuit_2(X)
print(f'\nExpectation values: {np.array(expvals)}\n')
@qml.qnode(dev)
def basis_circuit_3(features):
    for i in range(len(features)):
        if features[i] == 1:
            qml.X(i)
    qml.Barrier()
    qml.Hadamard(1)
    qml.CNOT([1, 3])
    return qml.probs(range(len(features)))

import matplotlib.pyplot as plt

qml.drawer.use_style("pennylane_sketch")
fig, ax = qml.draw_mpl(basis_circuit_3)(X)
plt.show()

probs = basis_circuit_3(X)
print('\n', probs, '\n')
@qml.qnode(dev)
def basis_circuit_4(features):
    for i in range(len(features)):
        if features[i] == 1:
            qml.X(i)
    qml.Barrier()
    return [qml.probs(range(len(features) // 2)), qml.probs(range(len(features) // 2, len(features)))]

probs = basis_circuit_4(X)
print('\n', probs, '\n')
Real data
Preparing a state for real data is not always as simple and obvious as in the case of bit strings.
Every set of variables must be converted into a set of angles for the appropriate gates.
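For a normalised input with non-negative entries, the get_angles and state_preparation routines below effectively perform amplitude encoding on two qubits, i.e. they prepare
\[ x = (x_0, x_1, x_2, x_3) \to \ket{\psi_x} = x_0\ket{00} + x_1\ket{01} + x_2\ket{10} + x_3\ket{11}, \]
which is what the amplitude vector printed a few cells below confirms.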
def get_angles(x):
    beta0 = 2 * np.arcsin(np.sqrt(x[1] ** 2) / np.sqrt(x[0] ** 2 + x[1] ** 2 + 1e-12))
    beta1 = 2 * np.arcsin(np.sqrt(x[3] ** 2) / np.sqrt(x[2] ** 2 + x[3] ** 2 + 1e-12))
    beta2 = 2 * np.arcsin(np.linalg.norm(x[2:]) / np.linalg.norm(x))

    return np.array([beta2, -beta1 / 2, beta1 / 2, -beta0 / 2, beta0 / 2])
def state_preparation(a):
    qml.RY(a[0], wires=0)

    qml.CNOT(wires=[0, 1])
    qml.RY(a[1], wires=1)
    qml.CNOT(wires=[0, 1])
    qml.RY(a[2], wires=1)

    qml.PauliX(wires=0)
    qml.CNOT(wires=[0, 1])
    qml.RY(a[3], wires=1)
    qml.CNOT(wires=[0, 1])
    qml.RY(a[4], wires=1)
    qml.PauliX(wires=0)
x = np.array([0.53896774, 0.79503606, 0.27826503, 0.0], requires_grad=False)
ang = get_angles(x)
@qml.qnode(dev)
def test(angles):
    state_preparation(angles)
    return qml.state()

state = test(ang)
print("x : ", np.round(x, 6))
print("angles : ", np.round(ang, 6))
print("amplitude vector: ", np.round(np.real(state), 6))
x : [0.538968 0.795036 0.278265 0. ]
angles : [ 0.563975 -0. 0. -0.975046 0.975046]
amplitude vector: [ 0.538968 0.795036 0.278265 -0. ]
def square_loss(labels, predictions):
    # We use a call to qml.math.stack to allow subtracting the arrays directly
    return np.mean((labels - qml.math.stack(predictions)) ** 2)
def layer(layer_weights):
    for wire in range(2):
        qml.Rot(*layer_weights[wire], wires=wire)
    qml.CNOT(wires=[0, 1])
def cost(weights, bias, X, Y):
    # Transpose the batch of input data in order to make the indexing
    # in state_preparation work
    predictions = variational_classifier(weights, bias, X.T)
    return square_loss(Y, predictions)
= np.loadtxt("iris.txt")
data = data[:, 0:2]
X print(f"First X sample (original) : {X[0]}")
# pad the vectors to size 2^2=4 with constant values
= np.ones((len(X), 2)) * 0.1
padding = np.c_[X, padding]
X_pad print(f"First X sample (padded) : {X_pad[0]}")
# normalize each input
= np.sqrt(np.sum(X_pad**2, -1))
normalization = (X_pad.T / normalization).T
X_norm print(f"First X sample (normalized): {X_norm[0]}")
# the angles for state preparation are the features
= np.array([get_angles(x) for x in X_norm], requires_grad=False)
features print(f"First features sample : {features[0]}")
= data[:, -1] Y
First X sample (original) : [0.4 0.75]
First X sample (padded) : [0.4 0.75 0.1 0.1 ]
First X sample (normalized): [0.46420708 0.87038828 0.11605177 0.11605177]
First features sample : [ 0.32973573 -0.78539816 0.78539816 -1.080839 1.080839 ]
import matplotlib.pyplot as plt

plt.figure()
plt.scatter(X[:, 0][Y == 1], X[:, 1][Y == 1], c="b", marker="o", ec="k")
plt.scatter(X[:, 0][Y == -1], X[:, 1][Y == -1], c="r", marker="o", ec="k")
plt.title("Original data")
plt.show()

plt.figure()
dim1 = 0
dim2 = 1
plt.scatter(X_norm[:, dim1][Y == 1], X_norm[:, dim2][Y == 1], c="b", marker="o", ec="k")
plt.scatter(X_norm[:, dim1][Y == -1], X_norm[:, dim2][Y == -1], c="r", marker="o", ec="k")
plt.title(f"Padded and normalised data (dims {dim1} and {dim2})")
plt.show()

plt.figure()
dim1 = 0
dim2 = 3
plt.scatter(features[:, dim1][Y == 1], features[:, dim2][Y == 1], c="b", marker="o", ec="k")
plt.scatter(features[:, dim1][Y == -1], features[:, dim2][Y == -1], c="r", marker="o", ec="k")
plt.title(f"Feature vectors (dims {dim1} and {dim2})")
plt.show()
np.random.seed(0)
num_data = len(Y)
num_train = int(0.75 * num_data)
index = np.random.permutation(range(num_data))
feats_train = features[index[:num_train]]
Y_train = Y[index[:num_train]]
feats_val = features[index[num_train:]]
Y_val = Y[index[num_train:]]

# We need these later for plotting
X_train = X[index[:num_train]]
X_val = X[index[num_train:]]
num_qubits = 2
num_layers = 6

weights_init = 0.01 * np.random.randn(num_layers, num_qubits, 3, requires_grad=True)
bias_init = np.array(0.0, requires_grad=True)
opt = NesterovMomentumOptimizer(0.01)
batch_size = 5

# train the variational classifier
weights = weights_init
bias = bias_init
for it in range(60):
    # Update the weights by one optimizer step
    batch_index = np.random.randint(0, num_train, (batch_size,))
    feats_train_batch = feats_train[batch_index]
    Y_train_batch = Y_train[batch_index]
    weights, bias, _, _ = opt.step(cost, weights, bias, feats_train_batch, Y_train_batch)

    # Compute predictions on train and validation set
    predictions_train = np.sign(variational_classifier(weights, bias, feats_train.T))
    predictions_val = np.sign(variational_classifier(weights, bias, feats_val.T))

    # Compute accuracy on train and validation set
    acc_train = accuracy(Y_train, predictions_train)
    acc_val = accuracy(Y_val, predictions_val)

    if (it + 1) % 2 == 0:
        _cost = cost(weights, bias, features, Y)
        print(
            f"Iter: {it + 1:5d} | Cost: {_cost:0.7f} | "
            f"Acc train: {acc_train:0.7f} | Acc validation: {acc_val:0.7f}"
        )
Iter: 2 | Cost: 1.6589456 | Acc train: 0.4800000 | Acc validation: 0.5600000
Iter: 4 | Cost: 1.2054273 | Acc train: 0.4933333 | Acc validation: 0.5600000
Iter: 6 | Cost: 0.9740740 | Acc train: 0.4933333 | Acc validation: 0.7200000
Iter: 8 | Cost: 0.9660872 | Acc train: 0.6400000 | Acc validation: 0.6400000
Iter: 10 | Cost: 0.9569019 | Acc train: 0.6000000 | Acc validation: 0.6000000
Iter: 12 | Cost: 0.9445863 | Acc train: 0.4933333 | Acc validation: 0.7200000
Iter: 14 | Cost: 1.0339978 | Acc train: 0.4800000 | Acc validation: 0.5600000
Iter: 16 | Cost: 1.0774217 | Acc train: 0.4933333 | Acc validation: 0.5600000
Iter: 18 | Cost: 0.9984426 | Acc train: 0.4800000 | Acc validation: 0.5600000
Iter: 20 | Cost: 0.8975279 | Acc train: 0.5600000 | Acc validation: 0.7600000
Iter: 22 | Cost: 0.8451699 | Acc train: 0.6400000 | Acc validation: 0.6400000
Iter: 24 | Cost: 0.8337489 | Acc train: 0.5600000 | Acc validation: 0.5200000
Iter: 26 | Cost: 0.7832025 | Acc train: 0.6000000 | Acc validation: 0.6000000
Iter: 28 | Cost: 0.7397515 | Acc train: 0.6133333 | Acc validation: 0.6000000
Iter: 30 | Cost: 0.6690522 | Acc train: 0.6666667 | Acc validation: 0.6400000
Iter: 32 | Cost: 0.5640186 | Acc train: 0.8266667 | Acc validation: 0.8000000
Iter: 34 | Cost: 0.4765597 | Acc train: 0.8933333 | Acc validation: 0.8800000
Iter: 36 | Cost: 0.4144135 | Acc train: 0.9200000 | Acc validation: 0.9600000
Iter: 38 | Cost: 0.3569566 | Acc train: 0.9600000 | Acc validation: 1.0000000
Iter: 40 | Cost: 0.3186159 | Acc train: 0.9866667 | Acc validation: 1.0000000
Iter: 42 | Cost: 0.2853043 | Acc train: 0.9866667 | Acc validation: 1.0000000
Iter: 44 | Cost: 0.2652725 | Acc train: 1.0000000 | Acc validation: 1.0000000
Iter: 46 | Cost: 0.2525848 | Acc train: 1.0000000 | Acc validation: 1.0000000
Iter: 48 | Cost: 0.2444278 | Acc train: 1.0000000 | Acc validation: 1.0000000
Iter: 50 | Cost: 0.2436316 | Acc train: 0.9866667 | Acc validation: 1.0000000
Iter: 52 | Cost: 0.2376316 | Acc train: 1.0000000 | Acc validation: 1.0000000
Iter: 54 | Cost: 0.2307475 | Acc train: 1.0000000 | Acc validation: 1.0000000
Iter: 56 | Cost: 0.2341245 | Acc train: 1.0000000 | Acc validation: 1.0000000
Iter: 58 | Cost: 0.2292663 | Acc train: 1.0000000 | Acc validation: 1.0000000
Iter: 60 | Cost: 0.2241948 | Acc train: 1.0000000 | Acc validation: 1.0000000
plt.figure()
cm = plt.cm.RdBu

# make data for decision regions
xx, yy = np.meshgrid(np.linspace(0.0, 1.5, 30), np.linspace(0.0, 1.5, 30))
X_grid = [np.array([x, y]) for x, y in zip(xx.flatten(), yy.flatten())]

# preprocess grid points like data inputs above
padding = 0.1 * np.ones((len(X_grid), 2))
X_grid = np.c_[X_grid, padding]  # pad each input
normalization = np.sqrt(np.sum(X_grid**2, -1))
X_grid = (X_grid.T / normalization).T  # normalize each input
features_grid = np.array([get_angles(x) for x in X_grid])  # angles are new features
predictions_grid = variational_classifier(weights, bias, features_grid.T)
Z = np.reshape(predictions_grid, xx.shape)

# plot decision regions
levels = np.arange(-1, 1.1, 0.1)
cnt = plt.contourf(xx, yy, Z, levels=levels, cmap=cm, alpha=0.8, extend="both")
plt.contour(xx, yy, Z, levels=[0.0], colors=("black",), linestyles=("--",), linewidths=(0.8,))
plt.colorbar(cnt, ticks=[-1, 0, 1])

# plot data
for color, label in zip(["b", "r"], [1, -1]):
    plot_x = X_train[:, 0][Y_train == label]
    plot_y = X_train[:, 1][Y_train == label]
    plt.scatter(plot_x, plot_y, c=color, marker="o", ec="k", label=f"class {label} train")
    plot_x = (X_val[:, 0][Y_val == label],)
    plot_y = (X_val[:, 1][Y_val == label],)
    plt.scatter(plot_x, plot_y, c=color, marker="^", ec="k", label=f"class {label} validation")

plt.legend()
plt.show()
Yet another approach
import numpy as np
import torch
from torch.nn.functional import relu
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pennylane as qml
from pennylane.templates import AngleEmbedding, StronglyEntanglingLayers
import matplotlib.pyplot as plt
np.random.seed(42)
X, y = load_iris(return_X_y=True)

# pick inputs and labels from the first two classes only,
# corresponding to the first 100 samples
X = X[:100]
y = y[:100]

# scaling the inputs is important since the embedding we use is periodic
scaler = StandardScaler().fit(X)
X_scaled = scaler.transform(X)

# scaling the labels to -1, 1 is important for the SVM and the
# definition of a hinge loss
y_scaled = 2 * (y - 0.5)

X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_scaled)
n_qubits = len(X_train[0])
n_qubits
4
= qml.device("default.qubit", wires=n_qubits)
dev_kernel
= np.zeros((2 ** n_qubits, 2 ** n_qubits))
projector 0, 0] = 1
projector[
@qml.qnode(dev_kernel)
def kernel(x1, x2):
    """The quantum kernel."""
    AngleEmbedding(x1, wires=range(n_qubits))
    qml.adjoint(AngleEmbedding)(x2, wires=range(n_qubits))
    return qml.expval(qml.Hermitian(projector, wires=range(n_qubits)))

kernel(X_train[0], X_train[0])
tensor(1., requires_grad=True)
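This value of 1 is expected: applying the embedding of \(x_1\) followed by the adjoint embedding of \(x_2\) and measuring the projector onto \(\ket{0\dots0}\) gives the squared overlap of the two feature states,
\[ k(x_1, x_2) = |\langle \phi(x_2) | \phi(x_1) \rangle|^2, \]
so identical inputs always yield \(k(x, x) = 1\).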
def kernel_matrix(A, B):
    """Compute the matrix whose entries are the kernel
    evaluated on pairwise data from sets A and B."""
    return np.array([[kernel(a, b) for b in B] for a in A])

svm = SVC(kernel=kernel_matrix).fit(X_train, y_train)
with dev_kernel.tracker:
    predictions = svm.predict(X_test)
    accuracy_score(predictions, y_test)
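The tracker context records how many circuit evaluations the kernel-based prediction needed. A short illustrative read-out (assuming PennyLane's standard tracker interface; not shown in the original):

print("Circuit executions during prediction:", dev_kernel.tracker.totals["executions"])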
but it can also be done like this
@qml.qnode(dev, diff_method="parameter-shift")
def quantum_model(x, params):
    """A variational quantum model."""
    # embedding
    AngleEmbedding(x, wires=range(n_qubits))

    # trainable measurement
    StronglyEntanglingLayers(params, wires=range(n_qubits))
    return qml.expval(qml.PauliZ(0))
def quantum_model_plus_bias(x, params, bias):
    """Adding a bias."""
    return quantum_model(x, params) + bias
def hinge_loss(predictions, targets):
    """Implements the hinge loss."""
    all_ones = torch.ones_like(targets)
    hinge_loss = all_ones - predictions * targets
    # trick: since the max(0,x) function is not differentiable,
    # use the mathematically equivalent relu instead
    hinge_loss = relu(hinge_loss)
    return hinge_loss
def quantum_model_train(n_layers, steps, batch_size):
    """Train the quantum model defined above."""
    params = np.random.random((n_layers, n_qubits, 3))
    params_torch = torch.tensor(params, requires_grad=True)
    bias_torch = torch.tensor(0.0)

    opt = torch.optim.Adam([params_torch, bias_torch], lr=0.1)

    loss_history = []
    for i in range(steps):

        batch_ids = np.random.choice(len(X_train), batch_size)

        X_batch = X_train[batch_ids]
        y_batch = y_train[batch_ids]

        X_batch_torch = torch.tensor(X_batch, requires_grad=False)
        y_batch_torch = torch.tensor(y_batch, requires_grad=False)

        def closure():
            opt.zero_grad()
            preds = torch.stack(
                [quantum_model_plus_bias(x, params_torch, bias_torch) for x in X_batch_torch]
            )
            loss = torch.mean(hinge_loss(preds, y_batch_torch))

            # bookkeeping
            current_loss = loss.detach().numpy().item()
            loss_history.append(current_loss)
            if i % 10 == 0:
                print("step", i, ", loss", current_loss)

            loss.backward()
            return loss

        opt.step(closure)

    return params_torch, bias_torch, loss_history
def quantum_model_predict(X_pred, trained_params, trained_bias):
    """Predict using the quantum model defined above."""
    p = []
    for x in X_pred:

        x_torch = torch.tensor(x)
        pred_torch = quantum_model_plus_bias(x_torch, trained_params, trained_bias)
        pred = pred_torch.detach().numpy().item()
        if pred > 0:
            pred = 1
        else:
            pred = -1

        p.append(pred)
    return p
n_layers = 2
batch_size = 20
steps = 100

with dev.tracker:
    trained_params, trained_bias, loss_history = quantum_model_train(n_layers, steps, batch_size)
    pred_test = quantum_model_predict(X_test, trained_params, trained_bias)

print("accuracy on test set:", accuracy_score(pred_test, y_test))

plt.plot(loss_history)
plt.ylim((0, 1))
plt.xlabel("steps")
plt.ylabel("cost")
plt.show()
step 0 , loss 1.046987877386652
step 10 , loss 0.6283218587126969
step 20 , loss 0.4609234606645739
step 30 , loss 0.575389668233003
step 40 , loss 0.5128422872323696
step 50 , loss 0.4457781583090726
step 60 , loss 0.5189765728028315
step 70 , loss 0.4848118165707457
step 80 , loss 0.4488463923314871
step 90 , loss 0.46302412863961334
accuracy on test set: 0.96