2022/5/9
1 2048
修改和优化代码。
def fill_grid_2(self, fill_steps, nums_list, difficulty_level_changge, num_path_last, now_num_path):
    """Fill the empty cells of the 2048 board (self.Matrics).

    Far-from-center cells are filled randomly; the last (up to) 4 cells are
    "controlled": every candidate number combination for them is scored and
    the combination matching the desired difficulty is applied in place.

    Parameters (assumptions to confirm — not all are visible from here):
      fill_steps: list of (row, col) empty-cell coordinates — presumably
        ordered so the most important cells come last.
      nums_list: candidate tile values for random filling.
      difficulty_level_changge: 1 means "make the board easier/raise the cap".
      num_path_last / now_num_path: previous and current connectivity scores
        from auto.feature_score — TODO confirm semantics against caller.

    Side effects: mutates self.Matrics and self.random_num_list, prints a
    connectivity report. Returns None.
    """
    num_max = np.max(self.Matrics)
    # NOTE(review): `min` shadows the builtin for the rest of this function.
    min = auto.matrix_min(self.Matrics)
    # If the spread between largest and smallest tile exceeds 2^7, rebuild
    # the candidate list from the smallest tile upward (6 doubling steps).
    if np.log2(num_max) - np.log2(min) > 7:
        nums_list = [min, min*2, min*4, min*8, min*16, min*32]
    # Same rebuild when fewer than 6 candidates remain.
    if len(nums_list) < 6 :
        nums_list = [min, min*2, min*4, min*8, min*16, min*32]
    if difficulty_level_changge == 1:
        # Difficulty change: extend the spawnable-number list with the next
        # power of two.
        self.random_num_list.append(self.random_num_list[-1]*2)
    length = len(nums_list)
    if len(fill_steps) > 4:
        # All but the last 4 cells are filled with uniformly random
        # candidates; the last 4 are reserved for controlled placement.
        random_list = fill_steps[0:len(fill_steps) - 4]
        control_list = fill_steps[len(fill_steps) - 4:]
        for i, j in random_list:
            new_number_index = random.randint(0, length - 1)
            new_number = nums_list[new_number_index]
            self.Matrics[i][j] = new_number
    else:
        control_list = fill_steps
    # Every possible assignment of candidate numbers to the controlled cells.
    all_matrix_control_action = auto.all_action_numlist(nums_list, len(control_list))
    index = 0
    socre = 0  # NOTE(review): unused (typo of "score"); kept byte-identical.
    total_num_path = []
    total_smooth = []
    total_num = []
    total_num_value = []
    total_mat_score = []
    total_mat_score_less = []
    total_mat_score_bigger = []
    # Pass 1: simulate each candidate assignment and collect raw features.
    for i in range(len(all_matrix_control_action)):
        mat = self.Matrics.copy()
        mat = auto.matrix_action_pad(mat, control_list, all_matrix_control_action[i])
        smooth, num_path = auto.matrix_smooth(mat)
        num_1 = 0
        num_2 = 0
        for j in range(len(all_matrix_control_action[i])):
            # np.where(...)[0] are the row indices where this value already
            # occurs; a zero sum means "not present" (or only in row 0 —
            # NOTE(review): row-0 occurrences are indistinguishable here).
            if np.sum(np.where(self.Matrics == all_matrix_control_action[i][j])[0]) == 0:
                num_1 = num_1
            else:
                # Reward values already on the board, weighted by how many
                # cells were filled randomly (25 - controlled count).
                num_1 = num_1 + (25 - len(control_list)) / np.sum(
                    np.where(self.Matrics == all_matrix_control_action[i][j])[0])
            # Reward small values relative to the current maximum tile.
            num_2 = num_2 + np.max(self.Matrics) / all_matrix_control_action[i][j]
        total_num.append(num_1)
        total_num_value.append(num_2)
        total_num_path.append(num_path)
        total_smooth.append(smooth)
    # Pass 2: standardize the features across all candidates and combine them
    # into a weighted score; bucket candidates by whether the resulting board
    # connectivity drops below or stays at/above the current connectivity.
    for i in range(len(all_matrix_control_action)):
        mat = self.Matrics.copy()
        mat = auto.matrix_action_pad(mat, control_list, all_matrix_control_action[i])
        if auto.feature_score(mat) < now_num_path:
            mat_socre = 0.3 * auto.Standardization(total_num_path, i) + 0.8 * auto.Standardization(
                total_smooth,
                i) + 0.3 * auto.Standardization(
                total_num, i) + 0.2 * auto.Standardization(total_num_value, i)
            total_mat_score.append((mat_socre, i))
            total_mat_score_less.append((mat_socre, i))
        elif auto.feature_score(mat) >= now_num_path:
            mat_socre = 0.3 * auto.Standardization(total_num_path, i) + 0.8 * auto.Standardization(
                total_smooth,
                i) + 0.3 * auto.Standardization(
                total_num, i) + 0.2 * auto.Standardization(total_num_value, i)
            total_mat_score.append((mat_socre, i))
            total_mat_score_bigger.append((mat_socre, i))
    total_mat_score_bigger.sort()
    total_mat_score_less.sort()
    # Difficulty 1: prefer the best-scoring connectivity-reducing candidate;
    # otherwise prefer the worst-scoring connectivity-preserving candidate.
    # Each branch falls back to the other bucket when its own is empty.
    if difficulty_level_changge == 1:
        if len(total_mat_score_less) != 0:
            index = total_mat_score_less[-1][1]
        else:
            index = total_mat_score_bigger[0][1]
    elif difficulty_level_changge != 1:
        if len(total_mat_score_bigger) != 0:
            index = total_mat_score_bigger[0][1]
        else:
            index = total_mat_score_less[-1][1]
    # Apply the chosen assignment to the real board (mutates self.Matrics).
    auto.matrix_action_pad(self.Matrics, control_list, all_matrix_control_action[index])
    print('--------------------------------------------')
    print('上一步连接性:', num_path_last)
    print('当前连接性:', auto.feature_score(self.Matrics))
def matrix_smooth(mat):
    """Score the smoothness of a board by probing every cell in 3 directions.

    For each cell, matrix_move is queried for directions 1, 2 and 3; its
    per-probe smoothness penalties are accumulated negatively and its pair
    counts positively, while the shared visited-marker grid is threaded
    through every call.

    Returns (smoothness_score, pair_count).
    """
    visited = np.array([[0] * 5] * 5)
    pair_total = 0
    smooth_total = 0
    rows, cols = mat.shape[0], mat.shape[1]
    for r in range(rows):
        for c in range(cols):
            for direction in (1, 2, 3):
                penalty, pairs, visited = matrix_move(mat, r, c, direction, visited)
                smooth_total -= penalty
                pair_total += pairs
    return smooth_total, pair_total
2022/5/10
1 2048
优化代码之后,可控制的数字增加到了5个。
2 联邦学习
2.1 复现联邦学习FedAvg
model
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation,Flatten
from keras.models import model_from_json
import tensorflow as tf
class Model:
    """Small Keras MLP (Flatten -> Dense 128 relu -> Dense 10 softmax) with
    JSON/H5 persistence helpers, used as the client/global model in the
    FedAvg experiment."""

    def __init__(self):
        self.model = Sequential()
        self.model.add(Flatten())
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dense(10, activation='softmax'))
        # BUG FIX: the output layer already applies softmax, so the loss must
        # treat predictions as probabilities. from_logits=True (as before)
        # applies a second softmax inside the loss and degrades training.
        self.model.compile(optimizer='adam',
                           loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                           metrics=['accuracy'])

    def saveModel(self):
        """Write the architecture to model.json and the weights to model.h5."""
        model_json = self.model.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model_json)
        self.model.save_weights("model.h5")
        print("Saved model to disk")

    def loadModel(self):
        """Rebuild a compiled model from model.json + model.h5 and return it."""
        # Context manager replaces the original open/read/close triple so the
        # handle is released even if reading raises.
        with open('model.json', 'r') as json_file:
            loaded_model_json = json_file.read()
        loaded_model = model_from_json(loaded_model_json)
        loaded_model.load_weights("model.h5")
        print("Loaded model from disk")
        # Same from_logits fix as in __init__ (softmax output).
        loaded_model.compile(optimizer='adam',
                             loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                             metrics=['accuracy'])
        return loaded_model

    def run(self, X, Y, load=True):
        """Optionally reload the persisted model, then fit for 5 epochs."""
        if load:
            self.model = self.loadModel()
        self.model.fit(X, Y, epochs=5)

    def evaluate(self, X, Y, verbose=2):
        """Return (loss, accuracy) on (X, Y)."""
        return self.model.evaluate(X, Y, verbose=verbose)

    def loss(self, X, Y):
        """Return only the loss on (X, Y)."""
        return self.model.evaluate(X, Y)[0]

    def predict(self, X):
        """Return class-probability predictions for X."""
        return self.model.predict(X)

    def getWeights(self):
        """Return the model weights as a list of per-layer arrays."""
        return self.model.get_weights()

    def setWeights(self, weight):
        """Overwrite the model weights with a list of per-layer arrays."""
        self.model.set_weights(weight)
data
from tensorflow.python.keras.datasets import cifar10, mnist, fashion_mnist
def Mnist_data():
    """Load MNIST and return (x_train, y_train, x_test, y_test) with images
    flattened to 784-vectors and scaled into [0, 1]."""
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
    flat_train = train_images.reshape(-1, 28 * 28) / 255
    flat_test = test_images.reshape(-1, 28 * 28) / 255
    return flat_train, train_labels, flat_test, test_labels
def generate_client_data(num_clients=10):
    """Load MNIST and split the training set into equal shards, one per client.

    Returns (data, shards, x_test, y_test) where data is the full list of
    (image, label) pairs, shards is a list of num_clients equally-sized
    slices of it (any remainder samples are dropped), and the test arrays
    are flattened and scaled into [0, 1].
    """
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
    train_images = train_images.reshape(-1, 28 * 28) / 255
    test_images = test_images.reshape(-1, 28 * 28) / 255
    paired = list(zip(train_images, train_labels))
    shard_size = len(paired) // num_clients
    shards = []
    for start in range(0, shard_size * num_clients, shard_size):
        shards.append(paired[start:start + shard_size])
    return paired, shards, test_images, test_labels
aggregator
import numpy as np
class FedAvg:
    """Federated-averaging aggregator.

    Each entry of different_client_values is (global_weights - client_weights)
    as a list/sequence of per-layer arrays; subtracting the mean difference
    from the global weights moves the global model toward the client average.

    FIX: the original wrapped the whole weight list in np.array(...), which
    builds a ragged object array (layer matrices have different shapes) —
    deprecated and rejected by modern NumPy without dtype=object. Aggregation
    is now done layer by layer, which also matches Keras's native
    list-of-arrays weight format for set_weights.
    """

    def __init__(self, global_model, different_client_values, client_count):
        global_weights = global_model.getWeights()
        for client_diff in different_client_values:
            # Subtract this client's share of the average difference,
            # layer by layer.
            global_weights = [layer - diff / client_count
                              for layer, diff in zip(global_weights, client_diff)]
        global_model.setWeights(global_weights)
main
import numpy as np
import data
import model
from tensorflow import keras
import tensorflow as tf
import aggregator
if __name__ == '__main__':
    # One manual FedAvg round with 3 clients over 4 MNIST shards.
    # NOTE(review): this rebinds the name `data` from the module to the
    # returned sample list — the data module is unusable afterwards.
    data, shards, x_test, y_test = data.generate_client_data(4)
    model1 = model.Model()
    model2 = model.Model()
    model3 = model.Model()
    global_model = model.Model()
    # Unpack each shard's (image, label) pairs into arrays.
    x, y = zip(*shards[0])
    x_train = np.array(x)
    y_train = np.array(y)
    x_1, y_1 = zip(*shards[1])
    x_1 = np.array(x_1)
    y_1 = np.array(y_1)
    x_2, y_2 = zip(*shards[2])
    x_2 = np.array(x_2)
    y_2 = np.array(y_2)
    x_3, y_3 = zip(*shards[3])
    x_3 = np.array(x_3)
    y_3 = np.array(y_3)
    # NOTE(review): x_3[0:784] slices the first 784 SAMPLES (rows), and 784
    # is the per-image pixel count — presumably a confusion of axes; also
    # these predictions run on untrained models and a_* are never used.
    # TODO confirm intent.
    a_0 = np.argmax(global_model.predict(x_3[0:784]))
    # Persist the (untrained) global model so Model.run(load=True) below can
    # reload it from disk before each client's local training.
    global_model.saveModel()
    a_1 = np.argmax(model1.predict(x_3[0:784]))
    a_2 = np.argmax(model2.predict(x_3[0:784]))
    a_3 = np.argmax(model3.predict(x_3[0:784]))
    # Broadcast the global weights to every client.
    model1.setWeights(global_model.getWeights())
    model2.setWeights(global_model.getWeights())
    model3.setWeights(global_model.getWeights())
    # Local training: each client fits its own shard (run() reloads the
    # saved global model first since load defaults to True).
    model1.run(x_1, y_1)
    model2.run(x_2, y_2)
    model3.run(x_3, y_3)
    # Per-client weight differences (global - client), aggregated by FedAvg.
    client_difference_value = [np.array(global_model.getWeights()) - np.array(model1.getWeights()),
                               np.array(global_model.getWeights()) - np.array(model2.getWeights()),
                               np.array(global_model.getWeights()) - np.array(model3.getWeights())]
    # FedAvg mutates global_model in its constructor; the instance itself is
    # not used afterwards.
    fedavg = aggregator.FedAvg(global_model, client_difference_value, 3)
    global_model.saveModel()
    # Evaluate the aggregated global model on the held-out test split.
    test_loss, test_acc = global_model.evaluate(x_test, y_test, verbose=2)
    print('\nTest accuracy:', test_acc)
|