The purpose of this network is to see in the dark. I am a novice in deep learning, and I have not yet had time to debug or test this network, so I would appreciate any advice.

Here is the code:

I used TensorFlow, NumPy, and TensorFlow Probability (note: SymPy is not actually used anywhere in the code below).

```
class network:
    """Fully convolutional network intended for enhancing images shot in the dark."""

    def __init__(self, inputs, width, height, kernel_size=(5, 5)):
        # fixed: super(fully_convolution, self).__init__() referenced an
        # undefined class `fully_convolution` (NameError); `network` has no
        # base class, so no super() call is needed at all.
        self.inputs = inputs          # input tensor/placeholder (was accepted but discarded)
        self.width = width            # image width (was accepted but discarded)
        self.height = height          # image height (was accepted but discarded)
        self.kernel_size = kernel_size
        self.batch_size = 256
        # fixed: `trial` was a plain local and vanished when __init__ returned,
        # yet later code (the `if trial == 0:` branch) expects to read it.
        self.trial = 0
def elu(x, alpha=1):
    """Exponential linear unit: alpha*(e^x - 1) for x < 0, alpha*x for x >= 0.

    fixed: np.where evaluates BOTH branches, so the original computed
    np.exp(x) even for large positive x (overflow warnings); clamping the
    exponent to <= 0 avoids that without changing any returned value.
    expm1 also gives full precision for e^x - 1 near zero.
    """
    x = np.asarray(x, dtype=float)
    negative_branch = alpha * np.expm1(np.minimum(x, 0.0))
    return np.where(x < 0, negative_branch, alpha * x)
def squeeze_net(input, squeeze_depth, expand_depth, scope):
    """SqueezeNet 'fire' module: a 1x1 squeeze conv feeding parallel 1x1/3x3 expands.

    Keeps the spatial map size while reducing the number of parameters.
    """
    # fixed: the body used `name` and `inputs`, both undefined (the parameters
    # are `scope` and `input`); `values` must be a list/tuple of tensors.
    with tf.compat.v1.variable_scope(scope, "squeeze", values=[input]):
        # fixed: tf.nn.conv2d requires explicit 4-D filter tensors — with only
        # a channel count the matching high-level op is Keras Conv2D.
        squeezed = tf.keras.layers.Conv2D(squeeze_depth, (1, 1), name="squeeze")(input)
        x = tf.keras.layers.Conv2D(expand_depth, (1, 1), padding="same", name="1x1")(squeezed)
        y = tf.keras.layers.Conv2D(expand_depth, (3, 3), padding="same", name="3x3")(squeezed)
        # fixed: fire modules concatenate the expand paths along the channel
        # axis (3); axis 0 would stack along the batch dimension.
        return tf.concat((x, y), axis=3)
def count_sketch(img, x):
    """Count-sketch projection of a 2-D array onto x output columns.

    Each of the b input columns is hashed to one of x buckets with a random
    +/-1 sign flip; columns landing in the same bucket are summed.

    img: 2-D array of shape (a, b).
    x:   number of output columns (sketch width).
    Returns an (a, x) array.
    """
    a, b = img.shape
    c = np.zeros((a, x))
    # hash table mapping each input column to one output bucket
    hash_indices = np.random.choice(x, b, replace=True)
    # random signs: flips roughly 50% of the columns of img
    rand_sign = np.random.choice(2, b, replace=True) * 2 - 1
    matrix_a = img * rand_sign.reshape(1, b)
    for i in range(x):
        index = hash_indices == i
        # fixed: the original used MATLAB-style indexing c(:, i) /
        # matrix_a(:, index), which is a SyntaxError in Python.
        c[:, i] = np.sum(matrix_a[:, index], axis=1)
    return c
def bilinear(x1, x2, output_size):
    """Compact bilinear pooling: combine x1 and x2 via count-sketch + FFT convolution.

    The elementwise product in the FFT domain is equivalent to circular
    convolution of the two sketches.
    """
    p1 = count_sketch(x1, output_size)
    p2 = count_sketch(x2, output_size)
    # promote to complex so the FFT-domain elementwise product is defined
    pc1 = tf.complex(p1, tf.zeros_like(p1))
    pc2 = tf.complex(p2, tf.zeros_like(p2))
    # fixed: tf.batch_fft / tf.batch_ifft / tf.real were removed long ago;
    # tf.signal.fft operates batch-wise over the innermost dimension.
    conved = tf.signal.ifft(tf.signal.fft(pc1) * tf.signal.fft(pc2))
    return tf.math.real(conved)
def deconv_network(layer1, layer2, channel, pool_size=2):
    """Upsample layer1 to layer2's spatial size, then fuse the two with bilinear pooling."""
    # fixed: np.array((None, None), (None, None), dtype=np.int64) is invalid —
    # np.array's second positional argument is the dtype, and a filter of Nones
    # is meaningless anyway. conv2d_transpose needs a real kernel variable of
    # shape (height, width, out_channels, in_channels).
    in_channels = layer1.get_shape().as_list()[-1]
    kernel = tf.Variable(
        tf.random.truncated_normal((pool_size, pool_size, channel, in_channels))
    )
    upsampled = tf.nn.conv2d_transpose(
        layer1, kernel, tf.shape(layer2), strides=(pool_size, pool_size)
    )
    # fixed: bilinear takes the two tensors as separate arguments plus the
    # sketch size — the original passed a tuple as x1 and dropped a parameter.
    # (Also renamed the local so it no longer shadows the bilinear function.)
    fused = bilinear(upsampled, layer2, 3)
    # NOTE(review): forcing a 2-D (50, 50) static shape onto what should be a
    # 4-D activation looks wrong — confirm the intended rank before relying on it.
    fused.set_shape((50, 50))
    return fused
def bayes_prob(layer):
    """Bayesian dense head (two DenseFlipout layers) applied to `layer`.

    Returns the Keras model (same as the original's return value).
    """
    # fixed: the original used smart quotes (SyntaxError) and `tf.compact.v1`
    # instead of `tf.compat.v1`; `values` must be a list/tuple of tensors.
    with tf.compat.v1.name_scope("bayesian_prob", values=[layer]):
        model = tf.keras.Sequential((
            tfp.layers.DenseFlipout(512, activation=tf.nn.relu),
            tfp.layers.DenseFlipout(10),
        ))
        # fixed: the body called model(features) but `features` is undefined —
        # the function's own input `layer` is what should flow through.
        logits = model(layer)
        # NOTE(review): `labels` is not defined anywhere in this scope, so the
        # original loss/train_op lines could never run. They are preserved here
        # disabled; wire labels in from the caller before re-enabling.
        # neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
        #     labels=labels, logits=logits)
        # kl = sum(model.losses)  # KL terms contributed by the Flipout layers
        # train_op = tf.train.AdamOptimizer().minimize(neg_log_likelihood + kl)
        return model
def refine_net(x1, x2, num_hidden):
# NOTE(review): as written this function cannot run — it mixes tf.nn ops,
# Keras Model()/compile(), TF1 placeholders and optimizers in one body, and
# contains several outright syntax errors (flagged line by line below). It
# needs a rewrite rather than a patch; these comments only map out what each
# section appears to attempt: bilinear fusion, then light enhancement,
# deblurring, and denoising branches that are merged at the end.
n = int(num_hidden)
image = bilinear(x1, x2, 50)
# NOTE(review): tf.nn.conv2d takes a 4-D filter tensor, not an int; it has no
# pool_size/activation_function kwargs; “valid” uses smart quotes — SyntaxError.
x = tf.nn.conv2d(image, n*2, pool_size=2, padding=“valid”, activation_function=tf.nn.ReLU)
y = bayes_prob(x)  # NOTE(review): bayes_prob returns a Model, not a tensor
#______________________*light_enhancement*_________________________
image = tf.image.resize_nearest_neighbor(y, (50, 50))
dark = squeeze_net(image, n, n*2, scope=“dark”)  # NOTE(review): smart quotes — SyntaxError
bright = tf.nn.conv2d(dark, n, 3, stride = (3, 3), activation=tf.nn.ReLU, padding='same')
# NOTE(review): positional argument after keyword argument — SyntaxError on
# the next two lines; tf.nn.conv2d also has no `n` parameter.
bright2 = tf.nn.conv2d(n=n*2, 3, stride = (3, 3), activation=tf.nn.ReLU, padding='same')(bright)
bright3 = tf.nn.conv2d(n=n*2, 3, stride = (3, 3), activation=tf.nn.ReLU, padding='same')(bright2)
conv_resize = tf.image.resize_nearest_neighbor(bright3, (50, 50))
# NOTE(review): stride(1, 1) calls an undefined name `stride`.
layer1 = tf.nn.conv2d(n*2, 3, stride(1, 1), activation=tf.nn.ReLU, padding='same')(conv_resize)
layer2 = deconv_network(layer1, bright3, n*2)
layer3 = tf.nn.conv2d(n*2, 3, stride(1, 1), activation=tf.nn.ReLU, padding='same')(conv_resize)
layer4 = deconv_network(layer3, bright2, n*2)
# NOTE(review): tf.shape(y)(1) calls a tensor — presumably tf.shape(y)[1] was meant.
output_size = tf.image.resize_nearest_neighbor(layer4, (tf.shape(y)(1), tf.shape(y)(2)))
light_image = bilinear(output_size, y, 50)
# NOTE(review): Model is never imported; Keras uses inputs=/outputs= and the
# inputs must be Input layers, not intermediate activations.
enhancement = Model(input=dark, output=light_image)
n = int(num_hidden)  # NOTE(review): redundant — n already holds this value
#_____________________*deblurring*________________________
blur_net = squeeze_net(y, n, n*2, scope="blur")
# NOTE(review): blur_pixel_net and strides are undefined; positional argument
# after keyword arguments — SyntaxError.
deblur_net = tf.nn.conv2d(blur_pixel_net, n, strides, pool_size=2, padding="same", tf.nn.ReLU)
deblur_squeeze = tf.squeeze(deblur_net, (10, 10), scope="depixel")  # NOTE(review): tf.squeeze has no scope kwarg; axes (10, 10) exceed a 4-D tensor's rank
deblur_layer = deconv_network(deblur_squeeze, deblur_net, n=n*2, pool_size=3)  # NOTE(review): deconv_network has no parameter named `n`
deblur_layer2 = deconv_network(deblur_layer, deblur_net, n*2, pool_size=3)
deblur_output = deconv_network(deblur_layer2, deblur_net, n*2, pool_size=3)
deblur = Model(input=blur_net, output=deblur_output)
n = int(number_hidden)  # NOTE(review): number_hidden is undefined — presumably num_hidden
#_______________________*denoising*________________________
# Encoder: Uses activation techniques; Sigmoid for accuracy
bias = tf.Variable(tf.random_normal(n))  # NOTE(review): shape must be a list, e.g. [n]; `bias` is never used afterwards
# NOTE(review): weight(model) is undefined, and tf.matmul / tf.add each
# require two arguments — both encoder and decoder lines below cannot run.
encode = tf.nn.sigmoid(tf.add(tf.matmul(y, weight(model))))
# Decoder
decode = tf.nn.sigmoid(tf.add(tf.matmul(encode, weight(model))))
if trial == 0:  # NOTE(review): `trial` was a local inside __init__ and is not visible here
encode = tf.nn.sigmoid(tf.add(tf.matmul(y, weights)))  # NOTE(review): `weights` is only assigned on the NEXT line — used before definition
weights = tf.Variable(tf.math.abs(tf.random_normal(n, n_=2*n)))  # NOTE(review): random_normal has no n_ kwarg; shape should be [n, 2*n]
decode = tf.nn.sigmoid(tf.add(tf.matmul(encode, weights)))
weights = tf.Variable(tf.math.abs(tf.random_normal(n_, n)))  # NOTE(review): n_ is undefined
decode = tf.nn.sigmoid(tf.add(tf.matmul(decode, weights)))
autoencoder = Model(input=encode, output=decode)  # NOTE(review): built but never used
#----------------------------------------------------------
#uses parameter sharing and breaks down into certain tasks
dropout = layers.Dropout(rate=0.12, noise_shape=(batch_size, 1, features), seed=None)  # NOTE(review): `layers`, `batch_size`, `features` are all undefined here, and the dropout layer is never applied to anything
merge = bilinear(deblur, decoder, n)  # NOTE(review): `decoder` undefined (decode?); deblur is a Model, not a tensor
merge = bilinear(merge, enhancement, n)
deconv_output = deconv_network(merge, y, n*2, n*4, pool_size=5)  # NOTE(review): too many arguments for deconv_network's signature
upsampling_output = tf.nn.upsampling2d(pool_size=(3, 3), interpolation='bilinear')(deconv_output)  # NOTE(review): tf.nn has no upsampling2d; likely keras UpSampling2D(size=..., interpolation=...)
global_step = tf.Variable(0, trainable = False)
# NOTE(review): Model.compile() returns None, so loss_light / loss_deblur are
# None and the minimize() calls below cannot work. Adam is also never
# imported, and metrics=('accuracy') is a parenthesized string, not a tuple.
loss_light = enhancement.compile(loss='sparse_categorical_crossentropy',
metrics=('accuracy'), optimizer=Adam(learning_rate=1e-3, decay=0.99))
loss_deblur = deblur.compile(loss='sparse_categorical_crossentropy',
metrics=('accuracy'), optimizer=Adam(learning_rate=1e-3, decay=0.99))
input = tf.placeholder(tf.float32, (None, None, None, 3), name='input')  # NOTE(review): shadows the builtin `input`; tf.placeholder is TF1-only
loss_noise = tf.reduce_mean(tf.pow(input - decode, 2))  # reconstruction MSE for the autoencoder
lr = tf.train.exponential_decay(1e-3, global_step, 100, 0.96)
optimizer_light = tf.train.AdamOptimizer(lr, name='AdamOptimizer')
train_op_light = optimizer_light.minimize(loss_light, global_step=global_step)
optimizer_deblur = tf.train.AdamOptimizer(lr, name='AdamOptimizer')
train_op_deblur = optimizer_deblur.minimize(loss_deblur, global_step=global_step)
optimizer_noise = tf.train.AdamOptimizer(lr, name='AdamOptimizer')
train_op_noise = optimizer_noise.minimize(loss_noise, global_step=global_step)
return upsampling_output
#Makes Completely Dark Image Contain Less Noise & Black Level:Encode Check For Filter/ Neuron Size
def model(input=None):
    """Backbone encoder: stacked conv/pool stages with refine_net fusion between scales.

    input: 4-D image tensor (batch, height, width, channels).
        fixed: the original default was `input=self.input`, which is evaluated
        at definition time where `self` is not in scope (NameError) — callers
        should pass the input tensor explicitly.

    Returns the output of the final refine_net stage.
    """
    # fixed: tf.nn.conv2d requires an explicit 4-D filter tensor and has no
    # `activation` kwarg; the original's (filters, kernel_size) int arguments
    # only make sense as the Keras Conv2D layer, used here instead.
    def _conv(filters, kernel):
        # one same-padded conv with the network's elu activation
        return tf.keras.layers.Conv2D(filters, kernel, strides=(1, 1),
                                      padding='same', activation=elu)

    def _pool():
        # NOTE(review): `kernel_size` is a free name here, as in the original —
        # presumably the (5, 5) value from __init__; confirm it is in scope.
        return tf.keras.layers.MaxPooling2D(pool_size=kernel_size)

    conv1 = _conv(16, 2)(input)
    conv1_ = _conv(16, 2)(conv1)
    pool1 = _pool()(conv1_)

    conv2 = _conv(32, 3)(pool1)
    conv2_ = _conv(32, 3)(conv2)
    pool2 = _pool()(conv2_)

    conv3 = _conv(96, 3)(pool2)
    conv3_ = _conv(96, 3)(conv3)
    pool3 = _pool()(conv3_)
    refine = refine_net(pool2, pool3, 8)

    conv4 = _conv(128, 3)(refine)
    conv4_ = _conv(128, 3)(conv4)
    pool4 = _pool()(conv4_)
    refine2 = refine_net(pool3, pool4, 16)

    conv5 = _conv(256, 3)(refine2)
    conv5_ = _conv(256, 3)(conv5)
    pool5 = _pool()(conv5_)

    return refine_net(pool4, pool5, 32)
```