-
Notifications
You must be signed in to change notification settings - Fork 10
/
loss_functions.py
54 lines (39 loc) · 1.95 KB
/
loss_functions.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import tensorflow as tf
import numpy as np
def flow_loss(gen_flows, gt_flows):
    """
    Calculates the mean absolute (L1) difference between predicted and
    ground truth optical flows.

    @param gen_flows: dict with key 'flow' holding the predicted flow tensor.
    @param gt_flows: dict with key 'flow' holding the ground truth flow tensor
                     (same shape as the predicted one).
    @return: Scalar tensor — the mean absolute flow difference.
    """
    # NOTE(review): removed a leftover debug print of gen_flows['flow'];
    # a loss function should not write to stdout.
    return tf.reduce_mean(tf.abs(gen_flows['flow'] - gt_flows['flow']))
def intensity_loss(gen_frames, gt_frames, l_num):
    """
    Computes the mean lp intensity loss between predicted and ground
    truth frames.

    @param gen_frames: The predicted frames at each scale.
    @param gt_frames: The ground truth frames at each scale.
    @param l_num: 1 or 2 for l1 and l2 loss, respectively.
    @return: Scalar tensor — the mean of |gen - gt| ** l_num.
    """
    pixel_diff = gen_frames - gt_frames
    lp_error = tf.abs(pixel_diff) ** l_num
    return tf.reduce_mean(lp_error)
def gradient_loss(gen_frames, gt_frames, alpha):
    """
    Calculates the sum of GDL losses between the predicted and ground truth frames.
    @param gen_frames: The predicted frames at each scale.
    @param gt_frames: The ground truth frames at each scale
    @param alpha: The power to which each gradient term is raised.
    @return: The GDL loss.
    """
    # calculate the loss for each scale
    # create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
    # NOTE(review): conv2d requires 4-D inputs; assumes NHWC
    # (batch, height, width, channels) layout — TODO confirm against callers.
    channels = gen_frames.get_shape().as_list()[-1]
    # channels x channels identity: each output channel diffs only its own input channel.
    pos = tf.constant(np.identity(channels), dtype=tf.float32)
    neg = -1 * pos
    # Shape [1, 2, channels, channels]: per-channel horizontal [-1, 1] diff kernel.
    filter_x = tf.expand_dims(tf.stack([neg, pos]), 0)  # [-1, 1]
    # Shape [2, 1, channels, channels]: per-channel vertical [[1], [-1]] diff kernel.
    filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])  # [[1],[-1]]
    strides = [1, 1, 1, 1]  # stride of (1, 1)
    padding = 'SAME'
    # Absolute horizontal/vertical gradients of predicted and ground truth frames.
    # (abs makes the sign convention of the diff kernels irrelevant.)
    gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))
    gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))
    gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))
    gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))
    # Per-pixel gradient-magnitude differences between prediction and ground truth.
    grad_diff_x = tf.abs(gt_dx - gen_dx)
    grad_diff_y = tf.abs(gt_dy - gen_dy)
    # condense into one tensor and avg
    return tf.reduce_mean(grad_diff_x ** alpha + grad_diff_y ** alpha)