Showing 11 changed files with 597 additions and 525 deletions.
This file was deleted.
@@ -0,0 +1 @@
# OM NAMO NARAYANA
@@ -0,0 +1,166 @@
# OM NAMO NARAYANA

import numpy as np
import matplotlib.pyplot as plt
import torch
from skimage import io, transform
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image, ImageOps
from torch.nn import ReplicationPad2d
import tensorflow as tf
import sys

sys.path.insert(1, "./")

from data.darksight_dataset import DarkSightDataset


class MatchSize(object):
    def __init__(self, cam_shape, therm_shape):
        self.ratio = min(cam_shape[0] / therm_shape[1], cam_shape[1] / therm_shape[0])
        self.therm_shape = (
            int(therm_shape[0] * self.ratio),
            int(therm_shape[1] * self.ratio),
        )
        lpad = int((cam_shape[0] - self.therm_shape[1]) / 2)
        rpad = cam_shape[0] - lpad - self.therm_shape[1]
        upad = int((cam_shape[1] - self.therm_shape[0]) / 2)
        dpad = cam_shape[1] - upad - self.therm_shape[0]
        # self.padding = (upad, lpad, dpad, rpad)  # for ImageOps.expand
        self.padding = (upad, dpad, lpad, rpad)  # for ReplicationPad2d
        self.shape = cam_shape

    def __call__(self, sample):
        long_exp, short_exp, therm = (
            sample["long_exposure"],
            sample["short_exposure"],
            sample["thermal_response"],
        )
        therm = therm.resize(self.therm_shape)
        # therm = ImageOps.expand(therm, self.padding)
        # print(self.padding)
        m = ReplicationPad2d(self.padding)
        therm_tensor = transforms.ToTensor()(therm).unsqueeze_(0)
        therm_tensor = m(therm_tensor)
        therm = transforms.ToPILImage()(therm_tensor.squeeze_(0))
        return {
            "long_exposure": long_exp,
            "short_exposure": short_exp,
            "thermal_response": therm,
        }
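Worth noting for the padding above: ReplicationPad2d takes its 4-tuple in (left, right, top, bottom) order, which is what the second `self.padding` line is arranged for. Below is a quick sketch of how the default shapes work out; it is not part of this commit, assumes the MatchSize class above is in scope, and only needs Pillow on top of the file's own imports.

# --- sketch, not part of this file: MatchSize on the default shapes ---
from PIL import Image

therm = Image.new("L", (32, 24))  # PIL sizes are (width, height)
m = MatchSize(cam_shape=(2010, 3012), therm_shape=(32, 24))
out = m({"long_exposure": None, "short_exposure": None, "thermal_response": therm})
# the 32x24 map is resized to 2680x2010, then replication-padded by 166 px
# on the left and right, ending at 3012x2010, the camera frame size
print(out["thermal_response"].size)
# --- end of sketch ---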


class RandomCrop(object):
    def __init__(self, ps=512, hbuf=550, wbuf=550):
        self.ps = ps  # patch size
        self.hbuf = hbuf
        self.wbuf = wbuf

    def __call__(self, sample, color_percent=50):
        iter_times = 0
        while True:
            iter_times += 1
            long_exp, short_exp, therm = (
                sample["long_exposure"],
                sample["short_exposure"],
                sample["thermal_response"],
            )
            W = short_exp.shape[1]
            H = short_exp.shape[0]
            ps = self.ps
            xx = np.random.randint(self.wbuf, W - ps - self.wbuf)
            yy = np.random.randint(self.hbuf, H - ps - self.hbuf)
            short_exp = short_exp[yy : yy + ps, xx : xx + ps, :]
            therm = therm.crop((xx, yy, xx + ps, yy + ps))
            long_exp = long_exp[yy * 2 : yy * 2 + ps * 2, xx * 2 : xx * 2 + ps * 2, :]
            cnt = 0
            # commented out for the sake of testing
            # for i in range(0, 512):
            #     for j in range(0, 512):
            #         r = long_exp[i][j][0] * 255
            #         g = long_exp[i][j][1] * 255
            #         b = long_exp[i][j][2] * 255
            #         if r < 40 and g < 40 and b < 40:
            #             cnt += 1
            # print(cnt)
            # color_percent added
            break  # for fast testing; remove this to enable resampling
            if cnt < ((512 * 512) * color_percent / 100) or iter_times > 10:
                print("resampling..iteration:{}".format(iter_times))
                break
        return {
            "long_exposure": long_exp,
            "short_exposure": short_exp,
            "thermal_response": therm,
        }
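The nested loop that is commented out above counts near-black pixels in the long-exposure crop before accepting it. A vectorized NumPy equivalent is sketched below; it is not part of this commit, and `crop` stands for the 512x512x3 slice of `long_exp` the loop iterates over, assumed to be scaled to [0, 1].

# --- sketch, not part of this file: vectorized dark-pixel check ---
import numpy as np

def dark_fraction(crop):
    # crop: (512, 512, 3) float array in [0, 1]; a pixel counts as dark when
    # R, G and B are all below 40/255, matching the commented-out loop above
    dark = np.all(crop * 255 < 40, axis=2)
    return dark.mean()

# accept the crop when dark_fraction(crop) < color_percent / 100
# --- end of sketch ---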


class RandomFlip(object):
    def __call__(self, sample):
        long_exp, short_exp, therm = (
            sample["long_exposure"],
            sample["short_exposure"],
            sample["thermal_response"],
        )
        therm = np.array(therm)
        if np.random.randint(2, size=1)[0] == 1:  # random flip
            short_exp = np.flip(short_exp / 255.0, axis=0)
            therm = np.flip(therm, axis=0)
            long_exp = np.flip(long_exp, axis=0)
        if np.random.randint(2, size=1)[0] == 1:
            short_exp = np.flip(short_exp, axis=1)
            therm = np.flip(therm, axis=1)
            long_exp = np.flip(long_exp, axis=1)
        if np.random.randint(2, size=1)[0] == 1:  # random transpose
            short_exp = np.transpose(short_exp, (1, 0, 2))
            therm = np.transpose(therm, (1, 0))
            long_exp = np.transpose(long_exp, (1, 0, 2))
        return {
            "long_exposure": long_exp,
            "short_exposure": short_exp,
            "thermal_response": therm,
        }


class ConcatTherm(object):
    def __call__(self, sample):
        long_exp, short_exp, therm = (
            sample["long_exposure"],
            sample["short_exposure"],
            sample["thermal_response"],
        )
        # print('short_exp.shape: ', short_exp.shape, 'therm.shape: ', therm.shape)
        input_sample = np.transpose(short_exp, (2, 0, 1))
        input_sample = np.append(input_sample, np.expand_dims(therm, axis=0), axis=0)
        # print("input_sample.shape: ", input_sample.shape)
        return {"input_sample": input_sample, "output_sample": long_exp}


def my_transform(train=True, cam_shape=(2010, 3012), therm_shape=(32, 24)):
    transform = []
    transform.append(MatchSize(cam_shape, therm_shape))
    transform.append(RandomCrop())
    transform.append(RandomFlip())
    transform.append(ConcatTherm())
    return transform


class DarkSighDataLoader:
    def __init__(self):
        self.dataset_dir = "./dataset/dataset/"  # ARVINTH
        # dataset_dir = './dataset/'  # HARSHITH
        self.transformed_dataset = DarkSightDataset(
            self.dataset_dir, transform=my_transform(True)
        )
        self.data = list(self.transformed_dataset)

    def load(self, batch_size=1, shuffle=True):
        dataloader = DataLoader(self.data, batch_size, shuffle=shuffle)
        return dataloader


if __name__ == "__main__":
    data = DarkSighDataLoader().load()
    data = iter(data)
    print(next(data))
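For reference, here is a sketch of what the loader is expected to yield under the default my_transform() settings. The shapes are inferred from the transforms above (4 packed Bayer channels plus 1 thermal channel for the input, a 2x-resolution RGB crop for the target) and are not verified against the real dataset; it is not part of this commit and needs the dataset on disk to run.

# --- sketch, not part of this file: inspecting one batch ---
loader = DarkSighDataLoader().load(batch_size=1)
inp, target = next(iter(loader))
print(inp.shape)     # expected to be roughly (1, 5, 512, 512)
print(target.shape)  # expected to be roughly (1, 1024, 1024, 3)
# --- end of sketch ---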
@@ -0,0 +1,160 @@
# OM NAMO NARAYANA

import glob
import rawpy
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, transform
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image, ImageOps
from torch.nn import ReplicationPad2d
import tensorflow as tf
import sys
import torch

sys.path.insert(1, "./")

from DarkNet.models.sid_unet import sidUnet

# Ignore warnings
import warnings

from torchvision.transforms.functional import pil_to_tensor

warnings.filterwarnings("ignore")


def pack_raw(raw, blevel=512):
    # pack the Bayer image into 4 channels
    im = raw.raw_image_visible.astype(np.float32)
    # subtract the black level
    im = np.maximum(im - blevel, 0) / (16383 - blevel)

    im = np.expand_dims(im, axis=2)
    img_shape = im.shape
    H = img_shape[0]
    W = img_shape[1]

    out = np.concatenate(
        (
            im[0:H:2, 0:W:2, :],
            im[0:H:2, 1:W:2, :],
            im[1:H:2, 1:W:2, :],
            im[1:H:2, 0:W:2, :],
        ),
        axis=2,
    )
    return out
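pack_raw turns each 2x2 block of the Bayer mosaic into one spatial position with 4 channels, halving height and width and normalizing by the white level minus the black level. A tiny synthetic check follows; it is not part of this commit, and FakeRaw is a hypothetical stand-in for a rawpy object, exposing only the attribute pack_raw reads.

# --- sketch, not part of this file: pack_raw on a synthetic 4x4 mosaic ---
import numpy as np

class FakeRaw:
    # stand-in for a rawpy object; only raw_image_visible is accessed
    raw_image_visible = np.arange(16, dtype=np.uint16).reshape(4, 4) + 512

packed = pack_raw(FakeRaw(), blevel=512)
print(packed.shape)                  # (2, 2, 4): height and width halved
print(packed[0, 0] * (16383 - 512))  # first 2x2 block, reordered: approx. [0., 1., 5., 4.]
# --- end of sketch ---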


class PreprocessRaw(object):
    def __call__(self, sample):
        long_exp, short_exp, therm = (
            sample["long_exposure"],
            sample["short_exposure"],
            sample["thermal_response"],
        )
        long_exp = long_exp.postprocess(
            use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16
        )
        long_exp = np.float32(long_exp / 65535.0)
        short_exp = pack_raw(short_exp)
        return {
            "long_exposure": long_exp,
            "short_exposure": short_exp,
            "thermal_response": therm,
        }
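A note on the constants used here: with output_bps=16, rawpy's postprocess returns a 16-bit image, so dividing by 65535 (2^16 - 1) maps the long exposure into [0, 1]. The 16383 (2^14 - 1) inside pack_raw is presumably the white level of the camera's 14-bit raw data, so after black-level subtraction the short exposure lands on a comparable scale.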


class DarkSightDataset(Dataset):
    """DarkSight dataset."""

    def __init__(self, root_dir, transform=None):
        """
        Args:
            root_dir (string): Directory with all the datapoints.
            transform (list of callables, optional): Optional transforms applied
                to each sample after PreprocessRaw.
        """
        self.transform = [PreprocessRaw()]
        if transform:
            for t in transform:
                self.transform.append(t)
        self.transform = transforms.Compose(self.transform)

        self.root_dir = root_dir
        self.long_exp_list = glob.glob(root_dir + "**/*long*.CR3", recursive=True)
        self.short_exp_list = glob.glob(root_dir + "**/*short*.CR3", recursive=True)
        self.therm_list = glob.glob(root_dir + "**/temp.jpg", recursive=True)
        print("no. of datapoints", len(self.long_exp_list))
        assert len(self.long_exp_list) == len(self.short_exp_list) and len(
            self.long_exp_list
        ) == len(self.therm_list)

    def __len__(self):
        return len(self.long_exp_list)

    def __getitem__(self, idx):
        print("data index", idx)
        if torch.is_tensor(idx):
            idx = idx.tolist()
        long_exp = rawpy.imread(self.long_exp_list[idx])
        short_exp = rawpy.imread(self.short_exp_list[idx])
        therm = Image.open(self.therm_list[idx])
        therm = ImageOps.grayscale(therm)
        sample = {
            "long_exposure": long_exp,
            "short_exposure": short_exp,
            "thermal_response": therm,
        }

        sample = self.transform(sample)

        try:
            return [
                torch.tensor(sample["input_sample"].copy()),
                torch.tensor(sample["output_sample"].copy()),
            ]
        except KeyError:
            # no ConcatTherm in the pipeline, so return the raw sample dict
            return sample


if __name__ == "__main__":

    long_shots_dir = "./dataset/dataset/"  # Arvinth
    # long_shots_dir = '../dataset/'  # Harshit

    # drive code
    dataset_dir = "./dataset/dataset/"  # ARVINTH
    # dataset_dir = './dataset/'  # HARSHITH
    transformed_dataset = DarkSightDataset(dataset_dir)
    data = list(transformed_dataset)
    print(data[0])
    # debugging

    # long_shot_cam = glob.glob(long_shots_dir + "**/*long*.CR3", recursive=True)
    # short_shot_cam = glob.glob(long_shots_dir + "**/*short*.CR3", recursive=True)
    # therm = glob.glob(long_shots_dir + "**/*temp.jpg", recursive=True)
    """dictionary format
    print('dataset1: ', data[0]['short_exposure'].shape,
          data[0]['thermal_response'].shape)
    # print('dataset2: ', data[1]['short_exposure'].shape,
    #       data[1]['thermal_response'].shape)
    # data[0]['thermal_response'].show()
    print(np.max(data[0]['long_exposure']))
    plt.figure()
    plt.imshow(data[0]['long_exposure'])
    plt.figure()
    plt.imshow(data[0]['thermal_response'])
    plt.figure()
    plt.imshow(data[0]['short_exposure'][:, :, :3])
    print(data[0]['short_exposure'][:, :, 1:4].shape)
    plt.show()
    print(data[0]['thermal_response'][0][20:30])
    """

    """tensor format"""