Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 66 additions & 0 deletions camera_calibration.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
import numpy as np
import cv2

# Corner-refinement termination criteria: stop after 30 iterations or when
# the corner estimate moves by less than 0.001 px.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Prepare object points for a 7x6 inner-corner chessboard, like
# (0,0,0), (1,0,0), (2,0,0) ...., (6,5,0). Units are chessboard squares.
objp = np.zeros((6 * 7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane

# Capture resolution requested from the camera.
w = 1280
h = 720

# Side length of the square centre crop taken before resizing.
target_size = 720

s_x = (w // 2 - target_size // 2)
e_x = (w // 2 + target_size // 2)
s_y = 0
e_y = target_size

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)

gray = None  # last grayscale frame; calibrateCamera needs its size below

while cap.isOpened():
    success, img = cap.read()
    if not success:
        print("Ignoring empty camera frame.")
        # If loading a video, use 'break' instead of 'continue'.
        continue

    # Centre-crop to a square, then upscale to 1080x1080.
    img = img[s_y:e_y, s_x:e_x]
    img = cv2.resize(img, (1080, 1080))

    # Bug fix: numpy shape is (rows, cols) = (height, width); the original
    # read them swapped (harmless only because the frame is square).
    height = img.shape[0]
    width = img.shape[1]

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners.
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
    # If found, add object points and the *refined* image points.
    if ret:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(
            gray, corners, (11, 11), (-1, -1), criteria)
        # Bug fix: calibrate with the sub-pixel refined corners (corners2);
        # the original appended the raw detections and discarded corners2.
        imgpoints.append(corners2)
        # Draw and display the corners.
        cv2.drawChessboardCorners(img, (7, 6), corners2, ret)
    cv2.imshow('img', img)

    if cv2.waitKey(5) & 0xFF == 27:  # ESC quits the capture loop
        break

cap.release()  # release the camera before calibrating
cv2.destroyAllWindows()

# Guard: calibrateCamera needs at least one valid frame and one detection;
# without this the original crashed on an undefined `gray`.
if gray is None or not objpoints:
    raise SystemExit("No chessboard detections captured; nothing to calibrate.")

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)

print(f"width:{width}")
print(f"height:{height}")
print(f"ret:{ret}")
print(f"mtx:{mtx}")
print(f"dist:{dist}")
42 changes: 42 additions & 0 deletions environment.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
name: 360_vdo_reconstruck
channels:
- defaults
dependencies:
- _libgcc_mutex=0.1
- _openmp_mutex=4.5
- ca-certificates=2022.4.26
- certifi=2021.10.8
- ld_impl_linux-64=2.35.1
- libffi=3.3
- libgcc-ng=9.3.0
- libgomp=9.3.0
- libstdcxx-ng=9.3.0
- ncurses=6.3
- openssl=1.1.1o
- pip=21.2.2
- python=3.7.13
- readline=8.1.2
- setuptools=61.2.0
- sqlite=3.38.3
- tk=8.6.11
- wheel=0.37.1
- xz=5.2.5
- zlib=1.2.12
- pip:
- cached-property==1.5.2
- chumpy==0.70
- cycler==0.11.0
- cython==0.29.28
- h5py==3.6.0
- kiwisolver==1.4.2
- matplotlib==2.2.5
- numpy==1.21.6
- opencv-python==4.5.5.64
- opendr==0.77
- pyparsing==3.0.8
- python-dateutil==2.8.2
- pytz==2022.1
- scipy==1.7.3
- six==1.16.0
- tqdm==4.64.0
- typing-extensions==4.2.0
17 changes: 17 additions & 0 deletions hdf5_read.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import h5py
import numpy as np

filename = "reconstructed_poses_noratap.hdf5"

with h5py.File(filename, "r") as f:
    # Show the available top-level groups, then read the second one.
    print("Keys: %s" % f.keys())
    a_group_key = list(f.keys())[1]

    # Materialise the dataset; its first entry reshapes to 24 rows of 3
    # values (presumably per-joint axis-angle rotations — TODO confirm).
    data = list(f[a_group_key])
    pose_data = data[0].reshape((24, 3))

    # Dump each row converted from radians to degrees.
    for i in range(24):
        print(f"{i}:{np.degrees(pose_data[i])}")

    print(len(data[0]))
40 changes: 29 additions & 11 deletions lib/rays.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,14 @@ def unpose_and_select_rays(rays, Vi, smpl, rn_b, rn_m):
v_ids = visible_boundary_edge_verts(rn_b, rn_m)
verts = smpl.r[v_ids]

print(rn_b)
print(rn_m)
print(v_ids)
print(verts)

n, m = plucker(rays)
dist = np.linalg.norm(np.cross(verts.reshape(-1, 1, 3), n, axisa=2, axisb=1) - m, axis=2)
dist = np.linalg.norm(np.cross(verts.reshape(-1, 1, 3),
n, axisa=2, axisb=1) - m, axis=2)

ray_matches = np.argmin(dist, axis=0)
vert_matches = np.argmin(dist, axis=1)
Expand All @@ -38,27 +44,35 @@ def unpose_and_select_rays(rays, Vi, smpl, rn_b, rn_m):
M = Vi[:, :, v_ids]
T = smpl.v_posevariation[v_ids].r

tmp0 = M[:, :, ray_matches] * np.hstack((rays[:, 0], np.ones((rays.shape[0], 1)))).T.reshape(1, 4, -1)
tmp1 = M[:, :, ray_matches] * np.hstack((rays[:, 1], np.ones((rays.shape[0], 1)))).T.reshape(1, 4, -1)
tmp0 = M[:, :, ray_matches] * \
np.hstack((rays[:, 0], np.ones((rays.shape[0], 1)))
).T.reshape(1, 4, -1)
tmp1 = M[:, :, ray_matches] * \
np.hstack((rays[:, 1], np.ones((rays.shape[0], 1)))
).T.reshape(1, 4, -1)

rays_u_r[:, 0] = np.sum(tmp0, axis=1).T[:, :3] - T[ray_matches]
rays_u_r[:, 1] = np.sum(tmp1, axis=1).T[:, :3] - T[ray_matches]

rays_u_v = np.zeros_like(rays[vert_matches])

tmp0 = M * np.hstack((rays[vert_matches, 0], np.ones((verts.shape[0], 1)))).T.reshape(1, 4, -1)
tmp1 = M * np.hstack((rays[vert_matches, 1], np.ones((verts.shape[0], 1)))).T.reshape(1, 4, -1)
tmp0 = M * np.hstack((rays[vert_matches, 0],
np.ones((verts.shape[0], 1)))).T.reshape(1, 4, -1)
tmp1 = M * np.hstack((rays[vert_matches, 1],
np.ones((verts.shape[0], 1)))).T.reshape(1, 4, -1)

rays_u_v[:, 0] = np.sum(tmp0, axis=1).T[:, :3] - T
rays_u_v[:, 1] = np.sum(tmp1, axis=1).T[:, :3] - T

valid_rays = dist[np.vstack((ray_matches, range(dist.shape[1]))).tolist()] < 0.12
valid_verts = dist[np.vstack((range(dist.shape[0]), vert_matches)).tolist()] < 0.03
valid_rays = dist[np.vstack(
(ray_matches, range(dist.shape[1]))).tolist()] < 0.12
valid_verts = dist[np.vstack(
(range(dist.shape[0]), vert_matches)).tolist()] < 0.03

ray_matches = ray_matches[valid_rays]

return np.concatenate((v_ids[ray_matches], v_ids[valid_verts])), \
np.concatenate((rays_u_r[valid_rays], rays_u_v[valid_verts]))
np.concatenate((rays_u_r[valid_rays], rays_u_v[valid_verts]))


def rays_from_points(points, camera):
Expand All @@ -73,9 +87,12 @@ def rays_from_points(points, camera):
def rays_from_silh(mask, camera):

if cv2.__version__[0] == '2':
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, _ = cv2.findContours(
mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
else:
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, _ = cv2.findContours(
mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

silh = np.zeros_like(mask)

Expand All @@ -92,7 +109,8 @@ def ray_objective(f, sigma, base_smpl, camera, vis_rn_b, vis_rn_m):
base_smpl.pose[:] = f.pose
camera.t[:] = f.trans

f.v_ids, f.rays_u = unpose_and_select_rays(f.rays, f.Vi, base_smpl, vis_rn_b, vis_rn_m)
f.v_ids, f.rays_u = unpose_and_select_rays(
f.rays, f.Vi, base_smpl, vis_rn_b, vis_rn_m)
f.verts = base_smpl.v_shaped_personal[f.v_ids]
f.dist = distance_function(f.rays_u, f.verts)

Expand Down
30 changes: 30 additions & 0 deletions mask_numpy2pic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
import numpy as np
import cv2
import h5py
import argparse

# Command line: input HDF5 mask file, output folder for per-frame PNGs.
parser = argparse.ArgumentParser()
parser.add_argument('input', type=str)
parser.add_argument('output', type=str)
args = parser.parse_args()

input_file = args.input
output_folder = args.output


with h5py.File(input_file, "r") as f:
    # Show the available groups, then read the first one.
    print("Keys: %s" % f.keys())
    a_group_key = list(f.keys())[0]

    # Each dataset entry is one per-frame mask array.
    data = list(f[a_group_key])
    for frame, mask in enumerate(data):
        print(frame)
        print(mask.shape)
        # Convert the stored mask into a white-on-black 8-bit image by
        # masking a solid white canvas (assumes 1080x1080 masks — TODO
        # confirm against the writer).
        white_img = np.ones((1080, 1080), dtype='uint8') * 255
        mask = cv2.bitwise_and(white_img, white_img, mask=mask)
        print(mask.dtype)
        cv2.imwrite(f"{output_folder}/{frame}.png", mask)
72 changes: 72 additions & 0 deletions mask_person_by_bg_subtrack.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import cv2
import numpy as np

# Input video, static background shot, and output locations.
vdo_file = "E:/CMU Med Data Analysis/3d_reconstruck/dataset/input/Noratap/noratap_360.mp4"
bg_file = "E:/CMU Med Data Analysis/3d_reconstruck/dataset/input/Noratap/bg.jpg"

mask_out_path = "E:/CMU Med Data Analysis/3d_reconstruck/dataset/input/Noratap/mask"

# Set up the output video writer: 1080x1080 @ 25 fps, DIVX codec.
vdo_name = "E:/CMU Med Data Analysis/3d_reconstruck/dataset/input/Noratap/noratap_360_1080.avi"
print("VDO Save ", vdo_name)
vdo_out = cv2.VideoWriter(
    vdo_name, cv2.VideoWriter_fourcc(*'DIVX'), 25, (1080, 1080))

cap = cv2.VideoCapture(vdo_file)
img_bg = cv2.imread(bg_file)

# NOTE(review): numpy shape is (height, width), so `w` actually holds the
# image HEIGHT here. The +350 horizontal offset below appears tuned around
# that value — confirm against the real footage before renaming/"fixing".
w = img_bg.shape[0]
h = img_bg.shape[1]

target_size = 720

# Square crop window, shifted 350 px to the right.
s_x = (w // 2 - target_size // 2) + 350
e_x = (w // 2 + target_size // 2) + 350
s_y = 0
e_y = target_size

img_bg = img_bg[s_y:e_y, s_x:e_x]

frame = 0

if not cap.isOpened():
    print("Error opening video stream or file")

while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break

    # Apply the same crop as the background reference.
    img = img[s_y:e_y, s_x:e_x]
    cv2.imshow('Img', img)

    # Background subtraction: difference against the static background,
    # convert to grayscale, then binarise at threshold 30.
    img_mask = cv2.subtract(img_bg, img)
    img_mask = cv2.cvtColor(img_mask, cv2.COLOR_BGR2GRAY)
    ret_th, img_mask = cv2.threshold(img_mask, 30, 255, cv2.THRESH_BINARY)

    cv2.imshow('Mask', img_mask)

    # Upscale both mask and frame to the 1080x1080 output size.
    img_mask = cv2.resize(img_mask, (1080, 1080))
    img = cv2.resize(img, (1080, 1080))

    # Persist the per-frame mask and append the frame to the output video.
    cv2.imwrite(f"{mask_out_path}/{frame}.png", img_mask)
    vdo_out.write(img)

    frame += 1

    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

vdo_out.release()
cap.release()
cv2.destroyAllWindows()
6 changes: 4 additions & 2 deletions models/bodyparts.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

import cPickle as pkl
import pickle as pkl
import numpy as np

_cache = None
Expand All @@ -12,7 +12,9 @@ def get_bodypart_vertex_ids():

if _cache is None:
with open('assets/bodyparts.pkl', 'rb') as fp:
_cache = pkl.load(fp)
u = pkl._Unpickler(fp)
u.encoding = 'latin1'
_cache = u.load()

return _cache

Expand Down
Loading