From 2067296290d0a3f60017225204f76f1647ab7737 Mon Sep 17 00:00:00 2001 From: Tluszczyk Date: Tue, 9 Jul 2024 17:45:32 +0200 Subject: [PATCH] Added possibility to use it as a module in other projects --- {src => build/lib/openpose}/__init__.py | 0 {src => build/lib/openpose}/body.py | 4 +- {src => build/lib/openpose}/hand.py | 4 +- .../lib/openpose}/hand_model_outputsize.py | 2 +- {src => build/lib/openpose}/model.py | 0 {src => build/lib/openpose}/util.py | 0 demo.py | 8 +- demo_camera.py | 8 +- dist/openpose-0.1.0-py3-none-any.whl | Bin 0 -> 12582 bytes dist/openpose-0.1.0.tar.gz | Bin 0 -> 12356 bytes pyproject.toml | 11 + src/openpose.egg-info/PKG-INFO | 124 ++++++++++ src/openpose.egg-info/SOURCES.txt | 12 + src/openpose.egg-info/dependency_links.txt | 1 + src/openpose.egg-info/top_level.txt | 1 + src/openpose/__init__.py | 0 src/openpose/body.py | 218 +++++++++++++++++ src/openpose/hand.py | 85 +++++++ .../hand_model_output_size.json | 0 src/openpose/hand_model_outputsize.py | 17 ++ src/openpose/model.py | 219 ++++++++++++++++++ src/openpose/util.py | 198 ++++++++++++++++ 22 files changed, 899 insertions(+), 13 deletions(-) rename {src => build/lib/openpose}/__init__.py (100%) rename {src => build/lib/openpose}/body.py (99%) rename {src => build/lib/openpose}/hand.py (98%) rename {src => build/lib/openpose}/hand_model_outputsize.py (88%) rename {src => build/lib/openpose}/model.py (100%) rename {src => build/lib/openpose}/util.py (100%) create mode 100644 dist/openpose-0.1.0-py3-none-any.whl create mode 100644 dist/openpose-0.1.0.tar.gz create mode 100644 pyproject.toml create mode 100644 src/openpose.egg-info/PKG-INFO create mode 100644 src/openpose.egg-info/SOURCES.txt create mode 100644 src/openpose.egg-info/dependency_links.txt create mode 100644 src/openpose.egg-info/top_level.txt create mode 100644 src/openpose/__init__.py create mode 100644 src/openpose/body.py create mode 100644 src/openpose/hand.py rename src/{ => openpose}/hand_model_output_size.json (100%) create mode 100644 src/openpose/hand_model_outputsize.py create mode 100644 src/openpose/model.py create mode 100644 src/openpose/util.py diff --git a/src/__init__.py b/build/lib/openpose/__init__.py similarity index 100% rename from src/__init__.py rename to build/lib/openpose/__init__.py diff --git a/src/body.py b/build/lib/openpose/body.py similarity index 99% rename from src/body.py rename to build/lib/openpose/body.py index ecf06938..df53d82f 100644 --- a/src/body.py +++ b/build/lib/openpose/body.py @@ -8,8 +8,8 @@ import torch from torchvision import transforms -from src import util -from src.model import bodypose_model +from openpose import util +from openpose.model import bodypose_model class Body(object): def __init__(self, model_path): diff --git a/src/hand.py b/build/lib/openpose/hand.py similarity index 98% rename from src/hand.py rename to build/lib/openpose/hand.py index 808aa13a..c5773b8f 100644 --- a/src/hand.py +++ b/build/lib/openpose/hand.py @@ -9,8 +9,8 @@ import torch from skimage.measure import label -from src.model import handpose_model -from src import util +from openpose.model import handpose_model +from openpose import util class Hand(object): def __init__(self, model_path): diff --git a/src/hand_model_outputsize.py b/build/lib/openpose/hand_model_outputsize.py similarity index 88% rename from src/hand_model_outputsize.py rename to build/lib/openpose/hand_model_outputsize.py index 57dd0705..e5bc5ab8 100644 --- a/src/hand_model_outputsize.py +++ b/build/lib/openpose/hand_model_outputsize.py @@ 
-2,7 +2,7 @@
 from tqdm import tqdm
 import json
 
-from src.model import handpose_model
+from openpose.model import handpose_model
 
 model = handpose_model()
 
diff --git a/src/model.py b/build/lib/openpose/model.py
similarity index 100%
rename from src/model.py
rename to build/lib/openpose/model.py
diff --git a/src/util.py b/build/lib/openpose/util.py
similarity index 100%
rename from src/util.py
rename to build/lib/openpose/util.py
diff --git a/demo.py b/demo.py
index 34ff7f47..4fb27b29 100644
--- a/demo.py
+++ b/demo.py
@@ -3,10 +3,10 @@
 import copy
 import numpy as np
 
-from src import model
-from src import util
-from src.body import Body
-from src.hand import Hand
+from openpose import model
+from openpose import util
+from openpose.body import Body
+from openpose.hand import Hand
 
 body_estimation = Body('model/body_pose_model.pth')
 hand_estimation = Hand('model/hand_pose_model.pth')
diff --git a/demo_camera.py b/demo_camera.py
index ad32930d..2e4222c9 100644
--- a/demo_camera.py
+++ b/demo_camera.py
@@ -4,10 +4,10 @@
 import numpy as np
 import torch
 
-from src import model
-from src import util
-from src.body import Body
-from src.hand import Hand
+from openpose import model
+from openpose import util
+from openpose.body import Body
+from openpose.hand import Hand
 
 body_estimation = Body('model/body_pose_model.pth')
 hand_estimation = Hand('model/hand_pose_model.pth')
diff --git a/dist/openpose-0.1.0-py3-none-any.whl b/dist/openpose-0.1.0-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..f5cf3e06845f4c3196e61c1e56b2d783bfc4d889
GIT binary patch
literal 12582
[base85-encoded wheel contents, 12582 bytes]

literal 0
HcmV?d00001

diff --git a/dist/openpose-0.1.0.tar.gz b/dist/openpose-0.1.0.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..3f7d5815a143e1748da5c0ea16a09eff7b9b17bf
GIT binary patch
literal 12356
[base85-encoded sdist contents, 12356 bytes]

literal 0
HcmV?d00001
zdsZ>CfuEFK+-HpBHDPl5=>g)b7(2LEPb3u=Hma0zTvK7*5>rDGN~V1?S%GE z%cCF;%3lkXR>xh0K8=c8gt#=1D7ALwRxo>Q4F=)f*WDBv zVgyS{JqaiNzu?=>3y1&&xc_K=0WtTze|Lf2^vt%3j}pL3tZ{u3a_2XBRse8syQ zA;8_mp{u<)Z37qvJu~B-Mx2V1DOF;(GmrN8K&mVynnKN>qtYL~Fu4hVfTt0pcUG2z8uhQ>yQ7y zZ^ag0AWTK}rR+z*e^uN6V?f)lAPP-^m06It*VpxK+Q2_e$mb5y>ydTd|2P3N#~?_3 MWou(FVQ{ej1rje)a{vGU literal 0 HcmV?d00001 diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..bee4b57d --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,11 @@ +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "openpose" +readme = "README.md" +version = "0.1.0" + +[tool.setuptools.dynamic] +dependencies = {file = ["requirements.txt"]} \ No newline at end of file diff --git a/src/openpose.egg-info/PKG-INFO b/src/openpose.egg-info/PKG-INFO new file mode 100644 index 00000000..fb1ee9d7 --- /dev/null +++ b/src/openpose.egg-info/PKG-INFO @@ -0,0 +1,124 @@ +Metadata-Version: 2.1 +Name: openpose +Version: 0.1.0 +Description-Content-Type: text/markdown + +## pytorch-openpose + +pytorch implementation of [openpose](https://github.com/CMU-Perceptual-Computing-Lab/openpose) including **Body and Hand Pose Estimation**, and the pytorch model is directly converted from [openpose](https://github.com/CMU-Perceptual-Computing-Lab/openpose) caffemodel by [caffemodel2pytorch](https://github.com/vadimkantorov/caffemodel2pytorch). You could implement face keypoint detection in the same way if you are interested in. Pay attention to that the face keypoint detector was trained using the procedure described in [Simon et al. 2017] for hands. + +openpose detects hand by the result of body pose estimation, please refer to the code of [handDetector.cpp](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp). +In the paper, it states as: +``` +This is an important detail: to use the keypoint detector in any practical situation, +we need a way to generate this bounding box. +We directly use the body pose estimation models from [29] and [4], +and use the wrist and elbow position to approximate the hand location, +assuming the hand extends 0.15 times the length of the forearm in the same direction. +``` + +If anybody wants a pure python wrapper, please refer to my [pytorch implementation](https://github.com/Hzzone/pytorch-openpose) of openpose, maybe it helps you to implement a standalone hand keypoint detector. + +Don't be mean to star this repo if it helps your research. + +### Getting Started + +#### Install Requriements + +Create a python 3.7 environement, eg: + + conda create -n pytorch-openpose python=3.7 + conda activate pytorch-openpose + +Install pytorch by following the quick start guide here (use pip) https://download.pytorch.org/whl/torch_stable.html + +Install other requirements with pip + + pip install -r requirements.txt + +#### Download the Models + +* [dropbox](https://www.dropbox.com/sh/7xbup2qsn7vvjxo/AABWFksdlgOMXR_r5v3RwKRYa?dl=0) +* [baiduyun](https://pan.baidu.com/s/1IlkvuSi0ocNckwbnUe7j-g) +* [google drive](https://drive.google.com/drive/folders/1JsvI4M4ZTg98fmnCZLFM-3TeovnCRElG?usp=sharing) + +`*.pth` files are pytorch model, you could also download caffemodel file if you want to use caffe as backend. 
+ +Download the pytorch models and put them in a directory named `model` in the project root directory + +#### Run the Demo + +Run: + + python demo_camera.py + +to run a demo with a feed from your webcam or run + + python demo.py + +to use a image from the images folder or run + + python demo_video.py + +to process a video file (requires [ffmpeg-python][ffmpeg]). + +[ffmpeg]: https://pypi.org/project/ffmpeg-python/ + +### Todo list +- [x] convert caffemodel to pytorch. +- [x] Body Pose Estimation. +- [x] Hand Pose Estimation. +- [ ] Performance test. +- [ ] Speed up. + +### Demo +#### Skeleton + +![](images/skeleton.jpg) +#### Body Pose Estimation + +![](images/body_preview.jpg) + +#### Hand Pose Estimation +![](images/hand_preview.png) + +#### Body + Hand +![](images/demo_preview.png) + +#### Video Body + +![](images/kc-e129SBb4-sample.processed.gif) + +Attribution: [this video](https://www.youtube.com/watch?v=kc-e129SBb4). + +#### Video Hand + +![](images/yOAmYSW3WyU-sample.small.processed.gif) + +Attribution: [this video](https://www.youtube.com/watch?v=yOAmYSW3WyU). + +### Citation +Please cite these papers in your publications if it helps your research (the face keypoint detector was trained using the procedure described in [Simon et al. 2017] for hands): + +``` +@inproceedings{cao2017realtime, + author = {Zhe Cao and Tomas Simon and Shih-En Wei and Yaser Sheikh}, + booktitle = {CVPR}, + title = {Realtime Multi-Person 2D Pose Estimation using Part Affinity Fields}, + year = {2017} +} + +@inproceedings{simon2017hand, + author = {Tomas Simon and Hanbyul Joo and Iain Matthews and Yaser Sheikh}, + booktitle = {CVPR}, + title = {Hand Keypoint Detection in Single Images using Multiview Bootstrapping}, + year = {2017} +} + +@inproceedings{wei2016cpm, + author = {Shih-En Wei and Varun Ramakrishna and Takeo Kanade and Yaser Sheikh}, + booktitle = {CVPR}, + title = {Convolutional pose machines}, + year = {2016} +} +``` diff --git a/src/openpose.egg-info/SOURCES.txt b/src/openpose.egg-info/SOURCES.txt new file mode 100644 index 00000000..0150847e --- /dev/null +++ b/src/openpose.egg-info/SOURCES.txt @@ -0,0 +1,12 @@ +README.md +pyproject.toml +src/openpose/__init__.py +src/openpose/body.py +src/openpose/hand.py +src/openpose/hand_model_outputsize.py +src/openpose/model.py +src/openpose/util.py +src/openpose.egg-info/PKG-INFO +src/openpose.egg-info/SOURCES.txt +src/openpose.egg-info/dependency_links.txt +src/openpose.egg-info/top_level.txt \ No newline at end of file diff --git a/src/openpose.egg-info/dependency_links.txt b/src/openpose.egg-info/dependency_links.txt new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/src/openpose.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/src/openpose.egg-info/top_level.txt b/src/openpose.egg-info/top_level.txt new file mode 100644 index 00000000..33802247 --- /dev/null +++ b/src/openpose.egg-info/top_level.txt @@ -0,0 +1 @@ +openpose diff --git a/src/openpose/__init__.py b/src/openpose/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/openpose/body.py b/src/openpose/body.py new file mode 100644 index 00000000..df53d82f --- /dev/null +++ b/src/openpose/body.py @@ -0,0 +1,218 @@ +import cv2 +import numpy as np +import math +import time +from scipy.ndimage.filters import gaussian_filter +import matplotlib.pyplot as plt +import matplotlib +import torch +from torchvision import transforms + +from openpose import util +from openpose.model import bodypose_model + +class Body(object): + def __init__(self, 
model_path): + self.model = bodypose_model() + if torch.cuda.is_available(): + self.model = self.model.cuda() + model_dict = util.transfer(self.model, torch.load(model_path)) + self.model.load_state_dict(model_dict) + self.model.eval() + + def __call__(self, oriImg): + # scale_search = [0.5, 1.0, 1.5, 2.0] + scale_search = [0.5] + boxsize = 368 + stride = 8 + padValue = 128 + thre1 = 0.1 + thre2 = 0.05 + multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search] + heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19)) + paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38)) + + for m in range(len(multiplier)): + scale = multiplier[m] + imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) + imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue) + im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 + im = np.ascontiguousarray(im) + + data = torch.from_numpy(im).float() + if torch.cuda.is_available(): + data = data.cuda() + # data = data.permute([2, 0, 1]).unsqueeze(0).float() + with torch.no_grad(): + Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data) + Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy() + Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy() + + # extract outputs, resize, and remove padding + # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps + heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps + heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) + heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] + heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC) + + # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs + paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs + paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) + paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] + paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC) + + heatmap_avg += heatmap_avg + heatmap / len(multiplier) + paf_avg += + paf / len(multiplier) + + all_peaks = [] + peak_counter = 0 + + for part in range(18): + map_ori = heatmap_avg[:, :, part] + one_heatmap = gaussian_filter(map_ori, sigma=3) + + map_left = np.zeros(one_heatmap.shape) + map_left[1:, :] = one_heatmap[:-1, :] + map_right = np.zeros(one_heatmap.shape) + map_right[:-1, :] = one_heatmap[1:, :] + map_up = np.zeros(one_heatmap.shape) + map_up[:, 1:] = one_heatmap[:, :-1] + map_down = np.zeros(one_heatmap.shape) + map_down[:, :-1] = one_heatmap[:, 1:] + + peaks_binary = np.logical_and.reduce( + (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1)) + peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse + peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks] + peak_id = range(peak_counter, peak_counter + len(peaks)) + peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))] + + all_peaks.append(peaks_with_score_and_id) + peak_counter += len(peaks) + + # find connection in the specified sequence, center 29 is in the position 15 + limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], 
[6, 7], [7, 8], [2, 9], [9, 10], \ + [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \ + [1, 16], [16, 18], [3, 17], [6, 18]] + # the middle joints heatmap correpondence + mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \ + [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \ + [55, 56], [37, 38], [45, 46]] + + connection_all = [] + special_k = [] + mid_num = 10 + + for k in range(len(mapIdx)): + score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]] + candA = all_peaks[limbSeq[k][0] - 1] + candB = all_peaks[limbSeq[k][1] - 1] + nA = len(candA) + nB = len(candB) + indexA, indexB = limbSeq[k] + if (nA != 0 and nB != 0): + connection_candidate = [] + for i in range(nA): + for j in range(nB): + vec = np.subtract(candB[j][:2], candA[i][:2]) + norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) + norm = max(0.001, norm) + vec = np.divide(vec, norm) + + startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \ + np.linspace(candA[i][1], candB[j][1], num=mid_num))) + + vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \ + for I in range(len(startend))]) + vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \ + for I in range(len(startend))]) + + score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1]) + score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min( + 0.5 * oriImg.shape[0] / norm - 1, 0) + criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts) + criterion2 = score_with_dist_prior > 0 + if criterion1 and criterion2: + connection_candidate.append( + [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]]) + + connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True) + connection = np.zeros((0, 5)) + for c in range(len(connection_candidate)): + i, j, s = connection_candidate[c][0:3] + if (i not in connection[:, 3] and j not in connection[:, 4]): + connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]]) + if (len(connection) >= min(nA, nB)): + break + + connection_all.append(connection) + else: + special_k.append(k) + connection_all.append([]) + + # last number in each row is the total parts number of that person + # the second last number in each row is the score of the overall configuration + subset = -1 * np.ones((0, 20)) + candidate = np.array([item for sublist in all_peaks for item in sublist]) + + for k in range(len(mapIdx)): + if k not in special_k: + partAs = connection_all[k][:, 0] + partBs = connection_all[k][:, 1] + indexA, indexB = np.array(limbSeq[k]) - 1 + + for i in range(len(connection_all[k])): # = 1:size(temp,1) + found = 0 + subset_idx = [-1, -1] + for j in range(len(subset)): # 1:size(subset,1): + if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]: + subset_idx[found] = j + found += 1 + + if found == 1: + j = subset_idx[0] + if subset[j][indexB] != partBs[i]: + subset[j][indexB] = partBs[i] + subset[j][-1] += 1 + subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] + elif found == 2: # if found 2 and disjoint, merge them + j1, j2 = subset_idx + membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2] + if len(np.nonzero(membership == 2)[0]) == 0: # merge + subset[j1][:-2] += (subset[j2][:-2] + 1) + subset[j1][-2:] += subset[j2][-2:] + subset[j1][-2] += connection_all[k][i][2] + subset = np.delete(subset, j2, 0) 
+ else: # as like found == 1 + subset[j1][indexB] = partBs[i] + subset[j1][-1] += 1 + subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] + + # if find no partA in the subset, create a new subset + elif not found and k < 17: + row = -1 * np.ones(20) + row[indexA] = partAs[i] + row[indexB] = partBs[i] + row[-1] = 2 + row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2] + subset = np.vstack([subset, row]) + # delete some rows of subset which has few parts occur + deleteIdx = [] + for i in range(len(subset)): + if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4: + deleteIdx.append(i) + subset = np.delete(subset, deleteIdx, axis=0) + + # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts + # candidate: x, y, score, id + return candidate, subset + +if __name__ == "__main__": + body_estimation = Body('../model/body_pose_model.pth') + + test_image = '../images/ski.jpg' + oriImg = cv2.imread(test_image) # B,G,R order + candidate, subset = body_estimation(oriImg) + canvas = util.draw_bodypose(oriImg, candidate, subset) + plt.imshow(canvas[:, :, [2, 1, 0]]) + plt.show() diff --git a/src/openpose/hand.py b/src/openpose/hand.py new file mode 100644 index 00000000..c5773b8f --- /dev/null +++ b/src/openpose/hand.py @@ -0,0 +1,85 @@ +import cv2 +import json +import numpy as np +import math +import time +from scipy.ndimage.filters import gaussian_filter +import matplotlib.pyplot as plt +import matplotlib +import torch +from skimage.measure import label + +from openpose.model import handpose_model +from openpose import util + +class Hand(object): + def __init__(self, model_path): + self.model = handpose_model() + if torch.cuda.is_available(): + self.model = self.model.cuda() + model_dict = util.transfer(self.model, torch.load(model_path)) + self.model.load_state_dict(model_dict) + self.model.eval() + + def __call__(self, oriImg): + scale_search = [0.5, 1.0, 1.5, 2.0] + # scale_search = [0.5] + boxsize = 368 + stride = 8 + padValue = 128 + thre = 0.05 + multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search] + heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22)) + # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38)) + + for m in range(len(multiplier)): + scale = multiplier[m] + imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) + imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue) + im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 + im = np.ascontiguousarray(im) + + data = torch.from_numpy(im).float() + if torch.cuda.is_available(): + data = data.cuda() + # data = data.permute([2, 0, 1]).unsqueeze(0).float() + with torch.no_grad(): + output = self.model(data).cpu().numpy() + # output = self.model(data).numpy()q + + # extract outputs, resize, and remove padding + heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps + heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) + heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] + heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC) + + heatmap_avg += heatmap / len(multiplier) + + all_peaks = [] + for part in range(21): + map_ori = heatmap_avg[:, :, part] + one_heatmap = gaussian_filter(map_ori, sigma=3) + binary = np.ascontiguousarray(one_heatmap > 
thre, dtype=np.uint8) + # 全部小于阈值 + if np.sum(binary) == 0: + all_peaks.append([0, 0]) + continue + label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim) + max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1 + label_img[label_img != max_index] = 0 + map_ori[label_img == 0] = 0 + + y, x = util.npmax(map_ori) + all_peaks.append([x, y]) + return np.array(all_peaks) + +if __name__ == "__main__": + hand_estimation = Hand('../model/hand_pose_model.pth') + + # test_image = '../images/hand.jpg' + test_image = '../images/hand.jpg' + oriImg = cv2.imread(test_image) # B,G,R order + peaks = hand_estimation(oriImg) + canvas = util.draw_handpose(oriImg, peaks, True) + cv2.imshow('', canvas) + cv2.waitKey(0) \ No newline at end of file diff --git a/src/hand_model_output_size.json b/src/openpose/hand_model_output_size.json similarity index 100% rename from src/hand_model_output_size.json rename to src/openpose/hand_model_output_size.json diff --git a/src/openpose/hand_model_outputsize.py b/src/openpose/hand_model_outputsize.py new file mode 100644 index 00000000..e5bc5ab8 --- /dev/null +++ b/src/openpose/hand_model_outputsize.py @@ -0,0 +1,17 @@ +import torch +from tqdm import tqdm +import json + +from openpose.model import handpose_model + +model = handpose_model() + +size = {} +for i in tqdm(range(10, 1000)): + data = torch.randn(1, 3, i, i) + if torch.cuda.is_available(): + data = data.cuda() + size[i] = model(data).size(2) + +with open('hand_model_output_size.json') as f: + json.dump(size, f) diff --git a/src/openpose/model.py b/src/openpose/model.py new file mode 100644 index 00000000..5dfc80de --- /dev/null +++ b/src/openpose/model.py @@ -0,0 +1,219 @@ +import torch +from collections import OrderedDict + +import torch +import torch.nn as nn + +def make_layers(block, no_relu_layers): + layers = [] + for layer_name, v in block.items(): + if 'pool' in layer_name: + layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], + padding=v[2]) + layers.append((layer_name, layer)) + else: + conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], + kernel_size=v[2], stride=v[3], + padding=v[4]) + layers.append((layer_name, conv2d)) + if layer_name not in no_relu_layers: + layers.append(('relu_'+layer_name, nn.ReLU(inplace=True))) + + return nn.Sequential(OrderedDict(layers)) + +class bodypose_model(nn.Module): + def __init__(self): + super(bodypose_model, self).__init__() + + # these layers have no relu layer + no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\ + 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\ + 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\ + 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1'] + blocks = {} + block0 = OrderedDict([ + ('conv1_1', [3, 64, 3, 1, 1]), + ('conv1_2', [64, 64, 3, 1, 1]), + ('pool1_stage1', [2, 2, 0]), + ('conv2_1', [64, 128, 3, 1, 1]), + ('conv2_2', [128, 128, 3, 1, 1]), + ('pool2_stage1', [2, 2, 0]), + ('conv3_1', [128, 256, 3, 1, 1]), + ('conv3_2', [256, 256, 3, 1, 1]), + ('conv3_3', [256, 256, 3, 1, 1]), + ('conv3_4', [256, 256, 3, 1, 1]), + ('pool3_stage1', [2, 2, 0]), + ('conv4_1', [256, 512, 3, 1, 1]), + ('conv4_2', [512, 512, 3, 1, 1]), + ('conv4_3_CPM', [512, 256, 3, 1, 1]), + ('conv4_4_CPM', [256, 128, 3, 1, 1]) + ]) + + + # Stage 1 + block1_1 = OrderedDict([ + ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), + ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]), + ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]), + ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), + 
('conv5_5_CPM_L1', [512, 38, 1, 1, 0]) + ]) + + block1_2 = OrderedDict([ + ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), + ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]), + ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]), + ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]), + ('conv5_5_CPM_L2', [512, 19, 1, 1, 0]) + ]) + blocks['block1_1'] = block1_1 + blocks['block1_2'] = block1_2 + + self.model0 = make_layers(block0, no_relu_layers) + + # Stages 2 - 6 + for i in range(2, 7): + blocks['block%d_1' % i] = OrderedDict([ + ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]), + ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]), + ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0]) + ]) + + blocks['block%d_2' % i] = OrderedDict([ + ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]), + ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]), + ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0]) + ]) + + for k in blocks.keys(): + blocks[k] = make_layers(blocks[k], no_relu_layers) + + self.model1_1 = blocks['block1_1'] + self.model2_1 = blocks['block2_1'] + self.model3_1 = blocks['block3_1'] + self.model4_1 = blocks['block4_1'] + self.model5_1 = blocks['block5_1'] + self.model6_1 = blocks['block6_1'] + + self.model1_2 = blocks['block1_2'] + self.model2_2 = blocks['block2_2'] + self.model3_2 = blocks['block3_2'] + self.model4_2 = blocks['block4_2'] + self.model5_2 = blocks['block5_2'] + self.model6_2 = blocks['block6_2'] + + + def forward(self, x): + + out1 = self.model0(x) + + out1_1 = self.model1_1(out1) + out1_2 = self.model1_2(out1) + out2 = torch.cat([out1_1, out1_2, out1], 1) + + out2_1 = self.model2_1(out2) + out2_2 = self.model2_2(out2) + out3 = torch.cat([out2_1, out2_2, out1], 1) + + out3_1 = self.model3_1(out3) + out3_2 = self.model3_2(out3) + out4 = torch.cat([out3_1, out3_2, out1], 1) + + out4_1 = self.model4_1(out4) + out4_2 = self.model4_2(out4) + out5 = torch.cat([out4_1, out4_2, out1], 1) + + out5_1 = self.model5_1(out5) + out5_2 = self.model5_2(out5) + out6 = torch.cat([out5_1, out5_2, out1], 1) + + out6_1 = self.model6_1(out6) + out6_2 = self.model6_2(out6) + + return out6_1, out6_2 + +class handpose_model(nn.Module): + def __init__(self): + super(handpose_model, self).__init__() + + # these layers have no relu layer + no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\ + 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6'] + # stage 1 + block1_0 = OrderedDict([ + ('conv1_1', [3, 64, 3, 1, 1]), + ('conv1_2', [64, 64, 3, 1, 1]), + ('pool1_stage1', [2, 2, 0]), + ('conv2_1', [64, 128, 3, 1, 1]), + ('conv2_2', [128, 128, 3, 1, 1]), + ('pool2_stage1', [2, 2, 0]), + ('conv3_1', [128, 256, 3, 1, 1]), + ('conv3_2', [256, 256, 3, 1, 1]), + ('conv3_3', [256, 256, 3, 1, 1]), + ('conv3_4', [256, 256, 3, 1, 1]), + ('pool3_stage1', [2, 2, 0]), + ('conv4_1', [256, 512, 3, 1, 1]), + ('conv4_2', [512, 512, 3, 1, 1]), + ('conv4_3', [512, 512, 3, 1, 1]), + ('conv4_4', [512, 512, 3, 1, 1]), + ('conv5_1', [512, 512, 3, 1, 1]), + ('conv5_2', [512, 512, 3, 1, 1]), + ('conv5_3_CPM', [512, 128, 3, 1, 1]) + ]) + + block1_1 = OrderedDict([ + ('conv6_1_CPM', [128, 512, 1, 1, 0]), + ('conv6_2_CPM', [512, 22, 1, 1, 0]) + ]) + + blocks = {} + blocks['block1_0'] = 
block1_0 + blocks['block1_1'] = block1_1 + + # stage 2-6 + for i in range(2, 7): + blocks['block%d' % i] = OrderedDict([ + ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]), + ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]), + ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0]) + ]) + + for k in blocks.keys(): + blocks[k] = make_layers(blocks[k], no_relu_layers) + + self.model1_0 = blocks['block1_0'] + self.model1_1 = blocks['block1_1'] + self.model2 = blocks['block2'] + self.model3 = blocks['block3'] + self.model4 = blocks['block4'] + self.model5 = blocks['block5'] + self.model6 = blocks['block6'] + + def forward(self, x): + out1_0 = self.model1_0(x) + out1_1 = self.model1_1(out1_0) + concat_stage2 = torch.cat([out1_1, out1_0], 1) + out_stage2 = self.model2(concat_stage2) + concat_stage3 = torch.cat([out_stage2, out1_0], 1) + out_stage3 = self.model3(concat_stage3) + concat_stage4 = torch.cat([out_stage3, out1_0], 1) + out_stage4 = self.model4(concat_stage4) + concat_stage5 = torch.cat([out_stage4, out1_0], 1) + out_stage5 = self.model5(concat_stage5) + concat_stage6 = torch.cat([out_stage5, out1_0], 1) + out_stage6 = self.model6(concat_stage6) + return out_stage6 + + diff --git a/src/openpose/util.py b/src/openpose/util.py new file mode 100644 index 00000000..16dc24a3 --- /dev/null +++ b/src/openpose/util.py @@ -0,0 +1,198 @@ +import numpy as np +import math +import cv2 +import matplotlib +from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas +from matplotlib.figure import Figure +import numpy as np +import matplotlib.pyplot as plt +import cv2 + + +def padRightDownCorner(img, stride, padValue): + h = img.shape[0] + w = img.shape[1] + + pad = 4 * [None] + pad[0] = 0 # up + pad[1] = 0 # left + pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down + pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right + + img_padded = img + pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1)) + img_padded = np.concatenate((pad_up, img_padded), axis=0) + pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1)) + img_padded = np.concatenate((pad_left, img_padded), axis=1) + pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1)) + img_padded = np.concatenate((img_padded, pad_down), axis=0) + pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1)) + img_padded = np.concatenate((img_padded, pad_right), axis=1) + + return img_padded, pad + +# transfer caffe model to pytorch which will match the layer name +def transfer(model, model_weights): + transfered_model_weights = {} + for weights_name in model.state_dict().keys(): + transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])] + return transfered_model_weights + +# draw the body keypoint and lims +def draw_bodypose(canvas, candidate, subset): + stickwidth = 4 + limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \ + [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \ + [1, 16], [16, 18], [3, 17], [6, 18]] + + colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \ + [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \ + [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] + for i in range(18): + for 
n in range(len(subset)): + index = int(subset[n][i]) + if index == -1: + continue + x, y = candidate[index][0:2] + cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1) + for i in range(17): + for n in range(len(subset)): + index = subset[n][np.array(limbSeq[i]) - 1] + if -1 in index: + continue + cur_canvas = canvas.copy() + Y = candidate[index.astype(int), 0] + X = candidate[index.astype(int), 1] + mX = np.mean(X) + mY = np.mean(Y) + length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) + cv2.fillConvexPoly(cur_canvas, polygon, colors[i]) + canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0) + # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]]) + # plt.imshow(canvas[:, :, [2, 1, 0]]) + return canvas + +def draw_handpose(canvas, all_hand_peaks, show_number=False): + edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \ + [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]] + fig = Figure(figsize=plt.figaspect(canvas)) + + fig.subplots_adjust(0, 0, 1, 1) + fig.subplots_adjust(bottom=0, top=1, left=0, right=1) + bg = FigureCanvas(fig) + ax = fig.subplots() + ax.axis('off') + ax.imshow(canvas) + + width, height = ax.figure.get_size_inches() * ax.figure.get_dpi() + + for peaks in all_hand_peaks: + for ie, e in enumerate(edges): + if np.sum(np.all(peaks[e], axis=1)==0)==0: + x1, y1 = peaks[e[0]] + x2, y2 = peaks[e[1]] + ax.plot([x1, x2], [y1, y2], color=matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])) + + for i, keyponit in enumerate(peaks): + x, y = keyponit + ax.plot(x, y, 'r.') + if show_number: + ax.text(x, y, str(i)) + bg.draw() + canvas = np.fromstring(bg.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3) + return canvas + +# image drawed by opencv is not good. 
+def draw_handpose_by_opencv(canvas, peaks, show_number=False): + edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \ + [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]] + # cv2.rectangle(canvas, (x, y), (x+w, y+w), (0, 255, 0), 2, lineType=cv2.LINE_AA) + # cv2.putText(canvas, 'left' if is_left else 'right', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) + for ie, e in enumerate(edges): + if np.sum(np.all(peaks[e], axis=1)==0)==0: + x1, y1 = peaks[e[0]] + x2, y2 = peaks[e[1]] + cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2) + + for i, keyponit in enumerate(peaks): + x, y = keyponit + cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1) + if show_number: + cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA) + return canvas + +# detect hand according to body pose keypoints +# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp +def handDetect(candidate, subset, oriImg): + # right hand: wrist 4, elbow 3, shoulder 2 + # left hand: wrist 7, elbow 6, shoulder 5 + ratioWristElbow = 0.33 + detect_result = [] + image_height, image_width = oriImg.shape[0:2] + for person in subset.astype(int): + # if any of three not detected + has_left = np.sum(person[[5, 6, 7]] == -1) == 0 + has_right = np.sum(person[[2, 3, 4]] == -1) == 0 + if not (has_left or has_right): + continue + hands = [] + #left hand + if has_left: + left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]] + x1, y1 = candidate[left_shoulder_index][:2] + x2, y2 = candidate[left_elbow_index][:2] + x3, y3 = candidate[left_wrist_index][:2] + hands.append([x1, y1, x2, y2, x3, y3, True]) + # right hand + if has_right: + right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]] + x1, y1 = candidate[right_shoulder_index][:2] + x2, y2 = candidate[right_elbow_index][:2] + x3, y3 = candidate[right_wrist_index][:2] + hands.append([x1, y1, x2, y2, x3, y3, False]) + + for x1, y1, x2, y2, x3, y3, is_left in hands: + # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox + # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]); + # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]); + # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow); + # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder); + # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder); + x = x3 + ratioWristElbow * (x3 - x2) + y = y3 + ratioWristElbow * (y3 - y2) + distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2) + distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) + width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder) + # x-y refers to the center --> offset to topLeft point + # handRectangle.x -= handRectangle.width / 2.f; + # handRectangle.y -= handRectangle.height / 2.f; + x -= width / 2 + y -= width / 2 # width = height + # overflow the image + if x < 0: x = 0 + if y < 0: y = 0 + width1 = width + width2 = width + if x + width > image_width: width1 = image_width - x + if y + width > image_height: width2 = image_height - y + width = min(width1, width2) + # the max hand box value is 20 pixels + if 
width >= 20: + detect_result.append([int(x), int(y), int(width), is_left]) + + ''' + return value: [[x, y, w, True if left hand else False]]. + width=height since the network require squared input. + x, y is the coordinate of top left + ''' + return detect_result + +# get max index of 2d array +def npmax(array): + arrayindex = array.argmax(1) + arrayvalue = array.max(1) + i = arrayvalue.argmax() + j = arrayindex[i] + return i, j
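
Below is a minimal usage sketch of the module interface this patch packages. It mirrors the `demo.py` flow and only calls functions defined in the diff above (`Body`, `Hand`, `util.draw_bodypose`, `util.handDetect`, `util.draw_handpose`); it assumes the package has been installed (e.g. `pip install .`, or the wheel in `dist/`), that the `.pth` weights from the README links sit in a local `model/` directory, and that a test image such as `images/ski.jpg` is available — the paths are assumptions, not part of the patch.

```python
# Sketch only: assumes the package is installed and the pose models are in ./model.
import cv2
import matplotlib.pyplot as plt
import numpy as np

from openpose import util
from openpose.body import Body
from openpose.hand import Hand

body_estimation = Body('model/body_pose_model.pth')
hand_estimation = Hand('model/hand_pose_model.pth')

oriImg = cv2.imread('images/ski.jpg')        # B,G,R order, as in demo.py
candidate, subset = body_estimation(oriImg)  # body keypoints for every detected person
canvas = util.draw_bodypose(oriImg.copy(), candidate, subset)

# Hand boxes are derived from the body keypoints (see util.handDetect); each box is
# cropped, run through the hand model, and the peaks are shifted back to image coordinates.
all_hand_peaks = []
for x, y, w, is_left in util.handDetect(candidate, subset, oriImg):
    peaks = hand_estimation(oriImg[y:y + w, x:x + w, :])
    peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
    peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
    all_hand_peaks.append(peaks)
canvas = util.draw_handpose(canvas, all_hand_peaks)

plt.imshow(canvas[:, :, [2, 1, 0]])
plt.axis('off')
plt.show()
```

These `openpose.*` import paths are the same ones this patch switches `demo.py` and `demo_camera.py` to, so an editable install (`pip install -e .`) keeps the bundled demos working as well.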