Update render script and Makefile

# Awesome - Most Cited Deep Learning Papers

[![Awesome](https://cdn.rawgit.com/sindresorhus/awesome/d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)](https://github.com/sindresorhus/awesome)

A curated list of the most cited deep learning papers (2012-2016)

We believe that there exist *classic* deep learning papers which are worth reading regardless of their application domain. Rather than providing an overwhelming amount of papers, we would like to provide a *curated list* of the awesome deep learning papers which are considered as *must-reads* in certain research domains.

## Background

Before this list, there exist other awesome deep learning lists, for example, [Deep Vision](https://github.com/kjw0612/awesome-deep-vision) and [Awesome Recurrent Neural Networks](https://github.com/kjw0612/awesome-rnn). Also, after this list came out, another awesome list for deep learning beginners, called [Deep Learning Papers Reading Roadmap](https://github.com/songrotek/Deep-Learning-Papers-Reading-Roadmap), has been created and loved by many deep learning researchers.

Although the *Roadmap List* includes lots of important deep learning papers, it feels overwhelming for me to read them all. As I mentioned in the introduction, I believe that seminal works can give us lessons regardless of their application domain. Thus, I would like to introduce **top 100 deep learning papers** here as a good starting point for an overview of deep learning research.

To get news about newly released papers every day, follow my [twitter](https://twitter.com/TerryUm_ML) or [facebook page](https://www.facebook.com/terryum.io/)!

- **2012** : +800 citations
- **~2012** : *Old Papers* (by discussion)

Please note that we prefer seminal deep learning papers that can be applied to various research fields rather than application papers. For that reason, some papers that meet the criteria may not be accepted while others can be. It depends on the impact of the paper, its applicability to other research, the scarcity of the research domain, and so on.

**We need your contributions!**

If you have any suggestions (missing papers, new papers, key researchers or typos), please feel free to edit and submit a pull request.
(Please read the [contributing guide](https://github.com/terryum/awesome-deep-learning-papers/blob/master/Contributing.md) for further instructions, though just letting me know the titles of papers can also be a big contribution to us.)

(Update) You can download all top-100 papers with [this](https://github.com/terryum/awesome-deep-learning-papers/blob/master/fetch_papers.py) and collect all authors' names with [this](https://github.com/terryum/awesome-deep-learning-papers/blob/master/get_authors.py). Also, a [bib file](https://github.com/terryum/awesome-deep-learning-papers/blob/master/top100papers.bib) for all top-100 papers is available. Thanks, doodhwala, [Sven](https://github.com/sunshinemyson) and [grepinsight](https://github.com/grepinsight)!
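
For readers who would rather script the download themselves, here is a minimal sketch of the same idea. It is an illustration, not the repository's fetch_papers.py: the local README.md path, the `[[pdf]](...)` link pattern, and the `papers/` output folder are all assumptions.

```python
# Minimal sketch (not the repository's fetch_papers.py): download every
# paper PDF linked from a local copy of this README. The README path,
# the [[pdf]](...) link pattern, and the papers/ folder are assumptions.
import os
import re
import urllib.request

with open("README.md", encoding="utf-8") as f:
    readme = f.read()

# Collect the URL that follows each [[pdf]] marker.
urls = re.findall(r"\[\[pdf\]\]\((http[^)]+)\)", readme)

os.makedirs("papers", exist_ok=True)
for url in urls:
    name = url.rstrip("/").split("/")[-1]
    if not name.endswith(".pdf"):
        name += ".pdf"
    try:
        urllib.request.urlretrieve(url, os.path.join("papers", name))
        print("saved", name)
    except OSError as e:  # some links may have moved; skip them
        print("skip", url, e)
```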

Can anyone contribute code for obtaining statistics on the authors of the top-100 papers?
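
As a possible starting point, the sketch below counts how often each author name appears in the list entries. The entry pattern it assumes, `(year), authors [[pdf]]`, and the splitting on `et al.` and `and` are heuristics about the README format, not contributed code.

```python
# Minimal sketch: rough author-frequency statistics from the list entries.
# Assumes entries shaped like "- **Title** (2016), C. Szegedy et al. [[pdf]](...)";
# the parsing below is a heuristic, not contributed code.
import re
from collections import Counter

with open("README.md", encoding="utf-8") as f:
    readme = f.read()

# Capture the author fragment between "(year)," and the [[pdf]] link.
fragments = re.findall(r"\(\d{4}\),\s*(.+?)\s*\[\[pdf\]\]", readme)

counts = Counter()
for fragment in fragments:
    fragment = fragment.replace(" et al.", "")
    # "X. Glorot and Y. Bengio" -> two names; strip stray trailing periods.
    for name in re.split(r"\s+and\s+|,\s*", fragment):
        name = name.strip(" .")
        if name:
            counts[name] += 1

for name, n in counts.most_common(10):
    print(f"{n:3d}  {name}")
```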

## Convolutional Neural Network Models

- **Rethinking the inception architecture for computer vision** (2016), C. Szegedy et al. [[pdf]](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Szegedy_Rethinking_the_Inception_CVPR_2016_paper.pdf)
- **Inception-v4, inception-resnet and the impact of residual connections on learning** (2016), C. Szegedy et al. [[pdf]](http://arxiv.org/pdf/1602.07261)
- **Identity Mappings in Deep Residual Networks** (2016), K. He et al. [[pdf]](https://arxiv.org/pdf/1603.05027v2.pdf)
- **Deep residual learning for image recognition** (2016), K. He et al. [[pdf]](http://arxiv.org/pdf/1512.03385)
- **OverFeat: Integrated recognition, localization and detection using convolutional networks** (2013), P. Sermanet et al. [[pdf]](http://arxiv.org/pdf/1312.6229)
- **Maxout networks** (2013), I. Goodfellow et al. [[pdf]](http://arxiv.org/pdf/1302.4389v4)
- **Network in network** (2013), M. Lin et al. [[pdf]](http://arxiv.org/pdf/1312.4400)
- **ImageNet classification with deep convolutional neural networks** (2012), A. Krizhevsky et al. [[pdf]](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)

## Image: Segmentation / Object Detection

- **You only look once: Unified, real-time object detection** (2016), J. Redmon et al. [[pdf]](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Redmon_You_Only_Look_CVPR_2016_paper.pdf)
- **Fully convolutional networks for semantic segmentation** (2015), J. Long et al. [[pdf]](http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Long_Fully_Convolutional_Networks_2015_CVPR_paper.pdf)
- **Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks** (2015), S. Ren et al. [[pdf]](http://papers.nips.cc/paper/5638-faster-r-cnn-towards-real-time-object-detection-with-region-proposal-networks.pdf)
- **Fast R-CNN** (2015), R. Girshick [[pdf]](http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Girshick_Fast_R-CNN_ICCV_2015_paper.pdf)
- **DeepFace: Closing the gap to human-level performance in face verification** (2014), Y. Taigman et al. [[pdf]](http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Taigman_DeepFace_Closing_the_2014_CVPR_paper.pdf)
- **Large-scale video classification with convolutional neural networks** (2014), A. Karpathy et al. [[pdf]](http://vision.stanford.edu/pdf/karpathy14.pdf)
- **Two-stream convolutional networks for action recognition in videos** (2014), K. Simonyan et al. [[pdf]](http://papers.nips.cc/paper/5353-two-stream-convolutional-networks-for-action-recognition-in-videos.pdf)
- **3D convolutional neural networks for human action recognition** (2013), S. Ji et al. [[pdf]](http://machinelearning.wustl.edu/mlpapers/paper_files/icml2010_JiXYY10.pdf)
- **Distributed representations of words and phrases and their compositionality** (2013), T. Mikolov et al. [[pdf]](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf)
- **Efficient estimation of word representations in vector space** (2013), T. Mikolov et al. [[pdf]](http://arxiv.org/pdf/1301.3781)
- **Recursive deep models for semantic compositionality over a sentiment treebank** (2013), R. Socher et al. [[pdf]](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.383.1327&rep=rep1&type=pdf)
- **Generating sequences with recurrent neural networks** (2013), A. Graves. [[pdf]](https://arxiv.org/pdf/1308.0850)
- **End-to-end attention-based large vocabulary speech recognition** (2016), D. Bahdanau et al. [[pdf]](https://arxiv.org/pdf/1508.04395)
- **Deep speech 2: End-to-end speech recognition in English and Mandarin** (2015), D. Amodei et al. [[pdf]](https://arxiv.org/pdf/1512.02595)
- **Speech recognition with deep recurrent neural networks** (2013), A. Graves [[pdf]](http://arxiv.org/pdf/1303.5778.pdf)
- **Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups** (2012), G. Hinton et al. [[pdf]](http://www.cs.toronto.edu/~asamir/papers/SPM_DNN_12.pdf)
- **Context-dependent pre-trained deep neural networks for large-vocabulary speech recognition** (2012), G. Dahl et al. [[pdf]](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.337.7548&rep=rep1&type=pdf)
- **Acoustic modeling using deep belief networks** (2012), A. Mohamed et al. [[pdf]](http://www.cs.toronto.edu/~asamir/papers/speechDBN_jrnl.pdf)
- **Eie: Efficient inference engine on compressed deep neural network** (2016), S. Han et al. [[pdf]](http://arxiv.org/pdf/1602.01528)
- **Binarized neural networks: Training deep neural networks with weights and activations constrained to +1 or -1** (2016), M. Courbariaux et al. [[pdf]](https://arxiv.org/pdf/1602.02830)
- **Dynamic memory networks for visual and textual question answering** (2016), C. Xiong et al. [[pdf]](http://www.jmlr.org/proceedings/papers/v48/xiong16.pdf)
- **Stacked attention networks for image question answering** (2016), Z. Yang et al. [[pdf]](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Yang_Stacked_Attention_Networks_CVPR_2016_paper.pdf)
- **Hybrid computing using a neural network with dynamic external memory** (2016), A. Graves et al. [[pdf]](https://www.gwern.net/docs/2016-graves.pdf)
- **Google's neural machine translation system: Bridging the gap between human and machine translation** (2016), Y. Wu et al. [[pdf]](https://arxiv.org/pdf/1609.08144)

(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.297.3484&rep=rep1&type=pdf)
- Learning mid-level features for recognition (2010), Y. Boureau [[pdf]](http://ece.duke.edu/~lcarin/boureau-cvpr-10.pdf)
- A practical guide to training restricted boltzmann machines (2010), G. Hinton [[pdf]](http://www.csri.utoronto.ca/~hinton/absps/guideTR.pdf)
- Understanding the difficulty of training deep feedforward neural networks (2010), X. Glorot and Y. Bengio [[pdf]](http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2010_GlorotB10.pdf)
- Why does unsupervised pre-training help deep learning (2010), D. Erhan et al. [[pdf]](http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2010_ErhanCBV10.pdf)
- Learning deep architectures for AI (2009), Y. Bengio. [[pdf]](<http://sanghv.com/download/soft/machine%20learning,%20artificial%20intelligence,%20mathematics%20ebooks/ML/learning%20deep%20architectures%20for%20AI%20(2009).pdf>)
- Convolutional deep belief networks for scalable unsupervised learning of hierarchical representations (2009), H. Lee et al. [[pdf]](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.149.802&rep=rep1&type=pdf)
- Greedy layer-wise training of deep networks (2007), Y. Bengio et al. [[pdf]](http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_739.pdf)
- Reducing the dimensionality of data with neural networks (2006), G. Hinton and R. Salakhutdinov. [[pdf]](http://homes.mpimf-heidelberg.mpg.de/~mhelmsta/pdf/2006%20Hinton%20Salakhudtkinov%20Science.pdf)
- A fast learning algorithm for deep belief nets (2006), G. Hinton et al. [[pdf]](http://nuyoo.utm.mx/~jjf/rna/A8%20A%20fast%20learning%20algorithm%20for%20deep%20belief%20nets.pdf)
- Gradient-based learning applied to document recognition (1998), Y. LeCun et al. [[pdf]](http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf)
- Long short-term memory (1997), S. Hochreiter and J. Schmidhuber. [[pdf]](http://www.mitpressjournals.org/doi/pdfplus/10.1162/neco.1997.9.8.1735)

(http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Bell_Inside-Outside_Net_Detecting_CVPR_2016_paper.pdf)
- Instance-aware semantic segmentation via multi-task network cascades (2016), J. Dai et al. [[pdf]](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Dai_Instance-Aware_Semantic_Segmentation_CVPR_2016_paper.pdf)
- Conditional image generation with pixelcnn decoders (2016), A. van den Oord et al. [[pdf]](http://papers.nips.cc/paper/6527-tree-structured-reinforcement-learning-for-sequential-object-localization.pdf)
- Deep networks with stochastic depth (2016), G. Huang et al. [[pdf]](https://arxiv.org/pdf/1603.09382)
- Consistency and Fluctuations For Stochastic Gradient Langevin Dynamics (2016), Yee Whye Teh et al. [[pdf]](http://www.jmlr.org/papers/volume17/teh16a/teh16a.pdf)

**(~2014)**

- DeepPose: Human pose estimation via deep neural networks (2014), A. Toshev and C. Szegedy [[pdf]](http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.pdf)
- Learning a Deep Convolutional Network for Image Super-Resolution (2014), C. Dong et al. [[pdf]](https://www.researchgate.net/profile/Chen_Change_Loy/publication/264552416_Lecture_Notes_in_Computer_Science/links/53e583e50cf25d674e9c280e.pdf)
- Recurrent models of visual attention (2014), V. Mnih et al. [[pdf]](http://arxiv.org/pdf/1406.6247.pdf)
- Recurrent neural network regularization (2014), W. Zaremba et al. [[pdf]](http://arxiv.org/pdf/1409.2329)
- Intriguing properties of neural networks (2014), C. Szegedy et al. [[pdf]](https://arxiv.org/pdf/1312.6199.pdf)
- Towards end-to-end speech recognition with recurrent neural networks (2014), A. Graves and N. Jaitly. [[pdf]](http://www.jmlr.org/proceedings/papers/v32/graves14.pdf)
- Scalable object detection using deep neural networks (2014), D. Erhan et al. [[pdf]](http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Erhan_Scalable_Object_Detection_2014_CVPR_paper.pdf)
- On the importance of initialization and momentum in deep learning (2013), I. Sutskever et al. [[pdf]](http://machinelearning.wustl.edu/mlpapers/paper_files/icml2013_sutskever13.pdf)
- Regularization of neural networks using dropconnect (2013), L. Wan et al. [[pdf]](http://machinelearning.wustl.edu/mlpapers/paper_files/icml2013_wan13.pdf)
- Learning Hierarchical Features for Scene Labeling (2013), C. Farabet et al. [[pdf]](https://hal-enpc.archives-ouvertes.fr/docs/00/74/20/77/PDF/farabet-pami-13.pdf)

## Acknowledgement

Thank you for all your contributions. Please make sure to read the [contributing guide](https://github.com/terryum/awesome-deep-learning-papers/blob/master/Contributing.md) before you make a pull request.

## License

[![CC0](http://mirrors.creativecommons.org/presskit/buttons/88x31/svg/cc-zero.svg)](https://creativecommons.org/publicdomain/zero/1.0/)